' % (email, verify_url, verify_url)\n    try:\n        send_mail(subject, \"\", settings.EMAIL_FROM, [email], html_message=html_message)\n    except Exception as e:\n        logger.error(e)\n        # on exception, automatically retry up to three times\n        raise self.retry(exc=e, max_retries=3)\n","sub_path":"meiduo_mall02/celery_tasks/email/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"146608314","text":"'''\nProblem Statement\n\nYou have n barrels lined up in a row, numbered from left to right from one. Initially, the i-th\nbarrel contains ai liters of water.\nYou can pour water from one barrel to another. In one act of pouring, you can choose two\ndifferent barrels x and y (the x-th barrel shouldn't be empty) and pour any possible amount of\nwater from barrel x to barrel y (possibly, all water). You may assume that barrels have infinite\ncapacity, so you can pour any amount of water in each of them.\nCalculate the maximum possible difference between the maximum and the minimum amount\nof water in the barrels, if you can pour water at most k times.\n\nInput Format\n\nThe first line contains one integer t— the number of test cases.\nThe first line of each test case contains two integers n and k — the number of barrels and the\nnumber of pourings you can make.\nThe second line contains n integers a1,a2,…,an , where ai is the initial amount of water the i-th\nbarrel has.\nIt's guaranteed that the total sum of n over test cases doesn't exceed\n\nConstraints\n\n(1≤t≤1000)\n(1≤k\n(0≤ai≤10^9)\n2⋅(10^5).\n\nOutput Format\n\nFor each test case, print the maximum possible difference between the maximum and the\nminimum amount of water in the barrels, if you can pour water at most k times\n\n'''\nt = int(input())\nfor i in range(t):\n n, k = map(int, input().split())\n lit = list(map(int, input().split()))\n lit.sort()\n for i in range(k):\n if(lit[-1]!=0 and lit[-2]!=0):\n res=lit[-1]+lit[-2]\n lit=lit[:-2]\n lit.append(res)\n if(k>0):\n print(lit[-1])\n else:\n print(lit[-1]-lit[0])\n","sub_path":"Fill_Barrel.py","file_name":"Fill_Barrel.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"415521915","text":"\"\"\"\nGiven two strings s and t, determine if they are isomorphic.\nTwo strings are isomorphic if the characters in s can be replaced to get t.\n\nAll occurrences of a character must be replaced with another character while preserving the order of characters.\nNo two characters may map to the same character but a character may map to itself.\n\nExample 1:\n\nInput: s = \"egg\", t = \"add\"\nOutput: true\nExample 2:\n\nInput: s = \"foo\", t = \"bar\"\nOutput: false\n\"\"\"\nclass Solution(object):\n def isIsomorphic(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n if not s or not t:\n return True\n\n # 用来记录两个单词的字母要怎么匹配\n hashmap = {}\n if len(s) != len(t):\n return False\n\n for i in range(len(s)):\n if s[i] in hashmap:\n if hashmap[s[i]] != t[i]:\n return False\n else:\n # \"ab\"\n # \"aa\"\n # 此时a:a已经在dict里面了,但是b作为key却没在里面,但是a已经和a匹配过了,所以要返回false\n if t[i] in hashmap.values():\n return False\n else:\n hashmap[s[i]] = t[i]\n return True\n\n\"\"\"\nTime O(n) space O(n0\nhttps://www.youtube.com/watch?v=tBK5f-BJOdg\n答案:\n1.我们用hashmap(python 中的dictionary),来储存 s:t\n 例如: abb\n egg (a:e , b:g) 那么后续碰到a,b这两个字母,我们只用看看value对不对的上就可以了\n\n2.当两个string长度都不一样,肯定不是同构\n3. 假如说s[i] 在hashmap里面,那么取出s[i]的value,看和t[i]对不对的上\n4. 假如说s[i] 不在hashmap里面,就要看t[i]在不在hashmap.values()里,有的话说明错了\n 若都不在,那就把他们两加进map里\n5.经过重重考验,最后可以return True了\n\n\n\"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"leetcode/Hashmap/205m. Isomorphic Strings(同构字符串).py","file_name":"205m. Isomorphic Strings(同构字符串).py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"542701747","text":"import numpy as np\nimport numba\nimport copy\n\n\nnp.random.seed(42)\n\n\ndef reset_seed(seed=42):\n np.random.seed(seed)\n\n\n@numba.njit(parallel=True)\ndef cross_entropy_loss(y, y_hat):\n \"\"\"Implements the cross-entropy loss.\"\"\"\n loss = -y * np.log(y_hat + 1e-8)\n for i in numba.prange(loss.shape[1]):\n loss[np.isnan(loss[:, i]), i] = 0.0\n loss[np.isinf(loss[:, i]), i] = np.finfo(loss.dtype).max\n return np.sum(loss) / y.shape[0]\n\n\n@numba.njit()\ndef softmax(x_):\n \"\"\"Implements a numerically stable softmax function.\"\"\"\n exp = np.exp(x_ - np.max(x_))\n return exp / np.sum(exp)\n\n\n@numba.njit()\ndef sigma(x_):\n \"\"\"\n Implements the logistic function.\n \"\"\"\n return 1 / (1 + np.exp(-x_))\n\n\n@numba.njit()\ndef sigma_prime(x_):\n \"\"\"\n Implements the derivative of the logistic function.\n \"\"\"\n s = sigma(x_)\n return s * (1 - s)\n\n\n@numba.njit()\ndef relu(x_):\n \"\"\"Implements the ReLU activation function. Not used.\"\"\"\n return np.maximum(x_, 0)\n\n\n@numba.njit()\ndef relu_prime(x_):\n \"\"\"Implements the ReLU derivative. Not used.\"\"\"\n return np.minimum(np.maximum(x_, 0), 1)\n\n\n@numba.njit()\ndef predict(x_, w_, b_):\n \"\"\"\n Calculates the predictions y_hat = w_ @ x_ + b.\n \"\"\"\n return x_ @ w_ + b_\n\n\ndef l1_prime(weight):\n \"\"\"Implements L1 regularisation.\"\"\"\n return np.where(weight >= 0, 1.0, -1.0)\n\n\ndef l2_prime(weight):\n \"\"\"Implements L2 regularisation.\"\"\"\n return weight\n\n\ndef forward_pass(xs, weights, return_prediction=False, activation=sigma):\n \"\"\"\n Calculates one forward pass of the model. If return_predictions=True only the predictions of the model will be\n returned.\n \"\"\"\n # First forward dict contains outputs, second forward dict contains outputs after feeding them through a logistic\n # function.\n forward_dicts = [{}, {}]\n prev_output = xs\n # Loop through layers, calculate outputs and feed them through activation functions.\n for i in range(len(weights)):\n weight, bias = weights[i]\n forward_dicts[0][i + 1] = predict(prev_output, weight, bias)\n if i == len(weights) - 1:\n prev_output = forward_dicts[1][i + 1] = np.array(\n [softmax(forward_dicts[0][i + 1][j]) for j in range(forward_dicts[0][i + 1].shape[0])])\n else:\n prev_output = forward_dicts[1][i + 1] = activation(forward_dicts[0][i + 1])\n if return_prediction:\n return prev_output\n return forward_dicts\n\n\ndef backward_pass(xs, ys, weights, forward_dicts, activation_derivative=sigma_prime, l1_reg=0.0, l2_reg=0.0):\n \"\"\"\n Calculates one backward pass of the model.\n \"\"\"\n num_layers = len(forward_dicts[0])\n gradients = []\n y_hat = forward_dicts[1][num_layers]\n ys = np.eye(y_hat.shape[1])[ys]\n # Delta for last layer.\n delta = (1.0 / ys.shape[0]) * (y_hat - ys)\n for i in range(num_layers - 1, -1, -1):\n # Loop through the layers and calculate the gradients for each weight/bias vector.\n prev_output = forward_dicts[1][i] if i >= 1 else xs\n weight_gradient = prev_output.T @ delta + l1_reg * l1_prime(weights[i][0]) + l2_reg * l2_prime(weights[i][0])\n bias_gradient = np.sum(delta, axis=0)\n gradients.append((weight_gradient, bias_gradient))\n if i != 0:\n delta = (delta @ weights[i][0].T) * activation_derivative(forward_dicts[0][i])\n return gradients\n\n\ndef initialise_weights(layers):\n \"\"\"\n Randomly initialise the weights specified by layers.\n \"\"\"\n weights = []\n for i in range(len(layers)):\n input_dim, output_dim = layers[i]\n weight = np.random.randn(input_dim, output_dim) 
/ 100\n bias = np.random.randn(output_dim) / 100\n weights.append((weight, bias))\n return weights\n\n\ndef analytical_gradients(xs, ys, weights, activation=sigma, activation_derivative=sigma_prime, l1_reg=0.0, l2_reg=0.0):\n \"\"\"\n Calculate the analytical gradients of the model.\n \"\"\"\n forward_dict = forward_pass(xs, weights, activation=activation)\n gradients = backward_pass(xs, ys, weights, forward_dict, activation_derivative=activation_derivative, l1_reg=l1_reg,\n l2_reg=l2_reg)\n return gradients, forward_dict\n\n\ndef calculate_error_loss(xs, weights, true):\n \"\"\"\n Calculate the error as well as the loss for a given dataset (xs, true) and weights.\n \"\"\"\n predictions = forward_pass(xs, weights, return_prediction=True)\n ys = np.eye(predictions.shape[1])[true]\n loss = cross_entropy_loss(ys, predictions) / predictions.shape[0]\n error = (predictions.argmax(axis=1) != true).sum() / true.size * 100.0\n return loss, error\n\n\ndef update_weights(weights, gradients, learning_rate, layer_count, momentum, prev_gradients):\n \"\"\"\n Updates the weights of all of the layers for given gradients.\n :param weights: The weights to be updated.\n :param gradients: The gradients w.r.t. the error.\n :param learning_rate: The learning rate.\n :param layer_count: The number of layers in the model.\n :param momentum: The momentum coefficient. Set to 0 to disable momentum.\n :param prev_gradients: The previous gradients (used for momentum).\n :return: The updated weights.\n \"\"\"\n for i in range(len(gradients)):\n weight_gradient, bias_gradient = gradients[i]\n prev_weight_gradient, prev_bias_gradient = prev_gradients[i]\n weight, bias = weights[layer_count - i - 1]\n weight -= learning_rate * weight_gradient + momentum * prev_weight_gradient\n bias -= learning_rate * bias_gradient + momentum * prev_bias_gradient\n weights[layer_count - i - 1] = (weight, bias)\n return weights\n\n\ndef train_mlp(xs, ys, epochs, learning_rate, layers, optimizer=update_weights, batching=\"Full\", batch_size=0,\n momentum=0.0, l1_reg=0.0, l2_reg=0.0, return_metrics=False, return_best_weights=False, print_metrics=True,\n test_xs=None, test_ys=None):\n \"\"\"\n Trains a multi-layer perceptron. The training is performed by gradient descent and backpropagation. The training\n supports different batch modes ('Full', 'Mini', and 'SGD'). The parameter batch_size specifies the size of a single\n batch for the batch mode 'Mini' (otherwise the parameter is ignored). The momentum parameter specifies the momentum\n coefficient. To disable momentum the parameter can be set to 0.0 (default). The parameters l1_reg and l2_reg control\n the regularisation coefficients. To disable regularisation set the parameters to 0.0 (default).\n :param xs: Training data.\n :param ys: Training targets.\n :param epochs: The number of epochs the training should be run for.\n :param learning_rate: The learning rate for the weight updates.\n :param layers: The specification of the layers. An iterable containing tuples of (input_dimension, output_dimension)\n are expected. Each tuple specifies a single layer in the network.\n :param optimizer: Function to perform the weight update.\n :param batching: Batch mode. Either 'Full' (default), 'Mini', or SGD.\n :param batch_size: The size of a single batch. Only relevant for batch mode 'Mini'.\n :param momentum: The momentum coefficient. 
Set to 0 to disable momentum (default).\n    :param l1_reg: L1 regularisation coefficient.\n    :param l2_reg: L2 regularisation coefficient.\n    :param return_metrics: Returns a dictionary containing all the training/validation accuracies and losses per epoch.\n    :param return_best_weights: Return the best weights (defined by the highest validation accuracy).\n    :param print_metrics: Print the metrics during training.\n    :param test_xs: Test data.\n    :param test_ys: Test labels.\n    :return: weights [list], (metrics [dict])\n    \"\"\"\n    weights = initialise_weights(layers)\n    best_weights, best_error = weights, 100.0\n    y_hat_alias, layer_count = f\"h{len(layers)}_sigma\", len(layers)\n    metrics = {\"train_loss\": np.zeros(epochs), \"train_err\": np.zeros(epochs), \"test_loss\": np.zeros(epochs),\n               \"test_err\": np.zeros(epochs)}\n    prev_gradients = [(np.zeros_like(weight[0]), np.zeros_like(weight[1])) for weight in weights]\n    # Reverse order of initial prev_gradients (based on weight shapes) as the gradients start from the last layer.\n    prev_gradients.reverse()\n    # Split the training data into full/mini/SGD batches.\n    if batching != \"Full\":\n        if batching == \"Mini\" and batch_size > 0:\n            # Split into batches of size batch_size with the remainder being omitted.\n            x_batches = np.array_split(xs, np.arange(batch_size, xs.shape[0], batch_size))\n            y_batches = np.array_split(ys, np.arange(batch_size, ys.shape[0], batch_size))\n        elif batching == \"SGD\":\n            # Split into batches of size 1 along the sample axis (np.hsplit would split\n            # along the feature axis and fail on 1-D labels). This allows reusing the\n            # same training code for mini-batches.\n            x_batches = np.array_split(xs, xs.shape[0])\n            y_batches = np.array_split(ys, ys.shape[0])\n        else:\n            raise ValueError(\"Parameter batching must be either 'Full', 'Mini' or 'SGD'.\")\n    # Training\n    for epoch in range(epochs):\n        if batching != \"Full\":\n            for i in range(len(x_batches)):\n                # Train on mini-batches. prev_gradients is only used if momentum > 0.0.\n                gradients, forward_dict = analytical_gradients(x_batches[i], y_batches[i], weights, l1_reg=l1_reg,\n                                                               l2_reg=l2_reg)\n                weights = optimizer(weights, gradients, learning_rate, layer_count, momentum, prev_gradients)\n                prev_gradients = gradients\n        else:\n            # Train on full batch. prev_gradients is only used if momentum > 0.0.\n            # Pass the regularisation coefficients here as well, matching the mini-batch path.\n            gradients, forward_dict = analytical_gradients(xs, ys, weights, l1_reg=l1_reg, l2_reg=l2_reg)\n            weights = optimizer(weights, gradients, learning_rate, layer_count, momentum, prev_gradients)\n            prev_gradients = gradients\n        # Calculate error and loss on the training and test (if present) sets.\n        train_loss, train_error = calculate_error_loss(xs, weights, ys)\n        metrics[\"train_err\"][epoch] = train_error\n        metrics[\"train_loss\"][epoch] = train_loss\n        if train_error < best_error:\n            # Save best weights.\n            best_weights = copy.deepcopy(weights)\n            best_error = train_error\n        if test_xs is not None and test_ys is not None:\n            test_loss, test_error = calculate_error_loss(test_xs, weights, test_ys)\n            metrics[\"test_err\"][epoch] = test_error\n            metrics[\"test_loss\"][epoch] = test_loss\n        if print_metrics:\n            print(\n                f\"Epoch {epoch} - Training loss {train_loss} - Training error {train_error} - Test loss {test_loss}\"\n                f\" - Test error {test_error}\")\n    reset_seed()\n    if return_best_weights:\n        weights = best_weights\n    if return_metrics:\n        return weights, metrics\n    return weights\n","sub_path":"digit_classification/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":10933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
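A note on validating the record above: backpropagation code like backward_pass is easy to get subtly wrong, and a finite-difference comparison is the standard sanity check. The sketch below is not part of mlp.py; it assumes it runs in the same module, so initialise_weights, analytical_gradients, forward_pass and cross_entropy_loss are in scope. It perturbs one first-layer weight and compares the numeric slope with the analytic gradient (backward_pass returns gradients ordered last layer first, hence gradients[-1]).

```python
import numpy as np

def gradient_check(eps=1e-5):
    # tiny random problem: 8 samples, 4 features, hidden width 5, 3 classes
    xs = np.random.randn(8, 4)
    ys = np.random.randint(0, 3, size=8)
    weights = initialise_weights([(4, 5), (5, 3)])
    gradients, _ = analytical_gradients(xs, ys, weights)

    def loss():
        y_hat = forward_pass(xs, weights, return_prediction=True)
        return cross_entropy_loss(np.eye(3)[ys], y_hat)

    w = weights[0][0]                  # first-layer weight matrix (mutated in place)
    i, j = 0, 0
    saved = w[i, j]
    w[i, j] = saved + eps
    loss_plus = loss()
    w[i, j] = saved - eps
    loss_minus = loss()
    w[i, j] = saved                    # restore the original value
    numeric = (loss_plus - loss_minus) / (2 * eps)
    analytic = gradients[-1][0][i, j]  # gradients are ordered last layer first
    print(abs(numeric - analytic))     # should be ~1e-7 or smaller
```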
+{"seq_id":"598431431","text":"#! /usr/bin/env python\n\nimport re\n\nclass MarkdownHelper(object):\n\n\trulesRegex = [\n\t\t# tables\n\t\t(r'((?:([^\\r\\n|]*)\\|)+(?:([^\\r\\n|]*)))\\r?\\n(?:( ?:?-+:? ?)\\|)+(?:( ?:?-+:? ?))\\r?\\n(((?:([^\\r\\n|]*)\\|)+(?:([^\\r\\n|]*))\\r?\\n)+)', \"\\n
\")\n\t\t# treatment on TD part Table\n\t\tfullText = re.sub(r'{{{{TD}}}}\\s*[|]?((.|\\n)*?)\\n?[|]?\\s*{{{{TD}}}}', replaceTD, fullText, flags=re.MULTILINE)\n\t\n\t\treturn fullText\n\n\n#Do for tests\nif __name__ == '__main__':\n\n\ttest = \"\"\"\n\t\t\nThis is a simple test for MardownHelper\n\n***\n\n# Title1\n\nunder the title 1\n\n## Title 2\n\nunder thte title 2\n\n\nList:\n\n- l1\n- l2\n- l3\n- l4\n\n\n\"\"\"\n\n\t# historic calling\n\t#with open('../tests.md', 'r') as myfile:\n\t#\ttest = myfile.read()\n\tfrom pathlib import Path\n\ttxt = Path('../tests.md').read_text()\n\n\ttransformedText = MarkdownHelper.transformMdToHtml(txt)\n\tprint(transformedText)","sub_path":"python/MarkdownHelper.py","file_name":"MarkdownHelper.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"312198016","text":"###\n# PASA Confidentiality Notice:\n# This source code and information contained herewith may be legally privileged and confidential\n# Any dissemination, distribution or copy of this source code is strictly prohibited.\n#\n# Copyright (C) 2019, Panasonic Automotive Systems Company of America\n# All Rights Reserved\n#\n#\n# @file: email_test.py\n#\n# @author: Panasonic, developer\n#\n##\n\nimport unittest\n\nfrom unittest.mock import patch, MagicMock\n\nfrom sns2email.email import EmailSender, CHARSET\n\n\nclass TestEmailSender(unittest.TestCase):\n @patch(\"sns2email.email.os\")\n def test_email_create(self, os):\n sender = \"test_sender\"\n recipient = \"test_recipient\"\n os.getenv.side_effect = [sender, recipient]\n\n email = EmailSender()\n\n self.assertEqual(email.sender, sender)\n self.assertEqual(email.recipient, recipient)\n\n @patch(\"sns2email.email.boto3\")\n @patch(\"sns2email.email.os\")\n def test_email_send(self, os, boto3):\n subject = \"test_subject\"\n body = \"test_body\"\n sender = \"test_sender\"\n recipient = \"test_recipient\"\n os.getenv.side_effect = [sender, recipient]\n ses_client_mock = MagicMock()\n boto3.client.side_effect = [ses_client_mock]\n\n email = EmailSender().send(subject=subject, body=body)\n\n ses_client_mock.send_email.assert_called_once_with(Destination={'ToAddresses': [recipient]}, Message={'Body': {'Text': {'Charset': 'UTF-8', 'Data': body}}, 'Subject': {'Charset': 'UTF-8', 'Data': subject}}, Source=sender)\n","sub_path":"lambda/sns2email/tests/email_test.py","file_name":"email_test.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"146387472","text":"# -*- coding: utf-8 -*-\n#this is a module containing all the relationship-related functions.\nimport hashlib\nimport re\nimport configparser\nimport json\nimport os\n\nhash = hashlib.md5()\n\nconfig = configparser.ConfigParser()\nconfigPath = './config/graph.cfg'\nconfig.read(configPath)\n\nunwantedWords = config['relationship']['unwanted'].split(',')\n#print(unwantedWords)\n\nconfigPath = './config/data.cfg'\nconfig.read(configPath)\ndataDir = config['data']['dataDir']\n\nif os.path.isfile(dataDir + 'graph/relationships/relationships.txt'):\n with open(dataDir + 'graph/relationships/relationships.txt', 'r') as g:\n data = [item.strip() for item in g.readlines()]\nelse:\n data = []\n\n\n#print(data)\n\ndef formatRel(rel, unwantedWords):\n for word in unwantedWords:\n rel = re.sub(word + ' ','',rel)\n return rel\n\n\ndef createRelationshipFromDict(dictionary):\n start = dictionary['start']\n end = dictionary['end']\n relationship = formatRel(dictionary['relationship'], unwantedWords)\n \n sentence = start + '---' + relationship +'---' + end\n sentence = sentence.replace('/','-')\n# hash.update(sentence.encode('utf-8'))\n# h = hash.hexdigest()\n \n with open(dataDir + 'graph/relationships/relationships.txt', 'a') as f:\n js = json.dumps(dictionary)\n if js in data:\n print('Relationship already existed: ', start , ' -[', relationship, ']-> ', end)\n return None\n print(js, file=f)\n print('Relationship created: ', start , ' -[', relationship, ']-> ', end)\n\n\ndef createRelationshipFromCategoryBox(catbox):\n #catbox['main'] is a child node of categories.\n main = catbox['main']\n categories = catbox['categories']\n\n relationships = [{'start': category['name'], 'end': main, 'relationship': 'subcategory', 'source':'catbox'} for category in categories]\n [createRelationshipFromDict(relationship) for relationship in relationships]\n \ndef createRelationshipFromCategoryPage(catPage):\n try:\n main = catPage['main']['name']\n children = catPage['child']\n relationships = [{'start':main, 'end':child['name'], 'relationship':'subcategory','source':'catpage'} for child in children]\n [createRelationshipFromDict(relationship) for relationship in relationships]\n except TypeError:\n pass\n\ndef createRelationshipFromNavbox(table):\n tb = table['navbox']\n mainNode = table['main']\n relationships = []\n name = mainNode['name']\n try:\n links = mainNode['links']\n except (KeyError, AttributeError):\n links = None\n \n def createRelationship(layer, rel = '', z=''):\n if type(layer) == list:\n for item in layer:\n R = rel\n Z = z\n Z = Z.split('---')\n #Z is now an array\n #Remove empty elem\n Arr = []\n for e in Z:\n if e != '':\n Arr.append(formatRel(e, unwantedWords))\n \n relationship = {'start': name, 'end':item['name'], 'relationship': formatRel(R, unwantedWords), 'source':'navbox', 'relLabel': Arr}\n relationships.append(relationship)\n #createSimpleNode(item['name'])\n else:\n for key in layer:\n #print([key, rel])\n if key != rel:\n R = key\n Z = z + '---' + key\n else:\n R = rel\n Z = z\n createRelationship(layer[key], R, Z)\n #print('Created nodes from navbox layer: ', layer)\n \n createRelationship(tb)\n \n if links:\n if len(links) > 1:\n mainLinkRelationships = []\n mainLinkRelationships = []\n for link in links:\n #create relationships between links and childNodes\n linkChildRelationships = []\n mainLinkRelationship = {'start': name, 'end':link['name'], 'relationship': 'related concept', 'source':'navtitle'}\n 
mainLinkRelationships.append(mainLinkRelationship)\n for relationship in relationships:\n linkChildRelationship = {'start': link['name'], 'end':relationship['end'], 'relationship': relationship['relationship'], 'source':'navtitle'}\n linkChildRelationships.append(linkChildRelationship)\n #create relationships between mainNode and links\n \n \n \n relationships = relationships + linkChildRelationships + mainLinkRelationships\n print('Created relationships from navbox') \n [createRelationshipFromDict(relationship) for relationship in relationships]\n return relationships\n \n#createRelationshipFromCategoryBox(catbox)\n#createRelationshipFromCategoryPage(categoryPage)\n#rel = createRelationshipFromNavbox(tables[2])\n#print(rel)","sub_path":"graph/relationship.py","file_name":"relationship.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"353456049","text":"# encoding: UTF-8\n\nfrom __future__ import print_function\nimport json\nimport requests\nfrom socketIO_client import SocketIO\nfrom threading import Thread\nfrom queue import Queue, Empty\n\n\n########################################################################\nclass FxcmApi(object):\n \"\"\"FXCM\"\"\"\n API_URL = 'https://api-demo.fxcm.com:443'\n WEBSOCKET_PORT = 443\n METHOD_GET = 'get'\n METHOD_POST = 'post'\n \n MODEL_OFFER = 'Offer'\n MODEL_ACCOUNT = 'Account'\n MODEL_ORDER = 'Order'\n MODEL_OPENPOSITION = 'OpenPosition'\n MODEL_SUMMARY = 'Summary'\n MODEL_PROPERTIES = 'Properties'\n MODEL_CLOSEDPOSITION = 'ClosedPosition'\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.url = ''\n self.port = ''\n self.token = ''\n self.proxy = ''\n \n self.sio = None\n self.bearer = ''\n self.headers = None\n \n self.queue = Queue()\n self.reqid = 0\n self.active = False\n self.reqThread = None\n self.sioThread = None\n \n #----------------------------------------------------------------------\n def connect(self, url, port, token, proxy=''):\n \"\"\"连接\"\"\"\n self.url = url\n self.port = port\n self.token = token\n self.proxy = proxy\n \n self.active = True\n \n self.reqThread = Thread(target=self.runReq)\n self.reqThread.start()\n \n self.sioThread = Thread(target=self.runSio)\n self.sioThread.start()\n \n #----------------------------------------------------------------------\n def stop(self):\n \"\"\"停止\"\"\"\n if self.active:\n self.active = False\n self.reqThread.join()\n \n self.sio._close()\n self.sioThread.join()\n \n #----------------------------------------------------------------------\n def initSocketIO(self):\n \"\"\"初始化SocketIO客户端\"\"\"\n params = {\n 'access_token': self.token, \n 'agent': \"leiwang-rest-api\"\n }\n \n proxy = {}\n if self.proxy:\n proxy['https'] = self.proxy \n \n self.sio = SocketIO(self.url, self.port, params=params, proxies=proxy)\n \n self.sio.on('connect', self.onConnect)\n self.sio.on('disconnect', self.onDisconnect)\n \n #----------------------------------------------------------------------\n def generateBearer(self):\n \"\"\"创建通讯授权码\"\"\"\n self.bearer = \"Bearer \" + self.sio._engineIO_session.id + self.token\n \n #----------------------------------------------------------------------\n def generateHeaders(self):\n \"\"\"生成通讯头部\"\"\"\n self.headers = {\n 'User-Agent': 'request',\n 'Authorization': self.bearer,\n 'Accept': 'application/json',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n \n #----------------------------------------------------------------------\n def runReq(self):\n \"\"\"处理主动请求\"\"\"\n while self.active:\n try:\n d = self.queue.get(timeout=1)\n self.processReq(d)\n except Empty:\n pass\n \n #----------------------------------------------------------------------\n def runSio(self):\n \"\"\"处理回调数据\"\"\"\n self.initSocketIO()\n self.generateBearer()\n self.generateHeaders() \n self.sio.wait()\n \n #----------------------------------------------------------------------\n def sendReq(self, method, uri, params, callback):\n \"\"\"发出请求\"\"\"\n self.reqid += 1\n \n d = {\n 'method': method,\n 'uri': uri,\n 'params': params,\n 'callback': callback,\n 'reqid': self.reqid\n }\n \n self.queue.put(d)\n \n return self.reqid\n \n #----------------------------------------------------------------------\n def processReq(self, d):\n \"\"\"处理请求\"\"\"\n method = d['method']\n uri = d['uri']\n params = d['params']\n callback = 
d['callback']\n reqid = d['reqid']\n \n url = self.url + uri\n \n proxy = {}\n if self.proxy:\n proxy['https'] = self.proxy\n \n if method == self.METHOD_GET:\n resp = requests.get(url, headers=self.headers, params=params, proxies=proxy)\n elif method == self.METHOD_POST:\n resp = requests.post(url, headers=self.headers, data=params, proxies=proxy)\n \n if resp.status_code == 200:\n data = resp.json()\n if data[\"response\"][\"executed\"] is True:\n callback(data, reqid)\n return\n if 'response' in data:\n self.onError(data[\"response\"][\"error\"], reqid)\n else:\n self.onError(u'HTTP请求失败,错误代码%s' %resp.status_code)\n \n #----------------------------------------------------------------------\n def getInstruments(self):\n \"\"\"查询合约代码\"\"\"\n uri = '/trading/get_instruments'\n reqid = self.sendReq(self.METHOD_GET, uri, {}, self.onGetInstruments)\n return reqid\n \n #----------------------------------------------------------------------\n def getModel(self, model):\n \"\"\"查询表\"\"\"\n uri = '/trading/get_model'\n params = {'models': model}\n reqid = self.sendReq(self.METHOD_GET, uri, params, self.onGetModel)\n return reqid \n \n #----------------------------------------------------------------------\n def subscribe(self, symbol):\n \"\"\"订阅行情\"\"\"\n uri = '/subscribe'\n params = {'pairs': symbol}\n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onSubscribe)\n self.sio.on(symbol, self.processPriceUpdate)\n return reqid\n \n #----------------------------------------------------------------------\n def unsubscribe(self, symbol):\n \"\"\"退订行情\"\"\"\n uri = '/unsubscribe'\n params = {'pairs': symbol}\n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onUnsubscribe)\n return reqid \n \n #----------------------------------------------------------------------\n def subscribeModel(self, model):\n \"\"\"订阅表\"\"\"\n uri = '/trading/subscribe'\n params = {'models': model}\n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onSubscribeModel)\n self.sio.on(model, self.processModelUpdate)\n return reqid\n \n #----------------------------------------------------------------------\n def unsubscribeModel(self, model):\n \"\"\"退订表\"\"\"\n uri = '/trading/unsubscribe'\n params = {'models': model}\n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onUnsubscribeModel)\n return reqid \n \n #----------------------------------------------------------------------\n def updateSubscriptions(self, symbol):\n \"\"\"订阅报价表\"\"\"\n uri = '/trading/update_subscriptions'\n params = {\n 'symbol': symbol,\n 'visible': 'true'\n }\n #params = {'symbol': symbol} \n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onUpdateSubscriptions)\n return reqid \n \n #----------------------------------------------------------------------\n def openTrade(self, accountID, symbol, isBuy, amount,\n atMarket, orderType, timeInForce,\n rate=0, limit=0, stop=0, \n trailingStep=0, isInPips=False):\n \"\"\"市价开仓交易\"\"\"\n uri = '/trading/open_trade'\n params = {\n 'account_id': accountID,\n 'symbol': symbol,\n 'is_buy': isBuy,\n 'amount': amount,\n 'at_market': atMarket,\n 'order_type': orderType,\n 'time_in_force': timeInForce,\n 'is_in_pips': isInPips\n }\n \n if rate:\n params['rate'] = rate\n \n if rate:\n params['limit'] = limit\n \n if stop:\n params['stop'] = stop\n \n if trailingStep:\n params['trailing_step'] = trailingStep\n \n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onOpenTrade)\n return reqid \n \n #----------------------------------------------------------------------\n def 
createEntryOrder(self, accountID, symbol, isBuy, rate, \n amount, orderType, timeInForce,\n limit=0, stop=0, trailingStep=0, isInPips=False):\n \"\"\"限价开仓交易\"\"\"\n uri = '/trading/create_entry_order'\n \n params = {\n 'account_id': accountID,\n 'symbol': symbol,\n 'is_buy': isBuy,\n 'rate': rate,\n 'amount': amount,\n 'order_type': orderType,\n 'time_in_force': timeInForce,\n 'is_in_pips': isInPips\n }\n \n if rate:\n params['limit'] = limit\n \n if stop:\n params['stop'] = stop\n \n if trailingStep:\n params['trailing_step'] = trailingStep\n \n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onOpenTrade)\n return reqid\n \n #----------------------------------------------------------------------\n def closeTrade(self, tradeID, amount, atMarket, orderType, timeInForce, rate=0):\n \"\"\"平仓交易\"\"\"\n uri = '/trading/close_trade'\n params = {\n 'trade_id': tradeID,\n 'amount': amount,\n 'at_market': atMarket,\n 'order_type': orderType,\n 'time_in_force': timeInForce\n }\n \n if rate:\n params['rate'] = rate\n\n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onCloseTrade)\n return reqid \n \n #----------------------------------------------------------------------\n def changeOrder(self, orderID, rate, range_, amount, trailingStep=0):\n \"\"\"修改委托\"\"\"\n uri = '/trading/change_order'\n params = {\n 'order_id': orderID,\n 'rate': rate,\n 'range': range_,\n 'amount': amount\n }\n \n if trailingStep:\n params['trailing_step'] = trailingStep\n \n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onChangeOrder)\n return reqid \n \n #----------------------------------------------------------------------\n def deleteOrder(self, orderID):\n \"\"\"撤销委托\"\"\"\n uri = '/trading/delete_order'\n params = {'order_id': orderID}\n reqid = self.sendReq(self.METHOD_POST, uri, params, self.onDeleteOrder)\n return reqid \n \n #----------------------------------------------------------------------\n def onConnect(self):\n \"\"\"连接回调\"\"\"\n print('onConnect')\n \n #----------------------------------------------------------------------\n def onDisconnect(self):\n \"\"\"断开回调\"\"\"\n print('onClose')\n \n #----------------------------------------------------------------------\n def onError(self, error, reqid):\n \"\"\"错误回调\"\"\"\n print('onError', error)\n \n #----------------------------------------------------------------------\n def onGetInstruments(self, data, reqid):\n \"\"\"查询合约代码回调\"\"\"\n print(data, reqid)\n \n #----------------------------------------------------------------------\n def onGetModel(self, data, reqid):\n \"\"\"查询表回调\"\"\"\n print('*' * 30)\n print(data)\n for d in data['offers']:\n #if str(d['currency']) == 'EUR/USD':\n # print d\n print(d['currency'])#, d['visible']\n #print len(data['summary'])\n #print data\n \n \n #----------------------------------------------------------------------\n def onSubscribe(self, data, reqid):\t\n \"\"\"订阅行情回调\"\"\"\n print(data, reqid) \n \n #----------------------------------------------------------------------\n def onUnsubscribe(self, data, reqid):\n \"\"\"退订行情回调\"\"\"\n print(data, reqid) \n \n #----------------------------------------------------------------------\n def onSubscribeModel(self, data, reqid):\n \"\"\"订阅表回调\"\"\"\n print(data, reqid) \n \n #----------------------------------------------------------------------\n def onUnsubscribeModel(self, data, reqid):\n \"\"\"退订表回调\"\"\"\n print(data, reqid) \n \n #----------------------------------------------------------------------\n def onUpdateSubscriptions(self, data, 
reqid):\n \"\"\"订阅报价表回调\"\"\"\n print(data, reqid)\n \n #----------------------------------------------------------------------\n def onOpenTrade(self, data, reqid):\n \"\"\"开仓回调\"\"\"\n print(data, reqid)\n \n #----------------------------------------------------------------------\n def onCloseTrade(self, data, reqid):\n \"\"\"平仓回调\"\"\"\n print(data, reqid) \n \n #----------------------------------------------------------------------\n def onChangeOrder(self, data, reqid):\n \"\"\"改单回调\"\"\"\n print(data, reqid) \n\n #----------------------------------------------------------------------\n def onDeleteOrder(self, data, reqid):\n \"\"\"撤单回调\"\"\"\n print(data, reqid) \n \n #----------------------------------------------------------------------\n def processPriceUpdate(self, msg):\n \"\"\"行情推送\"\"\"\n data = json.loads(msg)\n self.onPriceUpdate(data)\n \n #----------------------------------------------------------------------\n def processModelUpdate(self, msg):\n \"\"\"表推送\"\"\"\n print(msg)\n data = json.loads(msg)\n self.onModelUpdate(data)\n \n #----------------------------------------------------------------------\n def onPriceUpdate(self, data):\n \"\"\"行情推送\"\"\"\n print(data)\n \n #----------------------------------------------------------------------\n def onModelUpdate(self, data):\n \"\"\"表推送\"\"\"\n print(data)\n #print '*' * 30\n #fsubscribeModel\n #print len(data), data.get('isTotal', None), data\n #print '*' * 30\n #for d in data:\n # print d\n\n ","sub_path":"vnpy/api/fxcm/vnfxcm.py","file_name":"vnfxcm.py","file_ext":"py","file_size_in_byte":14802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"560443627","text":"def isIn(c, temp):\n for i in range(0, len(temp)):\n if c == temp[i]:\n return True\n return False\n\n\ndef removeRepeat(s):\n rmv = []\n for i in range(0, len(s)):\n if isIn(s[i], rmv):\n continue\n else:\n rmv.append(s[i])\n return rmv\n\n\ndef checkIsogram(s):\n rmv = removeRepeat(s)\n if len(rmv) == len(s):\n return True\n else:\n return False\n\n\nt = int(input())\nfor i in range(0, t):\n inp = str(input())\n s = inp\n if checkIsogram(s):\n print(1)\n else:\n print(0)","sub_path":"Code/CodeRecords/2559/60673/300507.py","file_name":"300507.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"69703121","text":"# SPDX-FileCopyrightText: 2021 Kevin Matocha\n#\n# SPDX-License-Identifier: MIT\n\n\"\"\"\n`Animation`\n================================================================================\nCircuitPython Animation Class to make it easy to move around displayio and\nvectorio graphical elements.\n\n* Author(s): Kevin Matocha\n\nImplementation Notes\n--------------------\n\n**Hardware:**\n\n**Software and Dependencies:**\n\n* Adafruit CircuitPython firmware for the supported boards:\n https://github.com/adafruit/circuitpython/releases\n\n\"\"\"\n\n# Animation class for use with displayio Groups\n\nfrom adafruit_displayio_layout.widgets.easing import linear_interpolation\n\n\n# pylint: disable=too-many-arguments, anomalous-backslash-in-string, invalid-name\n# pylint: disable=unused-argument, too-few-public-methods, useless-super-delegation\n\n\nclass Animation(list):\n \"\"\"An Animation class to make it easy to making moving animations with CircuitPython's\n displayio and vectorio graphical elements.\n\n After instancing an `Animation()` object, use `Animation.add_entry()` to add\n frame animation sections. Once all your animation entries are added, then perform\n the frame animation using `Animation.execute_frame()`.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def add_entry(self, group, frame_start, frame_end, function, **kwargs):\n \"\"\"Adds an animation entry into the Animation list.\n\n :param displayio.Group group: the displayio Group that will be animated\n within this frame range\n :param float frame_start: the starting frame position for this animation\n :param float frame_end: the ending frame position for this animation\n :param function: the name of the function to be called to mutate\n the ``group`` during the frame range\n :param \\*\\*kwargs: the additional arguments that should be passed to\n `function` when `Animation.execute_frame()` is called to trigger the\n animation to execute\n\n Note: See the definition of `Animation.execute_frame()` to understand\n what other parameters are sent to `function` during animations.\n\n \"\"\"\n\n myentry = Entry(\n group,\n frame_start,\n frame_end,\n function, # will be called function(position=position, arguments...)\n kwargs,\n )\n\n self.append(myentry)\n\n def execute_frame(self, frame):\n \"\"\"The function that performs the actual frame animation execution.\n\n This function searches through all ``Entry`` items that have been added to\n the Animation instance to determine if this frame is within the window of\n ``frame_start`` to ``frame_end``. If the requested frame is within the window,\n this calls the ``Entry.function`` with several \"internal\" parameters along\n with the additional \"user\" parameters that were input as additional arguments\n in the ``Animation.add_entry()`` function.\n\n The parameters that are sent to ``Entry.function()`` are:\n - float position: a value between 0.0 and 1.0 representing the current ``frame``\n distance within the window of ``frame_start`` and ``frame_end``. 
For example, if\n the current ``frame`` is equal to ``frame_start``, then ``position`` is 0.0.\n - displayio.Group group: the group to be animated using ``function``\n - int x0: the x-position of the group at ``frame_start``\n - int y0: the y-position of the group at ``frame_start``\n - float frame: the current frame that is being executed\n - float frame_start: the starting frame for this frame window\n - float frame_end: the ending frame for this frame window\n - \\*\\*kwargs: any other parameters that were defined in the Entry\n\n The ``function`` should be designed to ignore any unneeded input parameters by\n including ``**kwargs`` as one of the input parameters. This will cause the\n function to ignore excess arguments.\n\n Note: If a function requires the (x0,y0) values, you **must** initally perform\n ``Animation.execute_frame()`` at frame == frame_start. The ``Animation.execute_frame()``\n initializes the (x0, y0) values only when called with the value of ``frame_start``.\n\n Other Note: The frame window is \"exclusive\", so no animation is performed when\n ``frame == frame_end``.\n\n :param float frame: The frame to be displayed. Note: This is a float, so subframes\n can be animated.\n \"\"\"\n for entry in self:\n\n if frame == entry.frame_start: # initialize startx, starty\n entry.startx = entry.group.x\n entry.starty = entry.group.y\n\n if entry.frame_start <= frame < entry.frame_end:\n # This frame is within the entry frame range, so animate it\n\n # calculate a value between 0.0 and 1.0 to show the current frame's\n # position within this entry's frame range\n\n if (\n entry.frame_end - 1 - entry.frame_start\n ) <= 0: # prevent divide by zero\n position = 1.0\n else:\n position = (frame - entry.frame_start) / (\n entry.frame_end - 1 - entry.frame_start\n )\n entry.function(\n position=position,\n group=entry.group,\n x0=entry.startx,\n y0=entry.starty,\n frame=frame,\n frame_start=entry.frame_start,\n frame_end=entry.frame_end,\n **entry.kwargs,\n )\n\n\nclass Entry:\n \"\"\"This `Entry` class is a holder for the conditions that define an animated\n frame range. This holds the group, the \"augmentation\" function and arguments\n that are run at each call of `Animation.execute_frame`.\n\n Before running your loop with `Animation.execute_frame`, add all of your\n entries to the Animation object using `Animation.add_entry()`. Any excess\n arguments that are not handled by `Animation.execute_frame` will be passed\n to the `function` parameter (see notes on using the ``kwargs`` notation).\n\n Here is a code example. Append some display elements into ``group1``, create\n an Animation instance and then add an animation entry:\n\n .. 
code-block:: python\n\n group1=displayio.Group(max_size=1)\n\n animation=Animation()\n\n animation.add_entry(group=group1,\n frame_start=5, frame_end=20,\n function=translate,\n x1=50, y1=20, x2=50, y2=50,\n easing_function_x=quadratic_easeinout,\n easing_function_y=quadratic_easeinout)\n\n :param displayio.Group group: the group that is animated in this set of frames\n :param float frame_start: the starting frame for this animation\n :param float frame_end: the ending frame for this animation\n :param function: the function that mutates the group to cause the animation\n :param kwargs: a set of additional arguments that will be passed to the\n ``function`` during the animation\n \"\"\"\n\n def __init__(\n self,\n group,\n frame_start,\n frame_end,\n function,\n kwargs,\n ):\n self.group = group\n self.frame_start = frame_start\n self.frame_end = frame_end\n self.function = function\n self.kwargs = kwargs\n\n # Create placeholder instance variables, to store the initial\n # group's (x,y) position at the initial action frame\n self.startx = None\n self.starty = None\n\n\n#####################\n# Animation functions\n#####################\n# This is a starter set of functions that can be used with the Animation class\n#\n# The function can ignore some parameters that the ``execute_frame`` function sends.\n# Be sure to include ``**kwargs`` to the function input parameters\n# so the function will ignore all unused input parameters.\n#\n# Here are the parameters that ``execute_frame`` always sends to the function:\n# float position: a linear interpolation of the current frame's position between ``frame_start``\n# and ``frame_end``\n# displayio.Group group: the group in the Entry\n# int x0: initial x-position at the starting frame\n# int y0: initial y-position at the starting frame\n# float frame: the current frame\n# float frame_start: the starting frame of this animation entry\n# float frame_end: the ending frame of this animation entry (should be used as exclusive)\n# Other arguments that are defined in the `add_entry` call.\n\n\ndef translate(\n *,\n x1,\n y1,\n x2,\n y2,\n easing_function_x=linear_interpolation,\n easing_function_y=linear_interpolation,\n group,\n position,\n **kwargs\n):\n \"\"\"Performs a translation animation between two endpoints. Use two different\n easing functions to get all kinds of variety of cool motion.\n\n :param int x1: initial x-position of ``group``\n :param int y1: initial y-position of ``group``\n :param int x2: final x-position of ``group``\n :param int y2: final y-position of ``group``\n\n :param function easing_function_x: easing function that modifies the ``position`` value\n for the x-motion (default: linear_interpolation)\n :param function easing_function_y: easing function that modifies the ``position`` value\n for the y-motion (default: linear_interpolation)\n\n :param displayio.Group group: the display group that is sent to the ``function``. If using\n `Animation.execute_frame()` the group input parameter will be included automatically\n from the Entry.\n :param float position: float position: a linear interpolation of the current frame's\n position between ``frame_start`` and ``frame_end``. 
If using\n `Animation.execute_frame()` the ``position`` parameter will be included automatically.\n \"\"\"\n # including kwargs here is necessary to ignore excess arguments\n # user parameters: x1, y1, x2, y2, easing_function_x, easing_function_y\n # parameters handled from `execute_frame`: group, position\n\n group.x = round((x2 - x1) * easing_function_x(position)) + x1\n group.y = round((y2 - y1) * easing_function_y(position)) + y1\n\n\ndef translate_relative(\n *,\n delta_x,\n delta_y,\n easing_function_x,\n easing_function_y,\n group,\n x0,\n y0,\n position,\n **kwargs\n):\n \"\"\"Performs a relative translation animation between two endpoints. Use two different\n easing functions to get all kinds of variety of cool motion.\n\n Note: To use relative translations, be sure to run `execute_frame` at ``frame_start`` first\n so the initial (x0, y0) position is stored. For example, if you run the frames in reverse,\n you must run `execute_frame` at ``frame_start`` at least once initialize the initial (x0, y0)\n position.\n\n Special note: Relative translations can get complicated. If you want to tightly control\n predefined positions, then `translate` is the best approach. By combining overlapping\n relative translations, you can probably come up with all kinds of clever and confusing\n animations. Perhaps the `translate_relative` function is an avenue to create animated\n \"generative art\" projects.\n\n :param int x2: final x-position of ``group``\n :param int y2: final y-position of ``group``\n\n :param function easing_function_x: easing function that modifies the ``position`` value\n for the x-motion (default: linear_interpolation)\n :param function easing_function_y: easing function that modifies the ``position`` value\n for the y-motion (default: linear_interpolation)\n\n :param displayio.Group group: the display group that is sent to the ``function``. If using\n `Animation.execute_frame()` the ``group`` input parameter will be included automatically\n from the Entry.\n :param int x0: initial x-position of ``group``. If using `Animation.execute_frame()`\n the ``x0`` input parameter will be included automatically from the Entry.\n :param int y0: initial y-position of ``group``. If using `Animation.execute_frame()`\n the ``y0`` input parameter will be included automatically from the Entry.\n :param float position: float position: a linear interpolation of the current frame's\n position between ``frame_start`` and ``frame_end``. If using\n `Animation.execute_frame()` the ``position`` parameter will be included automatically.\n \"\"\"\n # including kwargs here is necessary to ignore excess arguments\n # user parameters: x2, y2, easing_function_x, easing_function_y\n # parameters handled from `execute_frame`: group, x0, y0, position\n group.x = round((delta_x) * easing_function_x(position)) + x0\n group.y = round((delta_y) * easing_function_y(position)) + y0\n\n\ndef wiggle(\n *,\n delta_x=0,\n delta_y=0,\n xsteps=None,\n ysteps=None,\n group,\n x0,\n y0,\n frame_start,\n frame,\n **kwargs\n):\n \"\"\"Performs a nervous wiggling animation around the starting point. To achieve a random-looking\n wiggle, set ``xsteps`` and ``ysteps`` to two different prime numbers.\n\n Note: To use `wiggle`, be sure to run `execute_frame` at ``frame_start`` first\n so the initial (x0, y0) position is stored. 
For example, if you run the frames in reverse,\n you must run `execute_frame` at ``frame_start`` at least once initialize the initial (x0, y0)\n position.\n\n :param int delta_x: amount of x-movement, in pixels (default = 0)\n :param int delta_y: amount of y-movement, in pixels (default = 0)\n :param int xsteps: number of frame steps it takes to make a full x-direction wiggle\n :param int ysteps: number of frame steps it takes to make a full y-direction wiggle\n :param displayio.Group group: the display group that is sent to the ``function``. If using\n `Animation.execute_frame()` the ``group`` input parameter will be included automatically\n from the Entry.\n :param int x0: initial x-position of ``group``. If using `Animation.execute_frame()`\n the ``x0`` input parameter will be included automatically from the Entry.\n :param int y0: initial y-position of ``group``. If using `Animation.execute_frame()`\n the ``y0`` input parameter will be included automatically from the Entry.\n :param float frame_start: the starting frame of this animation entry. If using\n `Animation.execute_frame()` the ``frame_start`` parameter will be included automatically\n from the Entry.\n :param float frame: the current frame being animated. If using\n `Animation.execute_frame()` the ``frame`` parameter will be included automatically.\n \"\"\"\n\n # including kwargs here is necessary to ignore excess arguments\n # user parameters: delta_x, delta_y, xsteps, ysteps\n # parameters handled from `execute_frame`: group, x0, y0, position, frame_start, frame\n\n if (xsteps is not None) and (delta_x != 0):\n xpositions = (\n list(range(xsteps // 2))\n + list(range(xsteps // 2 - 2, -1 * xsteps // 2, -1))\n + list(range(-1 * xsteps // 2 + 2, 0))\n )\n group.x = x0 + round(\n delta_x / xsteps * xpositions[int((frame - frame_start) % len(xpositions))]\n )\n\n if (ysteps is not None) and (delta_y != 0):\n ypositions = (\n list(range(ysteps // 2))\n + list(range(ysteps // 2 - 2, -1 * ysteps // 2, -1))\n + list(range(-1 * ysteps // 2 + 2, 0))\n )\n group.y = y0 + round(\n delta_y / ysteps * ypositions[int((frame - frame_start) % len(ypositions))]\n )\n","sub_path":"displayio_animation.py","file_name":"displayio_animation.py","file_ext":"py","file_size_in_byte":15621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
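The Animation record above documents add_entry and execute_frame but never shows the driving loop itself. A minimal sketch, assuming a CircuitPython setup where `display` is an initialized displayio display and `group1` already contains the element to move; display.refresh and the timing are assumptions, not part of the class:

```python
import time

animation = Animation()
animation.add_entry(group=group1, frame_start=0, frame_end=30,
                    function=translate, x1=0, y1=0, x2=100, y2=50)

for frame in range(31):       # the window is exclusive of frame_end; frame 0 stores (x0, y0)
    animation.execute_frame(frame)
    display.refresh()         # assumption: auto_refresh is disabled on this display
    time.sleep(0.03)          # roughly 33 fps
```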
+{"seq_id":"372515421","text":"from Graph import Graph\nimport random\n\n# Recursive function to calculate the ordering of the vertices\ndef recursive_DFS(G,vertex):\n stack = []\n G.mark_visited(vertex)\n # For every unvisited neighbour of the chosen vertex, DFS is run on\n # them\n for neighbour in G.unvisited_neighbours(vertex):\n dict = G.get_unvisited_dict()\n if neighbour in dict.keys():\n stack.extend(recursive_DFS(G,neighbour))\n stack.append(vertex)\n return stack\n\n# Function to calculate the component sizes by going through the given graph \n# in the order of the stack\ndef DFS_reversed(G,stack):\n component_sizes = []\n num_of_vertices = len(G.all_vertices())\n # Starting from the last vertex in the stack, we use DFS to find the strongly\n # connected components of the graph and append the size of the component to \n # the list of component sizes\n while(len(G.unvisited_vertices()) != 0):\n vertex = stack.pop()\n if vertex in G.unvisited_vertices():\n component_sizes.append(len(recursive_DFS(G,vertex)))\n return component_sizes\n\n# Function to compute the component sizes of the strongly connected \n# components of the given graph\ndef SCC(G,G_rev):\n # We choose a start vertex randomly and use DFS to order the vertices in\n # the first pass\n start_vertex = random.choice(G.unvisited_vertices())\n stack = recursive_DFS(G,start_vertex)\n while(len(G.unvisited_vertices()) != 0):\n stack.extend(recursive_DFS(G,random.choice(G.unvisited_vertices())))\n component_sizes = []\n # Using the ordering given by the algorithm in the first pass, we use \n # reversed DFS to find the component sizes\n component_sizes = DFS_reversed(G_rev,stack)\n return component_sizes","sub_path":"StronglyConnectedComponents/SCC.py","file_name":"SCC.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"111286347","text":"'''\nCreated on Apr 2, 2016\n\n@author: Edielson\n'''\n\nimport csv\nimport numpy as np\nimport struct \n\ndef LoadCSV(fileName,numFeatures):\n \n with open(fileName, 'rb') as csvfile:\n dataReader = csv.reader(csvfile, delimiter=',', quotechar='|')\n if numFeatures > 1:\n dataTrain=np.empty(shape=[0, numFeatures])\n for row in dataReader:\n listAux=[]\n for column in row:\n listAux.append(float(column))\n dataTrain = np.append(dataTrain, [listAux], axis=0)\n else:\n dataTrain=[]\n for row in dataReader:\n for column in row:\n dataTrain.append(float(column))\n \n return dataTrain \n\ndef LoadMel(fileName,numFeatures):\n with open(fileName, mode='rb') as file: # b is important -> binary\n fileContent = file.read(12*8)\n Mel=np.empty(shape=[0, numFeatures])\n while fileContent:\n out = struct.unpack('12d',fileContent[0:8*12])\n Mel = np.append(Mel, [out], axis=0)\n fileContent = file.read(12*8)\n return Mel\n return ","sub_path":"python/SurfaceRoughness/Tools.py","file_name":"Tools.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"569767726","text":"# -*- coding: utf-8 -*-\n# @time :21-1-13 下午3:55\nfrom argparse import ArgumentParser\nimport cv2\nimport numpy as np\nimport os\nimport json\nfrom mmdet.apis import inference_detector, init_detector, show_result_pyplot\n\n\n\njson_file = '/swdata/df/cz_data/data/tile_round1_train_20201231/test.json'\npath = '/swdata/df/cz_data/data/tile_round1_testA_20201231/testA_imgs'\nconfig = '/swdata/df/cz_data/mmdetection/df_use/retain/retain_net.py'\ncheckpoint = '/swdata/df/cz_data/mmdetection/df_use/log/epoch_12.pth'\n\nwith open(json_file, 'r') as d:\n data = json.load(d)\n\ncategories = data['categories']\nlabels = {}\nfor categorie in categories:\n id = categorie['id']\n name = categorie['name']\n labels[id] = int(name)\nmin_score = 1.\nout = []\nfiles = os.listdir(path)\n\ndone_files = []\nmodel = init_detector(config, checkpoint, device='cuda:0')\nfor file in files:\n done_files.append(file)\n print((len(done_files)*1.)/len(files))\n file_path = os.path.join(path, file)\n results = inference_detector(model, file_path)\n for result_inx in range(len(results)):\n out_single = {}\n name = file\n # print(name)\n result = results[result_inx]\n if len(result) == 0:\n continue\n categorie = labels[result_inx+1]\n # label_id = labels[result_inx]\n for bboxes in result:\n bbox = bboxes[:4]\n score = bboxes[4]\n if score < min_score:\n min_score = score\n print(min_score)\n if score < 0.01:\n continue\n # bbox = bbox.astype(np.int)\n bbox_single = []\n for bb in bbox:\n bbox_single.append(int(bb))\n # print(name)\n out_single['name'] = name\n out_single['category'] = categorie\n out_single['bbox'] = bbox_single\n out_single['score'] = np.float(score)\n out.append(out_single)\n\nprint(min_score)\nresult_out = json.dumps(out, ensure_ascii=False, indent=4)\nwith open('result1.json', 'w+', encoding='utf-8') as f:\n f.write(result_out)\n\n","sub_path":"df_use/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"227634271","text":"\n# coding=utf-8\nimport ujson as json\nimport redis, time, random\nfrom datetime import datetime\nfrom django.contrib.auth.models import User\nfrom location import REDLOC4\nfrom score import BAN_REASON, RATELIMIT_TTL, SUPER_FLOODING_THRESHOLD, FLOODING_THRESHOLD, LAZY_FLOODING_THRESHOLD, SHORT_MESSAGES_ALWD, \\\nSHARED_PHOTOS_CEILING, PHOTO_DELETION_BUFFER\nfrom models import UserProfile, Photo\n\n'''\n##########Redis Namespace##########\n\ncity_shops = \"city_shops\"\n\"historical_calcs\"\nlip: 'stores latest ip of a user for up to 5 mins'\nlogged_users = \"logged_users\"\nsorted_set = \"online_users\"\nuser_ban = \"ub:\"+str(user_id)\nuser_times = \"user_times:\"+str(user_id)\n\npusk:: # photo_upload_secret_key\ntisk::: # text input secret_key\n\nrlfh: - rate limited from home (e.g. because of abusive reasons)\nrlfpg: - rate limited from public group (e.g. because of spammy behavior)\nrlfpvg: - rate limited from private group (e.g. because of spammy behavior)\nrlfpc: - rate limited from photo comments (e.g. because of spammy behavior)\nrlfhr: - rate limited from home replies (e.g. because of spammy behavior)\n\nhir: - home input rate (list holding times of posting on home for a specific user)\npgir: - public group input rate (list holding times of posting in public group for a specific user)\npvgir: - private group input rate (list holding times of posting in public group for a specific user)\npcir: - photo comment input rate (list holding times of posting on photo comments for a specific user)\nhrir: - home reply input rate (list holding times of posting in home replies)\n\nhit: - home input text (list holding previous text of home posting for a specific user)\npgit: - public group input text (list holding text of public group posting for a specific user in a specific group)\npvgit: - private group input rate (list holding times of posting in private group for a specific user)\npcit: - photo comment input text (list holding text of photo comments for a specific user and a specific photo)\nhrit: - home reply input text (list holding text of home replies for a specific user under a specific home sentence)\n\nsm::: - counter to count short messages sent on home objects\n\nuser_id: is a key containing user_ids of certain usernames\nuname: is a hash containing username and avatar_url data of a user\npht: is a hash containing image_url and caption data of a photo posted by user\nphd: contains cached photo sharing data for a specific user_id (cached for 20 mins)\naurl: is 'avatar_uploading_rate_limit', and is used to rate limit how frequently a user can change their avatar\n\nvb:: 'visited_by' key that stores which star_id was visited by which visitor_id\n\n------------ Personal Group Metrics ------------\n\nlig_pm: contains user_id that performed latest interaction in private mehfil with id group_id\nlig_pg: contains user_id that performed latest interaction in private chat with id group_id\npm_ch contains list of private mehfil IDs alongwith number of chats occuring in those private mehfils\npm_sw contains list of private mehfil IDs alongwith number of switchovers occuring in those private mehfils\npg_ch contains list of private chat IDs alongwith number of chats occuring in those private chats\npg_sw contains list of private chat IDs alongwith number of switchovers occuring in those private chats\n\ngs_pm:: contains a 'session key' that idenitifies whether a new session has started for a user visiting a particular private mehfil\ngs_pg:: contains a 'session 
key' that idenitifies whether a new session has started for a user visiting a particular private chat\npm_sess contains : pairs alongwith number of 24 hour sessions for that pair\npg_sess contains : pairs alongwith number of 24 hour sessions for that pair\n\np2p_sms is a list of :: tuples, tracking SMSes in private chat\nrc: sets a key as a \"red carpet\" for sent_to_id (waits for them to return to Damadam as a result of an SMS sent by a friend)\nsms_eft tracks sms effectiveness. It contains : pairs for users who returned to Damadam after being sent an SMS\n\nexits contains : pairs alongwith times of exiting a private chat. Useful for charting average life of a private chat.\ndel_after_exit contains groups and exit times for groups that were deleted due to exiting\ndel_after_idle contains groups and exit times for groups that were deleted due to idleness\n\n------------ Social Sharing ------------\n\nas:: key to check whether a photo was already shared by a certain user (in whatsapp)\n\npdim: key that temporarily caches a shared photo's width and height\n\n\"sp: is a sorted set containing information about photos shared of each user\n\n###########\n'''\n\nPOOL = redis.ConnectionPool(connection_class=redis.UnixDomainSocketConnection, path=REDLOC4, db=0)\n\nTWO_WEEKS = 60*60*24*7*2\nTHREE_DAYS = 60*60*24*3\nONE_DAY = 60*60*24\nONE_HOUR = 60*60\nTWELVE_HOURS = 60*60*12\nTHIRTY_MINS = 30*60\nTWENTY_MINS = 20*60\nTEN_MINS = 10*60\nSEVEN_MINS = 7*60\nFIVE_MINS = 5*60\nTHREE_MINS = 3*60\nONE_MIN = 60\n\n\ndef convert_to_epoch(time):\n\treturn (time-datetime(1970,1,1)).total_seconds()\n\n\ndef save_deprecated_photo_ids_and_filenames(deprecated_photos):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tfinal_list = []\n\tfor filename, photo_id in deprecated_photos:\n\t\tfilename = filename.split('photos/')[1]\n\t\tfinal_list.append(filename)\n\t\tfinal_list.append(photo_id)\n\tmy_server.zadd(\"deprecated_photos\",*final_list)\n\n\n# def log_pic_uploader_status(user_id, is_verified):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tverified = '1' if is_verified else '0'\n# \tmy_server.lpush('uploaded_pics',verified+\":\"+str(user_id))\n\n# def save_user_choice(user_id, choice):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tmy_server.lpush(\"new_user_choice\",{'user_id':user_id,'user_choice':choice})\n\n# def log_referrer(referrer, loc, user_id):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tmy_server.lpush(\"referrer\",{'referrer':referrer,'origin':loc, 'user_id':user_id, 'time_stamp':time.time()})\n\ndef return_referrer_logs(log_name):\n\treturn redis.Redis(connection_pool=POOL).lrange(log_name,0,-1)\n\n\n# def error_logger(obj_creator_reported_id, object_creator_actual_id,actual_object_attributes, reported_link_attributes, from_loc, is_post_request,referrer):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tmy_server.lpush(\"block_error\",{'obj_creator_reported_id':obj_creator_reported_id,'object_creator_actual_id':object_creator_actual_id,\\\n# \t\t'actual_object_attributes':actual_object_attributes, 'reported_link_attributes':reported_link_attributes,'where_from':from_loc, \\\n# \t\t'is_post_request':is_post_request,'referrer':referrer})\n\n\n# def log_html_error(obj_list, forms, page, nickname, referrer):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tmy_server.lpush(\"matka_error\",{'obj_list':obj_list,'forms':forms, 'page':page, 'username':nickname,'referrer':referrer ,'time':time.time()})\n\n# def log_button_error(target_user_id, 
id_type,target_username,own_id, object_id,referrer):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tmy_server.lpush(\"button_error\",{'target_user_id':target_user_id,'id_type':id_type, 'target_username':target_username,'own_id':own_id, \\\n# \t\t'object_id':object_id,'referrer':referrer,'time':time.time()})\n\n\n# def save_number_verification_error_data(user_id, err_data, err_type=None, on_fbs=None, is_auth=None, which_flow=None):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tif which_flow == 'consumer':\n# \t\terr_data[\"user_id\"], err_data[\"err_type\"], err_data[\"on_fbs\"], err_data[\"is_auth\"] = user_id, err_type, on_fbs, is_auth\n# \t\tmy_server.lpush(\"consumer_number_errors\",err_data)\n# \telse:\n# \t\terr_data[\"user_id\"], err_data[\"err_type\"], err_data[\"on_fbs\"], err_data[\"is_auth\"] = user_id, err_type, on_fbs, is_auth\n# \t\tmy_server.lpush(\"seller_number_errors\",err_data)\n\n#######################Ecomm Metrics######################\n\ndef log_ecomm_user_visit(user_id):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmy_server.lpush(\"ecomm_visits\",user_id)\n\ndef get_and_reset_daily_ecomm_visits():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tall_visits = my_server.lrange(\"ecomm_visits\",0,-1)\n\tpipeline1 = my_server.pipeline()\n\tpipeline1.lpush(\"weekly_ecomm_visits\",all_visits)\n\tpipeline1.delete(\"ecomm_visits\")\n\tpipeline1.execute()\n\treturn all_visits, my_server.llen(\"weekly_ecomm_visits\")\n\ndef get_and_reset_weekly_ecomm_visits():\n\timport ast\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tweekly_visits = my_server.lrange(\"weekly_ecomm_visits\",0,-1)\n\tweekly_gross_visits = []\n\tfor daily_visits in weekly_visits:\n\t\tweekly_gross_visits += ast.literal_eval(daily_visits)\n\tmy_server.delete(\"weekly_ecomm_visits\")\n\treturn weekly_gross_visits\n\n\ndef insert_metrics(ecomm_metrics, reporting_time, period=None):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif period == 'daily':\n\t\tmapping = {'entry_time':reporting_time, 'unique_clicks_per_unique_visitor':ecomm_metrics[0], 'unique_clicks_per_unique_clicker':ecomm_metrics[1], \\\n\t\t'proportion_of_clickers_to_visitors':ecomm_metrics[2], 'unique_new_clickers_per_unique_new_visitors':ecomm_metrics[3], \\\n\t\t'unique_new_clicks_per_unique_new_visitor':ecomm_metrics[4], 'total_unique_visitors':ecomm_metrics[5], 'total_unique_clicks':ecomm_metrics[6]}\n\t\tmy_server.lpush(\"ecomm_metrics\",mapping)\n\tif period == 'weekly':\n\t\tmapping = {'entry_time':reporting_time, 'weekly_unique_clicks_per_unique_visitor':ecomm_metrics[0], 'weekly_unique_clicks_per_unique_clicker':ecomm_metrics[1], \\\n\t\t'weekly_proportion_of_clickers_to_visitors':ecomm_metrics[2], 'weekly_unique_visitors':ecomm_metrics[3], \\\n\t\t'weekly_unique_clicks':ecomm_metrics[4]}\n\t\tmy_server.lpush(\"weekly_ecomm_metrics\",mapping)\n\n\n\ndef return_all_metrics_data():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\treturn my_server.lrange(\"ecomm_metrics\", 0, -1), my_server.lrange(\"weekly_ecomm_metrics\", 0, -1)\n\n#######################Photo Secret Key######################\n\ndef set_photo_upload_key(user_id, secret_key, group_id=None):\n\t\"\"\"\n\tUsed to prevent double form submission when uploading photos (public photos or personal group photos)\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tsec_key = \"pusk:\"+str(user_id)+\":\"+group_id if group_id else \"pusk:\"+str(user_id)\n\tmy_server.setex(sec_key,secret_key,TWENTY_MINS)\n\n\ndef 
get_and_delete_photo_upload_key(user_id, group_id=None):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tuser_id = str(user_id)\n\tsec_key = \"pusk:\"+user_id+\":\"+group_id if group_id else \"pusk:\"+user_id\n\tsecret_key = my_server.get(sec_key)\n\tif secret_key:\n\t\tmy_server.delete(sec_key)\n\t\treturn secret_key\n\telse:\n\t\treturn '1'\n\n\ndef set_text_input_key(user_id, obj_id, obj_type, secret_key):\n\t\"\"\"\n\tUsed to prevent double form submission\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tsec_key = \"tisk:\"+str(user_id)+\":\"+obj_type+\":\"+str(obj_id)\n\tmy_server.setex(sec_key,secret_key,TWENTY_MINS)\n\n\ndef get_and_delete_text_input_key(user_id, obj_id, obj_type):\n\t\"\"\"\n\tChecks if secret key exists and returns an appropriate response\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tsec_key = \"tisk:\"+str(user_id)+\":\"+obj_type+\":\"+str(obj_id)\n\tsecret_key = my_server.get(sec_key)\n\tif secret_key:\n\t\tmy_server.delete(sec_key)\n\t\treturn secret_key\n\telse:\n\t\treturn '1'\n\n\n######################## Shared urls caching (for private chat) ########################\n\ndef cache_meta_data(url, mapping, time_taken_to_sniff, time_taken_to_parse, is_youtube, deg_of_comp):\n\t\"\"\"\n\tCache shared url's meta_data for up to a day\n\t\"\"\"\n\tpipeline1 = redis.Redis(connection_pool=POOL).pipeline()\n\tpipeline1.hmset(url,mapping)\n\tpipeline1.expire(url,ONE_DAY)\n\tpipeline1.lpush(\"shared_urls\",url+\":\"+is_youtube+\":\"+\"{0:.2f}\".format(time_taken_to_parse)+\":\"+\"{0:.2f}\".format(time_taken_to_sniff)+\":\"+\"{0:.2f}\".format(time.time())+\":\"+deg_of_comp)\n\tpipeline1.ltrim(\"shared_urls\",0,999)#saving up to 1000 hits\n\tpipeline1.execute()\n\ndef get_cached_meta_data(url):\n\t\"\"\"\n\tReturns cached meta data, and extends life of cache by 3 days if it's a successful hit\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmeta_data = my_server.hgetall(url)\n\tif meta_data:\n\t\tmy_server.expire(url,THREE_DAYS)\n\t\treturn meta_data\n\telse:\n\t\treturn {}\n\n\n###################### Photo dimensions and data caching ######################\n\ndef get_cached_photo_dim(photo_id):\n\t\"\"\"\n\tReturn photo with photo_id's height and width\n\t\"\"\"\n\tkey = 'pdim:'+photo_id\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmy_server.expire(key,THREE_DAYS)#extending life of cache\n\treturn my_server.hmget(key,'h','w')\n\n\ndef cache_photo_dim(photo_id,img_height,img_width):\n\t\"\"\"\n\tCache photo dimensions for use in photo sharing to personal groups\n\t\"\"\"\n\tmapping, key = {'h':img_height,'w':img_width}, 'pdim:'+photo_id\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmy_server.hmset(key,mapping)\n\tmy_server.expire(key,THREE_DAYS)\n\n\ndef retrieve_photo_data(photo_ids, owner_id):\n\t\"\"\"\n\tRetrieves photo data (caption and url)\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tphoto_data, missing_photos = {}, []\n\tfor photo_id in photo_ids:\n\t\tcaption, image_url, upload_time = my_server.hmget('pht:'+photo_id,'caption','image_url','upload_time')\n\t\tif caption and image_url and upload_time:\n\t\t\tphoto_data[photo_id] = {'caption':caption.decode('utf-8'),'image_url':image_url,'id':photo_id,'upload_time':upload_time}\n\t\telse:\n\t\t\tmissing_photos.append(photo_id)\n\tif missing_photos:\n\t\tmissing_data = Photo.objects.filter(id__in=missing_photos).values('id','image_file','caption','upload_time')\n\t\tfor data in missing_data:\n\t\t\tphoto_id = str(data['id'])\n\t\t\tkey = 
'pht:'+photo_id\n\t\t\tupload_time = str(convert_to_epoch(data['upload_time']))\n\t\t\tphoto_data[photo_id] = {'caption':data['caption'],'image_url':data['image_file'],'id':photo_id,'upload_time':upload_time}\n\t\t\tmy_server.hmset(key,{'caption':data['caption'],'image_url':data['image_file'],'upload_time':upload_time})\n\t\t\tmy_server.expire(key,THREE_DAYS)\n\treturn photo_data\n\n###################### User credentials caching ######################\n\n\ndef retrieve_bulk_credentials(user_ids, decode_unames=False):\n\t\"\"\"\n\tReturns usernames and avatars if fed a list of user_ids\n\n\tAlso caches the data for up to ONE day\n\tIf avatar was never uploaded, 'empty' string is returned instead\n\tReturned format is dictionary of dictionaries, where int(user_ids) serve as keys. This ensures O(1) lookup down the road\n\t\"\"\"\n\tif not user_ids:\n\t\treturn {}\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\tfor user_id in user_ids:\n\t\tpipeline1.hmget('uname:'+str(user_id),'uname','avurl')\n\tcredentials_wip = pipeline1.execute() #credentials_wip is a list of lists\n\tcounter = 0\n\tcredentials = {}# dictionary of dictionaries, keyed by int(user_id)\n\tuncollected_unames, uncollected_avurls = [], []\n\tfor uname,avurl in credentials_wip:\n\t\tcurrent_uid = int(user_ids[counter])\n\t\tuname = '' if not uname else uname\n\t\tavurl = '' if not avurl else avurl\n\t\tif decode_unames:\n\t\t\tcredentials[current_uid] = {'uname':uname.decode('utf-8'),'avurl':avurl}\n\t\telse:\n\t\t\tcredentials[current_uid] = {'uname':uname,'avurl':avurl}\n\t\tif uname and avurl:\n\t\t\tpass\n\t\telif avurl:\n\t\t\t# log that this user's uname has to be retrieved\n\t\t\tuncollected_unames.append(current_uid)\n\t\telif uname:\n\t\t\t# log that this user's avurl has to be retrieved\n\t\t\tuncollected_avurls.append(current_uid)\n\t\telse:\n\t\t\t# log that both of this user's credentials have to be retrieved\n\t\t\tuncollected_unames.append(current_uid)\n\t\t\tuncollected_avurls.append(current_uid)\n\t\tcounter += 1\n\tcollected_unames, collected_avurls = [], []\n\tif uncollected_unames:\n\t\tcollected_unames = User.objects.filter(id__in=uncollected_unames).values('id','username')\n\t\tpipeline2 = my_server.pipeline()\n\t\tfor uname in collected_unames:\n\t\t\tuser_id = uname['id']\n\t\t\thash_name = 'uname:'+str(user_id)\n\t\t\tcredentials[user_id]['uname'] = uname['username']\n\t\t\tpipeline2.hset(hash_name, 'uname', uname['username'])\n\t\t\tpipeline2.expire(hash_name,ONE_DAY)\n\t\tpipeline2.execute()\n\tif uncollected_avurls:\n\t\tcollected_avurls = UserProfile.objects.filter(user_id__in=uncollected_avurls).values('user_id','avatar')\n\t\tpipeline3 = my_server.pipeline()\n\t\tfor avurl in collected_avurls:\n\t\t\tuser_id = avurl['user_id']\n\t\t\thash_name = 'uname:'+str(user_id)\n\t\t\tif not avurl['avatar']:\n\t\t\t\tavurl['avatar'] = 'empty'\n\t\t\tcredentials[user_id]['avurl'] = avurl['avatar']\n\t\t\tpipeline3.hset(hash_name, 'avurl', avurl['avatar'])\n\t\t\tpipeline3.expire(hash_name,ONE_DAY)\n\t\tpipeline3.execute()\n\treturn credentials\n\n\ndef retrieve_bulk_avurls(user_ids):\n\t\"\"\"\n\tRetrieves avatar_urls in bulk for a given list of user_ids\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\tfor user_id in user_ids:\n\t\tpipeline1.hget('uname:'+str(user_id),'avurl')\n\tavatars_wip = pipeline1.execute()\n\tcounter = 0\n\tavatars, uncollected_avurls = {}, []\n\tfor avatar in avatars_wip:\n\t\tuser_id = int(user_ids[counter])\n\t\tif 
avatar:\n\t\t\tavatars[user_id] = avatar\n\t\telse:\n\t\t\tuncollected_avurls.append(user_id)\n\t\tcounter += 1\n\tif uncollected_avurls:\n\t\tcollected_avurls = UserProfile.objects.filter(user_id__in=uncollected_avurls).values('user_id','avatar')\n\t\tpipeline2 = my_server.pipeline()\n\t\tfor avurl in collected_avurls:\n\t\t\tuser_id = avurl['user_id']\n\t\t\thash_name = 'uname:'+str(user_id)\n\t\t\tif not avurl['avatar']:\n\t\t\t\tavurl['avatar'] = 'empty'\n\t\t\tavatars[user_id] = avurl['avatar']\n\t\t\tpipeline2.hset(hash_name,'avurl',avurl['avatar'])\n\t\t\tpipeline2.expire(hash_name,ONE_DAY)\n\t\tpipeline2.execute()\n\treturn avatars\n\n\ndef retrieve_bulk_unames(user_ids, decode=False):\n\t\"\"\"\n\tReturns usernames in bulk, in id-username dictionary format\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\tfor user_id in user_ids:\n\t\tpipeline1.hget('uname:'+str(user_id),'uname')\n\tusernames_wip = pipeline1.execute()\n\tcounter = 0\n\tusernames, uncollected_uname_ids = {}, []\n\tfor username in usernames_wip:\n\t\tid_ = int(user_ids[counter])\n\t\tif username:\n\t\t\tif decode:\n\t\t\t\tusernames[id_] = username.decode('utf-8')\n\t\t\telse:\n\t\t\t\tusernames[id_] = username\n\t\telse:\n\t\t\tusernames[id_] = ''\n\t\t\tuncollected_uname_ids.append(id_)\n\t\tcounter += 1\n\tif uncollected_uname_ids:\n\t\tresidual_unames = dict(User.objects.filter(id__in=uncollected_uname_ids).values_list('id','username'))\n\t\tpipeline2 = my_server.pipeline()\n\t\tfor key in residual_unames:\n\t\t\tusernames[key], hash_name = residual_unames[key], 'uname:'+str(key)\n\t\t\tpipeline2.hset(hash_name,'uname',residual_unames[key])\n\t\t\tpipeline2.expire(hash_name,ONE_DAY)\n\t\tpipeline2.execute()\n\treturn usernames\n\n\n\ndef retrieve_uname(user_id,decode=False):\n\t\"\"\"\n\tReturns user's nickname\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\thash_name = 'uname:'+str(user_id)\n\tusername = my_server.hget(hash_name,'uname')\n\tif username:\n\t\tif decode:\n\t\t\treturn username.decode('utf-8')\n\t\telse:\n\t\t\treturn username\n\telse:\n\t\tusername = User.objects.filter(id=user_id).values_list('username',flat=True)[0]\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.hset(hash_name,'uname',username)\n\t\tpipeline1.expire(hash_name,ONE_DAY)\n\t\tpipeline1.execute()\n\t\treturn username\n\n\ndef retrieve_credentials(user_id,decode_uname=False):\n\t\"\"\"\n\tReturns username and avatar_url for given user_id\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\thash_name = 'uname:'+str(user_id)\n\tusername, avurl = my_server.hmget(hash_name,'uname','avurl')\n\tif username and avurl:\n\t\tif decode_uname:\n\t\t\treturn username.decode('utf-8'),avurl\n\t\telse:\n\t\t\treturn username, avurl\n\telif username:\n\t\tavurl = UserProfile.objects.filter(user_id=user_id).values_list('avatar',flat=True)[0]\n\t\tif not avurl:\n\t\t\tavurl = 'empty'\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.hset(hash_name,'avurl',avurl)\n\t\tpipeline1.expire(hash_name,ONE_DAY)\n\t\tpipeline1.execute()\n\t\tif decode_uname:\n\t\t\treturn username.decode('utf-8'),avurl\n\t\telse:\n\t\t\treturn username, avurl\n\telif avurl:\n\t\tusername = User.objects.filter(id=user_id).values_list('username',flat=True)[0]\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.hset(hash_name,'uname',username)\n\t\tpipeline1.expire(hash_name,ONE_DAY)\n\t\tpipeline1.execute()\n\t\treturn username, avurl\n\telse:\n\t\tusername = 
User.objects.filter(id=user_id).values_list('username',flat=True)[0]\n\t\tavurl = UserProfile.objects.filter(user_id=user_id).values_list('avatar',flat=True)[0]\n\t\tif not avurl:\n\t\t\tavurl = 'empty'\n\t\tmapping = {'uname':username,'avurl':avurl}\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.hmset(hash_name,mapping)\n\t\tpipeline1.expire(hash_name,ONE_DAY)\n\t\tpipeline1.execute()\n\t\treturn username, avurl\n\n\ndef retrieve_user_id(username):\n\t\"\"\"\n\tReturns user's user_id (for a given username)\n\t\"\"\"\n\tkey = 'user_id:'+username\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tuser_id = my_server.get(key)\n\tif user_id:\n\t\tmy_server.expire(key,THREE_DAYS)# extend lease for 3 days\n\t\treturn user_id\n\telse:\n\t\ttry:\n\t\t\tuser_id = User.objects.filter(username=username).values_list('id',flat=True)[0]\n\t\texcept IndexError:\n\t\t\treturn None\n\t\tmy_server.setex(key,user_id,ONE_DAY)\n\t\treturn str(user_id)\n\n\ndef retrieve_avurl(user_id):\n\t\"\"\"\n\tReturns user's avatar_url\n\t\"\"\"\n\thash_name = 'uname:'+str(user_id)\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tavurl = my_server.hget(hash_name,'avurl')\n\tif not avurl:\n\t\tavurl = UserProfile.objects.filter(user_id=user_id).values_list('avatar',flat=True)[0]\n\t\tif not avurl:\n\t\t\tavurl = 'empty'\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.hset(hash_name,'avurl',avurl)\n\t\tpipeline1.expire(hash_name,ONE_DAY)\n\t\tpipeline1.execute()\n\treturn avurl\n\n\ndef invalidate_avurl(user_id,set_rate_limit=None):\n\t\"\"\"\n\tInvalidate cached avatar_url if user uploads new avatar\n\n\tIf set_rate_limit is True, a rate limit is imposed on uploading a new avatar\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tuser_id = str(user_id)\n\tmy_server.hdel('uname:'+user_id,'avurl')\n\tif set_rate_limit:\n\t\tmy_server.setex('aurl:'+user_id,1,THREE_MINS)\n\n\ndef get_aurl(user_id):\n\t\"\"\"\n\tRetrieves the status of avatar uploading rate limit\n\t\"\"\"\n\treturn redis.Redis(connection_pool=POOL).ttl('aurl:'+str(user_id))\n\n\n#####################Retention Logger#####################\ndef log_retention(server_instance, user_id):\n\ttime_now = time.time()\n\tif server_instance.exists(\"user_times:\"+user_id):\n\t\tif time_now - float(server_instance.lrange(\"user_times:\"+user_id,0,0)[0]) > TEN_MINS:\n\t\t\tserver_instance.lpush(\"user_times:\"+user_id,time_now)\n\t\t\tserver_instance.sadd(\"logged_users\",user_id)\n\telse:\n\t\t# contains a user's times of browsing Damadam\n\t\tserver_instance.lpush(\"user_times:\"+user_id,time_now)\n\t\t# contains all user_ids that have ever been logged\n\t\tserver_instance.sadd(\"logged_users\",user_id)\n\ndef retrieve_retention_ids():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\treturn my_server.smembers(\"logged_users\")\n\ndef retrieve_retention_data(user_ids):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\tfor id_ in user_ids:\n\t\tpipeline1.lrange(\"user_times:\"+id_,0,-1)\n\tresult1 = pipeline1.execute()\n\tuser_times = []\n\tcounter = 0\n\tfor id_ in user_ids:\n\t\tuser_times.append((id_,result1[counter]))\n\t\tcounter += 1\n\treturn user_times\n\n# def reduce_retention_data():\n# \t\"\"\"\n# \tto delete, get ids of really old \"last_active\"\n# \tdates from session table in DB (to ensure it's\n# \tan old user). Then delete those \"user_times\"+str(user_id)\n# \t\"\"\"\n\n#######################Whose Online#######################\n\n\ndef expire_online_users():\n\t\"\"\"\n\tExpires online_users from tasks.py\n\t\"\"\"\n\tredis.Redis(connection_pool=POOL).zremrangebyscore(\"online_users\",'-inf','('+str(time.time()-TEN_MINS))\n\n\n\ndef set_online_users(user_id,user_ip):\n\t\"\"\"\n\tInvoked from WhoseOnline.py middleware\n\t\"\"\"\n\tpipeline1 = redis.Redis(connection_pool=POOL).pipeline()\n\tpipeline1.zadd(\"online_users\",user_id+\":\"+user_ip,time.time())\n\tpipeline1.setex(\"lip:\"+user_id,user_ip,FIVE_MINS)\n\tpipeline1.execute()\n\t############ logging user retention ############\n\t# if random.random() < 0.45:\n\t# \tlog_retention(my_server,user_id)\n\n\ndef get_recent_online():\n\t\"\"\"\n\tInvoked by tasks.py to show whoever is online in OnlineKon\n\t\"\"\"\n\tsorted_set = \"online_users\"\n\tten_mins_ago = time.time() - TEN_MINS\n\tonline_users = redis.Redis(connection_pool=POOL).zrangebyscore(sorted_set,ten_mins_ago,'+inf')\n\tonline_ids = []\n\tfor user in online_users:\n\t\tonline_ids.append(user.split(\":\",1)[0])\n\treturn online_ids\n\n\n######################################## Detect Clones of User ID ########################################\n\n\ndef get_clones(user_id):\n\t\"\"\"\n\tInvoked in views.py to show possible clones of users\n\t\"\"\"\n\tlatest_user_ip = \"lip:\"+str(user_id) #latest ip of user with 'user_id'\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tuser_ip = my_server.get(latest_user_ip)\n\tif user_ip:\n\t\tclones = []\n\t\tfive_mins_ago = time.time() - FIVE_MINS\n\t\tonline_users = my_server.zrangebyscore(\"online_users\",five_mins_ago,'+inf')\n\t\tfor user in online_users:\n\t\t\tif user_ip == user.split(\":\",1)[1]:\n\t\t\t\tclones.append(user.split(\":\",1)[0])\n\t\treturn clones\n\telse:\n\t\treturn None\n\n# def set_site_ban(user_id):\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tuser_ban = \"ub:\"+str(user_id) # banning user's ip from logging into website\n# \tmy_server.set(user_ban,1)\n# \tmy_server.expire(user_ban,ONE_HOUR)\n\n\n#########################################################\n\n#calculating installment amount for mobile devices\ndef get_historical_calcs(base_price=None, time_period_in_months=None, monthly_installment=None, ending_value=None):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\tif base_price is None:\n\t\tid_ = my_server.get(\"historical_calcs\")\n\t\tif id_ is not None:\n\t\t\tfor x in range(1,(int(id_)+1)):\n\t\t\t\tpipeline1.hgetall(\"cd:\"+str(x))\n\t\t\treturn pipeline1.execute()\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\tid_ = my_server.incr(\"historical_calcs\")\n\t\tcalc_details = \"cd:\"+str(id_)\n\t\tmapping = {'id':id_,'bp':base_price,'tpim':time_period_in_months,'mi':monthly_installment,'ev':ending_value, 't':time.time()}\n\t\tmy_server.hmset(calc_details,mapping)\n\t\tfor x in range(1,(id_+1)):\n\t\t\tpipeline1.hgetall(\"cd:\"+str(x))\n\t\treturn pipeline1.execute()\n\n#########################################################\n\ndef save_ad_desc(text, price, user_id,username):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmapping = {'uid':user_id, 'nick':username, 'desc':text,'price':price}\n\tmy_server.lpush(\"ad_desc\",mapping)\n\n#########################################################\n\ndef save_careem_data(careem_data):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif my_server.zscore(\"careem_applicant_nums\",careem_data[\"phonenumber\"]):\n\t\t# 
it already exists\n\t\treturn False\n\telse:\n\t\t# it does not exist\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.hmset(\"cad:\"+str(careem_data['phonenumber']),careem_data)\n\t\tpipeline1.zadd('careem_applicant_nums',careem_data['phonenumber'],time.time())\n\t\tpipeline1.zadd('careem_applicant_nums_live',careem_data['phonenumber'],time.time())\n\t\tpipeline1.execute()\n\n\t\treturn True\n\ndef export_careem_data():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tnum_applicants = my_server.zcard(\"careem_applicant_nums_live\")\n\tif num_applicants == 0:\n\t\treturn False\n\telse:\n\t\tnums = my_server.zrange(\"careem_applicant_nums_live\",0,-1)\n\t\tpipeline1 = my_server.pipeline()\n\t\tfor num in nums:\n\t\t\tpipeline1.hgetall('cad:'+num)\n\t\tresult1 = pipeline1.execute()\n\t\treturn result1\n\ndef del_careem_data():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmy_server.delete(\"careem_applicant_nums_live\")\n\n\n##################################################Mobile_Shop\n\n\ndef log_buyer_form_err(error_data):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\terror_id = get_error_id()\n\tkey_name = \"error_data:\"+str(error_id)\n\tpipeline1 = my_server.pipeline()\n\tpipeline1.hmset(key_name,error_data)\n\t#pipeline1.expire(key_name,TWELVE_HOURS)\n\tpipeline1.execute()\n\treturn True\n\ndef get_error_id():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\treturn my_server.incr(\"ms_error_id\")\n\ndef save_order_data(order_data):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tkey_name = \"order_data:\"+str(order_data['user_id'])\n\tpipeline1 = my_server.pipeline()\n\tpipeline1.hmset(key_name,order_data)\n\tpipeline1.expire(key_name,TWELVE_HOURS)\n\tpipeline1.execute()\n\treturn True\n\ndef save_query_data(query_data):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tkey_name = \"querydata:\"+str(query_data['user_id'])\n\tpipeline1 = my_server.pipeline()\n\tpipeline1.hmset(key_name,query_data)\n\tpipeline1.sadd('queryusers',query_data['user_id'])\n#\tpipeline1.expire(key_name,TWELVE_HOURS)\n\tpipeline1.execute()\n\treturn True\n\ndef delete_query(user_id):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\tpipeline1.srem(\"queryusers\",user_id)\n\tpipeline1.delete(\"querydata:\"+str(user_id))\n\tpipeline1.execute()\n\treturn {}\n\n\ndef place_order(user_id):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\torder_data = get_temp_order_data(user_id)\n\torder_id = get_order_id()\n\torder_data['order_id'] = order_id\n\tpipeline1.zadd('ordersinprocess',user_id,order_id)\n\t# after a few months, export this to excel and clean the list (it takes up needless space)\n\tpipeline1.hmset(\"placedorders:\"+str(order_id),order_data)\n\tpipeline1.execute()\n\treturn order_data\n\ndef delete_order(order_id,user_id):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\t# after a few months, export this to excel and clean the list (it takes up needless space)\n\tpipeline1.zrem(\"ordersinprocess\",user_id)\n\tpipeline1.delete(\"placedorders:\"+str(order_id))\n\tpipeline1.execute()\n\treturn {}\n\n\n\n\n\ndef get_temp_order_data(user_id):\n\tmy_server = redis.Redis(connection_pool=POOL)\n\ttemp_storage = \"order_data:\"+str(user_id)\n\tif my_server.exists(temp_storage):\n\t\torder_details = my_server.hgetall(\"order_data:\"+str(user_id))\n#\t\tmy_server.delete(\"order_data:\"+str(user_id))\n\t\treturn 
order_details\n\telse:\n\t\treturn {}\n\ndef check_orders_processing(user_id):\n\tscore = redis.Redis(connection_pool=POOL).zscore('ordersinprocess',user_id)\n\treturn score is not None\n\ndef get_order_id():\n\treturn redis.Redis(connection_pool=POOL).incr(\"order_id\")\n\ndef get_total_orders():\n\treturn redis.Redis(connection_pool=POOL).get(\"order_id\")\n\ndef show_new_orders():\n\ttotal_orders = get_total_orders()\n\tif not total_orders or int(total_orders) < 1:\n\t\treturn False\n\telse:\n\t\tmy_server = redis.Redis(connection_pool=POOL)\n\t\tpipeline1 = my_server.pipeline()\n\t\tnum = 0\n\t\twhile num <= int(total_orders):\n\t\t\ttemp_storage = \"placedorders:\"+str(num)\n\t\t\tif my_server.exists(temp_storage):\n\t\t\t\tpipeline1.hgetall('placedorders:'+str(num))\n\t\t\tnum = num + 1\n\t\torders = pipeline1.execute()\n\t\treturn orders\n\n\ndef show_new_queries():\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tusers = my_server.smembers(\"queryusers\")\n\tif not users:\n\t\treturn []\n\telse:\n\t\tpipeline1 = my_server.pipeline()\n\t\tfor obj in users:\n\t\t\tpipeline1.hgetall('querydata:'+obj)\n\t\tresult1 = pipeline1.execute()\n\t\treturn result1\n\n\n\n'''\n####################\nM_S Deprecated Key Names\nquery_data now querydata\nquery_users now queryusers\norders_in_process now ordersinprocess\nplaced_orders now placedorders\n\n####################\n'''\n\n############ Rate limiting flooding and spamming ############\n\ndef is_limited(user_id, section, with_reason = False):\n\t\"\"\"\n\tReturns the remaining rate-limit TTL (in seconds) for a user in the given section, optionally with the ban reason\n\t\"\"\"\n\tif section == 'home':\n\t\tkey = \"rlfh:\"+str(user_id)\n\telif section == 'pub_grp':\n\t\tkey = \"rlfpg:\"+str(user_id)\n\telif section == 'prv_grp':\n\t\tkey = \"rlfpvg:\"+str(user_id)\n\telif section == 'pht_comm':\n\t\tkey = \"rlfpc:\"+str(user_id)\n\telif section == 'home_rep':\n\t\tkey = \"rlfhr:\"+str(user_id)\n\telse:\n\t\treturn False\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif with_reason:\n\t\tttl = my_server.ttl(key)\n\t\tif ttl > 0:\n\t\t\treason = my_server.get(key)\n\t\t\treturn ttl, reason\n\t\telse:\n\t\t\treturn 0, None\n\telse:\n\t\treturn my_server.ttl(key)\n\ndef rate_limit_user(user_id,section,level,ban_reason,my_server=None):\n\t\"\"\"\n\tSetting blocking rate limits on abusive users on a section of the website\n\n\tLevel relates to severity of block limit\n\t1 - lowest (7 mins)\n\t2 - low (30 mins)\n\t3 - medium (2 hours)\n\t4 - med-high (8 hours)\n\t5 - high (24 hours)\n\t6 - severe (3 days)\n\t7 - hardcore (7 days)\n\t8 - jackpot (30 days)\n\t\"\"\"\n\tif section == 'home':\n\t\trate_limit_key = \"rlfh:\"+str(user_id)\n\telif section == 'pub_grp':\n\t\trate_limit_key = \"rlfpg:\"+str(user_id)\n\telif section == 'prv_grp':\n\t\trate_limit_key = \"rlfpvg:\"+str(user_id)\n\telif section == 'pht_comm':\n\t\trate_limit_key = \"rlfpc:\"+str(user_id)\n\telif section == 'home_rep':\n\t\trate_limit_key = \"rlfhr:\"+str(user_id)\n\tif not my_server:\n\t\tmy_server = redis.Redis(connection_pool=POOL)\n\ttry:\n\t\tmy_server.setex(rate_limit_key,ban_reason,RATELIMIT_TTL[level])\n\t\treturn True\n\texcept KeyError:\n\t\tmy_server.setex(rate_limit_key,ban_reason,TWENTY_MINS)\n\t\treturn False\n\n# 1) revert log_input_rate in tasks.py (done)\n# 2) revert function definition of log_input_rate in redis4 (done)\n# 3) remove pipeline1.execute(key+\":logger\",text) from log_input_rate (done)\n# 4) remove log_rate_limited_conversation() from log_input_rate (done)\n# 5) remove function for 
log_rate_limited_conversation() in redis4 (done)\n# 6) remove function called report_rate_limited_conversation() in redis4 (done)\n# 7) remove logger url (and view import) from urls_maint.py (done)\n# 8) remove reporting view (called rate_limit_logging_report) from end of maint_views (done)\n# 9) remove the import for report_rate_limited_conversation() from maint_views (done)\n# 10) remove get_section_string() from redis4 (done)\n\n# def report_rate_limited_conversation():\n# \t\"\"\"\n# \tExtracts all rate limited conversations\n# \t\"\"\"\n# \timport csv,ast\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tsuper_ = my_server.lrange(\"rate_limited_convos:super\",0,-1)\n# \tnormal_ = my_server.lrange(\"rate_limited_convos:normal\",0,-1)\n# \tlazy_ = my_server.lrange(\"rate_limited_convos:lazy\",0,-1)\n# \tresult = [(super_,'super_'),(normal_,'normal_'),(lazy_,'lazy_')]\n# \tfor logs,log_type in result:\n# \t\tfilename = 'ratelimited_convos_'+log_type+str(int(time.time()))+'.csv'\n# \t\twith open(filename,'wb') as f:\n# \t\t\twtr = csv.writer(f)\n# \t\t\tcolumns = [\"Type\",\"Section\",\"#\",\"Conversation\"]\n# \t\t\twtr.writerow(columns) # writing the columns\n# \t\t\tnum = 0\n# \t\t\tfor payload in logs:\n# \t\t\t\tnum += 1\n# \t\t\t\tpayload = ast.literal_eval(payload)\n# \t\t\t\twhich_section = payload[0]\n# \t\t\t\tfor string in payload[1]:\n# \t\t\t\t\ttype_=log_type\n# \t\t\t\t\tsec=which_section\n# \t\t\t\t\tconv_num=num\n# \t\t\t\t\tconversation=string\n# \t\t\t\t\tto_write = [type_,sec,conv_num,conversation]\t\n# \t\t\t\t\twtr.writerows([to_write])\n\n\n# def get_section_string(key):\n# \ttry:\n# \t\tstring = key[:2]\n# \texcept TypeError:\n# \t\tstring = 'undef'\n# \tif string == 'hi':\n# \t\treturn 'home_post'\n# \telif string == 'pg':\n# \t\treturn 'public_group'\n# \telif string == 'pv':\n# \t\treturn 'private_group'\n# \telif string == 'pc':\n# \t\treturn 'photo_comment'\n# \telif string == 'hr':\n# \t\treturn 'home_reply'\n# \telse:\n# \t\treturn 'undefined'\t\n\n# def log_rate_limited_conversation(convo_key, severity):\n# \t\"\"\"\n# \tlogger for rate limited conversation strings\n# \t\"\"\"\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \twhich_section = get_section_string(convo_key)\n# \tif severity == 'super':\n# \t\tmy_server.lpush(\"rate_limited_convos:super\",[which_section,my_server.lrange(convo_key,0,-1)])\n# \telif severity == 'normal':\n# \t\tmy_server.lpush(\"rate_limited_convos:normal\",[which_section,my_server.lrange(convo_key,0,-1)])\n# \telse:\n# \t\tmy_server.lpush(\"rate_limited_convos:lazy\",[which_section,my_server.lrange(convo_key,0,-1)])\n\n\n# def log_input_rate(section,user_id,time_now):\ndef log_input_rate(section,user_id,time_now,text=None):\n\t\"\"\"\n\tKeeps check of writing rates to rate limit abusive users\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif section == 'home':\n\t\tkey = \"hir:\"+str(user_id)\n\telif section == 'pub_grp':\n\t\tkey = \"pgir:\"+str(user_id)\t\n\telif section == 'prv_grp':\n\t\tkey = \"pvgir:\"+str(user_id)\n\telif section == 'pht_comm':\n\t\tkey = \"pcir:\"+str(user_id)\n\telif section == 'home_rep':\n\t\tkey = \"hrir:\"+str(user_id)\t\n\tpipeline1 = my_server.pipeline()\n\tpipeline1.lpush(key,time_now)\n\tpipeline1.expire(key,ONE_MIN)\n\t#### remove ####\n\t# pipeline1.lpush(key+\":logger\",text)\n\t# pipeline1.expire(key+\":logger\",ONE_MIN)\n\t#### remove ####\n\tpipeline1.execute()\n\t####################################\n\tall_str_values = my_server.lrange(key,0,-1)\n\ttotal_inputs 
= len(all_str_values)\n\tif total_inputs > 7:\n\t\tall_values = map(float, all_str_values)\n\t\tsum_of_differences = 0\n\t\tfor s, t in zip(all_values, all_values[1:]):\n\t\t\tsum_of_differences += t - s\n\t\tavg_time_taken_between_sentences = abs(sum_of_differences)/(total_inputs-1)\n\t\tif avg_time_taken_between_sentences < SUPER_FLOODING_THRESHOLD:\n\t\t\trate_limit_user(user_id=user_id,section=section,level='2',ban_reason=BAN_REASON['flooding'],my_server=my_server)\n\t\t\t# log_rate_limited_conversation(key+\":logger\",'super')\n\t\telif avg_time_taken_between_sentences < FLOODING_THRESHOLD:\n\t\t\trate_limit_user(user_id=user_id,section=section,level='1',ban_reason=BAN_REASON['flooding'],my_server=my_server)\n\t\t\t# log_rate_limited_conversation(key+\":logger\",'normal')\n\t\telif avg_time_taken_between_sentences < LAZY_FLOODING_THRESHOLD:\n\t\t\trate_limit_user(user_id=user_id,section=section,level='0',ban_reason=BAN_REASON['flooding'],my_server=my_server)\n\t\t\t# log_rate_limited_conversation(key+\":logger\",'lazy')\n\t\telse:\n\t\t\tmy_server.ltrim(key,0,6)\n\t\t\t#### remove ####\n\t\t\t# my_server.ltrim(key+\":logger\",0,6)\n\ndef log_input_text(section, section_id,text,user_id):\n\t\"\"\"\n\tLogs previous 4 sentences of each section\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif section == 'home':\n\t\tkey = \"hit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'pub_grp':\n\t\tkey = \"pgit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'prv_grp':\n\t\tkey = \"pvgit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'pht_comm':\n\t\tkey = \"pcit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'home_rep':\n\t\tkey = \"hrit:\"+str(user_id)+\":\"+str(section_id)\n\tmy_server.lpush(key,text)\n\tmy_server.ltrim(key,0,3) # keeping previous 4 sentences\n\tmy_server.expire(key,TEN_MINS)\n\n\ndef retrieve_previous_msgs(section, section_id,user_id):\n\t\"\"\"\n\tretrieve previous messages stored for a certain section_id\n\t\"\"\"\n\tif section == 'home':\n\t\tkey = \"hit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'pub_grp':\n\t\tkey = \"pgit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'prv_grp':\n\t\tkey = \"pvgit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'pht_comm':\n\t\tkey = \"pcit:\"+str(user_id)+\":\"+str(section_id)\n\telif section == 'home_rep':\n\t\tkey = \"hrit:\"+str(user_id)+\":\"+str(section_id)\n\treturn redis.Redis(connection_pool=POOL).lrange(key,0,-1)\n\n\n############################ Ratelimiting short messages ###############################\n\ndef log_short_message(user_id,section,obj_id):\n\tshort_message = \"sm:\"+str(user_id)+\":\"+section+\":\"+str(obj_id)\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmy_server.incr(short_message)\n\tmy_server.expire(short_message,THREE_MINS)\n\ndef many_short_messages(user_id,section,obj_id):\n\t\"\"\"\n\tConfirms whether a trail of short messages has already been left by the user on the given object\n\t\"\"\"\n\tshort_message = \"sm:\"+str(user_id)+\":\"+section+\":\"+str(obj_id)\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tshort_messages_so_far = my_server.get(short_message)\n\tif short_messages_so_far:\n\t\tif int(short_messages_so_far) > SHORT_MESSAGES_ALWD:\n\t\t\tmy_server.expire(short_message,SEVEN_MINS) #renew block short messages for 7 mins\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\n################################################# Logging Sharing in Photos 
#################################################\n\n\ndef log_share(photo_id, photo_owner_id, sharer_id, share_type='undefined', origin=None):\n\t\"\"\"\n\tLogs image sharing attempts (via Whatsapp)\n\n\t1) If origin is 'user_albums', user is originating from user profiles\n\t2) If origin is 'fresh_photos', user is originating from fresh photos\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif origin == 'user_albums':\n\t\t# log shared profiles\n\t\tif not my_server.get('as:'+photo_id+\":\"+str(sharer_id)):\n\t\t\t# this sharer hasn't shared this photo in the last 20 mins\n\t\t\tadded = my_server.sadd('shared_user_albums_set',photo_id)\n\t\t\tif added == 1:\n\t\t\t\tmy_server.lpush('shared_user_albums_list',photo_id)\n\t\t\t\tif my_server.llen('shared_user_albums_list') > SHARED_PHOTOS_CEILING:\n\t\t\t\t\texpiring_photo_ids = my_server.lrange('shared_user_albums_list', (SHARED_PHOTOS_CEILING-PHOTO_DELETION_BUFFER), -1)\n\t\t\t\t\tpipeline1 = my_server.pipeline()\n\t\t\t\t\tpipeline1.ltrim('shared_user_albums_list',0,(SHARED_PHOTOS_CEILING-PHOTO_DELETION_BUFFER-1))\n\t\t\t\t\tpipeline1.zrem('sorted_user_albums_photos',*expiring_photo_ids)\n\t\t\t\t\tpipeline1.srem('shared_user_albums_set',*expiring_photo_ids)\n\t\t\t\t\tpipeline1.execute()\n\t\t\tmy_server.zincrby('sorted_user_albums_photos',photo_id,amount=1)# query this when getting report of which photos were shared the most\n\t\t\tmy_server.setex('as:'+photo_id+\":\"+str(sharer_id),'1',TWENTY_MINS)\n\telif origin == 'fresh_photos':\n\t\t# log shared photos\n\t\tif not my_server.get('as:'+photo_id+\":\"+str(sharer_id)):\n\t\t\t# this sharer hasn't shared this photo in the last 20 mins\n\t\t\tadded = my_server.sadd('shared_public_photos_set',photo_id)\n\t\t\tif added == 1:\n\t\t\t\tmy_server.lpush('shared_public_photos_list',photo_id)\n\t\t\t\tif my_server.llen('shared_public_photos_list') > SHARED_PHOTOS_CEILING:\n\t\t\t\t\texpiring_photo_ids = my_server.lrange('shared_public_photos_list', (SHARED_PHOTOS_CEILING-PHOTO_DELETION_BUFFER), -1)\n\t\t\t\t\tpipeline1 = my_server.pipeline()\n\t\t\t\t\tpipeline1.ltrim('shared_public_photos_list',0,(SHARED_PHOTOS_CEILING-PHOTO_DELETION_BUFFER-1))\n\t\t\t\t\tpipeline1.zrem('sorted_shared_public_photos',*expiring_photo_ids)\n\t\t\t\t\tpipeline1.srem('shared_public_photos_set',*expiring_photo_ids)\n\t\t\t\t\tpipeline1.execute()\n\t\t\tmy_server.zincrby('sorted_shared_public_photos',photo_id,amount=1)# query this when getting report of which photos were shared the most\n\t\t\tmy_server.setex('as:'+photo_id+\":\"+str(sharer_id),'1',TWENTY_MINS)\n\telse:\n\t\tpass\n\n\n\ndef logging_sharing_metrics(sharer_id, photo_id, photo_owner_id, num_shares, sharing_time, group_ids):\n\t\"\"\"\n\tLogs metrics for photos shared internally (from public albums to personal groups)\n\n\tThis is a separate functionality from sharing via Whatsapp\n\t\"\"\"\n\tsharer_id, num_shares, sharing_time = str(sharer_id), str(num_shares), str(sharing_time)\n\tkey = \"sp:\"+photo_owner_id\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpipeline1 = my_server.pipeline()\n\n\t# use this to calculate total shares in the last week, also total all_time shares (list resets if user inactive for two weeks)\n\tpipeline1.zadd(key,photo_id+\":\"+num_shares+\":\"+sharer_id+\":\"+sharing_time,sharing_time)\n\tpipeline1.expire(key,TWO_WEEKS)\n\t# can also give to each user: latest shared photo, most highly shared photo, and all other shared photos (along with num shares)\n\n\t# save the act of sharing in a global list as well, for our viewing\n\tpipeline1.lpush('shared_photos',photo_id+\":\"+num_shares+\":\"+photo_owner_id+\":\"+sharing_time)\n\tpipeline1.ltrim('shared_photos',0,500)\n\n\tpipeline1.execute()\n\t##################################### SHARER METRICS #####################################\n\t# one_month_ago = sharing_time - (60*60*24*30)\n\t# most prolific sharers of own content (remove those inactive for a month)\n\t# if photo_owner_id == sharer_id:\n\t# \tmy_server.zadd('ocst',sharer_id,sharing_time)#'own content sharing time'\n\t# \tmy_server.zincrby('ocsv',sharer_id,amount=num_shares)#'own content sharing volume'\n\t# \texpired_sharer_ids = my_server.zrangebyscore(\"ocst\",'-inf',one_month_ago)\n\t# \tif expired_sharer_ids:\n\t# \t\tmy_server.zrem('ocst',*expired_sharer_ids)\n\t# \t\tmy_server.zrem('ocsv',*expired_sharer_ids)\n\n\t# most prolific sharers of others' content (remove those inactive for a month)\n\t# else:\n\t# \tmy_server.zadd('cst',sharer_id,sharing_time)#'content sharing time'\n\t# \tmy_server.zincrby('csv',sharer_id,amount=num_shares)#'content sharing volume'\n\t# \texpired_sharer_ids = my_server.zrangebyscore(\"cst\",'-inf',one_month_ago)\n\t# \tif expired_sharer_ids:\n\t# \t\tmy_server.zrem('cst',*expired_sharer_ids)\n\t# \t\tmy_server.zrem('csv',*expired_sharer_ids)\n\n\t# most prolific sharers who influence the most number of people. Can use formula volume_shared^(simpson_diversity_index) to calculate 'influencer score'\n\t# volume_shared is simply total number of shares (it counts each individual share as 1)\n\t# simpson_diversity_index is here: https://www.youtube.com/watch?v=zxzwvWDeTT8\n\n\n# def log_photo_attention_from_fresh(photo_id, att_giver, photo_owner_id, action, vote_value):\n# \t\"\"\"\n# \tMaintains list of photos that are 'trending' via activity from fresh photos\n#\n# \tNote: 'action' may be 'photo_vote' or 'photo_comment'\n# \t\"\"\"\n# \tmy_server = redis.Redis(connection_pool=POOL)\n# \tif action == 'photo_vote':\n# \t\tif vote_value == '1':\n# \t\t\tpass\n# \t\telif vote_value == '-1':\n# \t\t\tpass\n# \t\telse:\n# \t\t\tpass\n\ndef cache_photo_share_data(json_data,user_id):\n\t\"\"\"\n\tCaches photo sharing data of given user_id for twenty mins\n\t\"\"\"\n\tredis.Redis(connection_pool=POOL).setex('phd:'+user_id,json_data,TWENTY_MINS)\n\n\ndef retrieve_fresh_photo_shares_or_cached_data(user_id):\n\t\"\"\"\n\tReturns shared photos of user_id\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tcached_photo_data = my_server.get('phd:'+user_id)\n\tif cached_photo_data:\n\t\treturn json.loads(cached_photo_data), True\n\telse:\n\t\treturn my_server.zrange(\"sp:\"+user_id,0,-1), False\n\n\ndef logging_profile_view(visitor_id,star_id,viewing_time):\n\t\"\"\"\n\tLogs profile view if a visitor visits\n\n\tOnly last 24 hours are preserved\n\tEnsures self visits don't count\n\tEnsures repeat visits don't count\n\t\"\"\"\n\tstar_id = str(star_id)\n\tone_day_ago = viewing_time - ONE_DAY\n\tviewing_time = str(viewing_time)\n\tvisitor_id = str(visitor_id)\n\tkey = \"vb:\"+star_id+\":\"+visitor_id\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif not my_server.get(key):\n\t\t# can log visit\n\t\tsorted_set = 'pf:'+star_id\n\t\tmy_server.zremrangebyscore(sorted_set,'-inf',one_day_ago)#purging old data\n\t\tmy_server.zadd(sorted_set,visitor_id+\":\"+viewing_time,viewing_time)\n\t\tmy_server.expire(sorted_set,ONE_DAY)#this expires if no new views appear for 24 
hours\n\t\tmy_server.setex(key,'1',THIRTY_MINS)\n\n########################################### Gathering Metrics for Personal Groups ###########################################\n\ndef increment_convo_counter(group_id, writer_id, group_type=None):\n\t\"\"\"\n\tLogs conversation quantity in personal groups and private mehfils\n\n\tHelps answer questions such as:\n\t1) What are avg number of chats produced per type of chat?\n\t2) What are avg number of switchovers produced per type of chat?\n\t\"\"\"\n\tif group_type:\n\t\tlast_interaction_in_group = \"lig_\"+group_type+\":\"+group_id\n\t\tmy_server = redis.Redis(connection_pool=POOL)\n\t\tlwid = my_server.getset(last_interaction_in_group,writer_id)\n\t\tif lwid:\n\t\t\tinteraction_type = 'ch' if lwid == str(writer_id) else 'both'\n\t\telse:\n\t\t\tinteraction_type = 'ch'\n\t\tif interaction_type == 'ch':\n\t\t\t# this logs normal chat\n\t\t\tmy_server.zincrby(group_type+\"_ch\",group_id,amount=1)\n\t\telif interaction_type == 'both':\n\t\t\t# this logs switchover, and normal chat\n\t\t\tmy_server.zincrby(group_type+\"_sw\",group_id,amount=1)\n\t\t\tmy_server.zincrby(group_type+\"_ch\",group_id,amount=1)\n\t\tmy_server.expire(last_interaction_in_group,ONE_DAY)\n\n\ndef increment_session(group_id, user_id, group_type=None):\n\t\"\"\"\n\tIncrements unique sessions per group per user\n\n\tHelps answer questions such as:\n\t1) What are avg number of sessions per type of chat\n\t2) What are median number of sessions per type of chat\n\t3) Calculate correlation between number of sessions and number of switchovers\n\t4) Calculate coorelation between number of sessions and number of chats\n\t\"\"\"\n\tmy_server, user_id = redis.Redis(connection_pool=POOL), str(user_id)\n\tif not my_server.get(\"gs_\"+group_type+\":\"+group_id+\":\"+user_id):\n\t\t# create new session key for the user for this group\n\t\tnow = datetime.now()\n\t\tsecs_till_midnight = ((24 - now.hour - 1) * 60 * 60) + ((60 - now.minute - 1) * 60) + (60 - now.second)\n\t\tmy_server.setex(\"gs_\"+group_type+\":\"+group_id+\":\"+user_id,group_id,secs_till_midnight)\n\t\t# increment session counter\n\t\tmy_server.zincrby(group_type+\"_sess\",group_id+\":\"+user_id,amount=1)\n\n\ndef track_p2p_sms(sent_by_id, sent_to_id, sending_time):\n\t\"\"\"\n\tLog which user sent whom an SMS at what time (to entice them to come to Damadam)\n\t\n\tHelps answer questions such as:\n\t1) Number of SMSes generated per chat/per day/per user\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tmy_server.lpush(\"p2p_sms\",str(sent_by_id)+\":\"+sent_to_id+\":\"+str(sending_time))#llen of list reveals number of SMSes sent\n\t# create 'red carpet' for target user\n\tmy_server.setex(\"rc:\"+sent_to_id,sending_time,ONE_DAY)\n\n\ndef check_p2p_sms(user_id):\n\t\"\"\"\n\tLogs data in case you were sent an SMS by a friend (asking you to return to Damadam)\n\t\n\tHelps answer questions such as:\n\t1) Number of people responding to SMSes\n\t2) How soon does an average responder take to return to the chat from which SMS was sent?\n\t\"\"\"\n\tmy_server, user_id = redis.Redis(connection_pool=POOL), str(user_id)\n\tsms_sent_at = my_server.get(\"rc:\"+user_id)\n\tif sms_sent_at:\n\t\tmy_server.delete(\"rc:\"+user_id)\n\t\ttime_passed_since_sms = time.time() - float(sms_sent_at)\n\t\t# log returned user and time taken since sending of SMS\n\t\tmy_server.lpush(\"sms_eft\",user_id+\":\"+str(time_passed_since_sms))\n\n\ndef log_personal_group_exit_or_delete(group_id, exit_by_id=None, 
action_type=None):\n\t\"\"\"\n\tLogging time of personal group exit or deletion\n\n\tHelps answer questions such as:\n\t1) How many private chats are created (net basis) week-over-week?\n\t2) What is the average lifetime of a private chat?\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tif action_type == 'exit':\n\t\tmy_server.zadd(\"exits\",group_id+\":\"+exit_by_id,time.time())\n\telse:\n\t\t# group_id is a list of group_ids\n\t\ttime_now = time.time()\n\t\tgroups = []\n\t\tfor gid in group_id:\n\t\t\tgroups.append(gid)\n\t\t\tgroups.append(time_now)\n\t\tif groups:\n\t\t\tif action_type == 'del_exit':\n\t\t\t\tmy_server.zadd(\"del_after_exit\",*groups)\n\t\t\telif action_type == 'del_idle':\n\t\t\t\tmy_server.zadd(\"del_after_idle\",*groups)\n\n\ndef purge_exit_list(group_id, user_id):\n\t\"\"\"\n\tPurge a value from 'exits' (called when a user rejoins a group after exiting it)\n\t\"\"\"\n\tredis.Redis(connection_pool=POOL).zrem(\"exits\",group_id+\":\"+str(user_id))\n\n\n########################################### Reporting Metrics for Personal Groups ###########################################\n\ndef avg_sessions_per_type():\n\t\"\"\"\n\tRetrieves session information for personal groups\n\n\t1) What are avg number of sessions per type of chat\n\t2) What are median number of sessions per type of chat\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\tpgs_sampled = my_server.get('pgs_sampled_for_sess')\n\tif pgs_sampled:\n\t\tresults = my_server.mget(['pms_sampled_for_sess','med_sess_per_user_per_pg','med_sess_per_user_per_pm','avg_sess_per_user_per_pg',\\\n\t\t\t'avg_sess_per_user_per_pm','avg_users_per_pm','med_users_per_pm','avg_users_per_pg','med_users_per_pg','avg_sess_per_user_per_two_user_pm',\\\n\t\t\t'med_sess_per_user_per_two_user_pm','total_two_user_pms','avg_users_per_two_user_pm','med_users_per_two_user_pm'])\n\t\treturn pgs_sampled, results[0], results[1], results[2], results[3], results[4], results[5], results[6], results[7], results[8], results[9],\\\n\t\tresults[10],results[11], results[12], results[13]\n\telse:\n\t\tpg_data = my_server.zrange('pg_sess',0,-1,withscores=True)\n\t\tpg_sample_size = len(pg_data)\n\t\tpg_med_idx = int(pg_sample_size/2)\n\n\t\tpm_data = my_server.zrange('pm_sess',0,-1,withscores=True)\n\t\tpm_sample_size = len(pm_data)\n\t\tpm_med_idx = int(pm_sample_size/2)\n\n\t\t# data contains <group_id>:<user_id> members with session-count scores\n\t\tpg_sessions, all_pgs, all_pg_users = 0, set(), {}\n\t\tfor tup in pg_data:\n\t\t\tpg_sessions += int(tup[1])\n\t\t\tpayload = tup[0].split(\":\")\n\t\t\tgroup_id, user_id = payload[0], payload[1]\n\t\t\tall_pgs.add(group_id)\n\t\t\tall_pg_users[group_id] = {}\n\t\tpgs_sampled = len(all_pgs)\n\t\tfor tup in pg_data:\n\t\t\tpayload = tup[0].split(\":\")\n\t\t\tgroup_id, user_id = payload[0], payload[1]\n\t\t\tall_pg_users[group_id].update({user_id:'user_id'})\n\t\ttotal_pg_users, pg_users_set = 0, []\n\t\tfor key, value in all_pg_users.iteritems():\n\t\t\tnum_users_in_group = len(value)\n\t\t\tpg_users_set.append(num_users_in_group)\n\t\t\ttotal_pg_users += num_users_in_group\n\t\t# finding median\n\t\tarray_pg = sorted(pg_users_set)\n\t\thalf, odd = divmod(len(array_pg), 2)\n\t\tif odd:\n\t\t\tmed_users_per_pg = array_pg[half]\n\t\telse:\n\t\t\tmed_users_per_pg = (array_pg[half - 1] + array_pg[half]) / 2.0\n\n\t\t# calculating pm data\n\t\tpm_sessions, all_pms, all_pm_users = 0, set(), {}\n\t\tfor tup in pm_data:\n\t\t\tpm_sessions += int(tup[1])\n\t\t\tpayload = tup[0].split(\":\")\n\t\t\tgroup_id, user_id = payload[0], 
payload[1]\n\t\t\tall_pms.add(group_id)\n\t\t\tall_pm_users[group_id] = {}\n\t\tpms_sampled = len(all_pms)\n\t\tfor tup in pm_data:\n\t\t\tpayload = tup[0].split(\":\")\n\t\t\tgroup_id, user_id = payload[0], payload[1]\n\t\t\tall_pm_users[group_id].update({user_id:'user_id'})\n\t\ttotal_pm_users, pm_users_set, two_user_pms = 0, [], {}\n\t\tfor key, value in all_pm_users.iteritems():\n\t\t\tnum_users_in_group = len(value)\n\t\t\tpm_users_set.append(num_users_in_group)\n\t\t\tif num_users_in_group < 3:\n\t\t\t\t# retrieving 2 user pms\n\t\t\t\ttwo_user_pms[key] = num_users_in_group\n\t\t\ttotal_pm_users += num_users_in_group\n\n\t\t# retrieving sessions for 2 user pms in {gid:num_sess} form\n\t\ttwo_user_pm_sess = {}\n\t\tfor tup in pm_data:\n\t\t\tpayload = tup[0].split(\":\")\n\t\t\tgroup_id, user_id = payload[0], payload[1]\n\t\t\tif group_id in two_user_pms:\n\t\t\t\t# it's a two person pm\n\t\t\t\tif group_id in two_user_pm_sess:\n\t\t\t\t\t# already entered data for 1 user\n\t\t\t\t\tnum_sess = two_user_pm_sess[group_id]\n\t\t\t\t\ttwo_user_pm_sess[group_id] = num_sess + int(tup[1])\n\t\t\t\telse:\n\t\t\t\t\ttwo_user_pm_sess[group_id] = int(tup[1])\n\n\t\t# we now have two_user_pms {gid:num_users} and two_user_pm_sess {gid:num_sess}\n\t\ttotal_two_user_pm_sess, all_two_user_pm_sess = 0, []\n\t\tfor key,value in two_user_pm_sess.iteritems():\n\t\t\ttotal_two_user_pm_sess += int(value)\n\t\t\tall_two_user_pm_sess.append(value)\n\t\ttotal_two_user_pm_users, all_two_user_pm_users = 0, []\n\t\tfor key,value in two_user_pms.iteritems():\n\t\t\ttotal_two_user_pm_users += int(value)\n\t\t\tall_two_user_pm_users.append(value)\n\t\tavg_sess_per_user_per_two_user_pm = \"{0:.2f}\".format(float(total_two_user_pm_sess)/total_two_user_pm_users)\n\t\tsorted_sess = sorted(all_two_user_pm_sess)\n\t\thalved, is_odd = divmod(len(sorted_sess), 2)\n\t\tif is_odd:\n\t\t\tmed_sess_per_user_per_two_user_pm = sorted_sess[halved]\n\t\telse:\n\t\t\tmed_sess_per_user_per_two_user_pm = (sorted_sess[halved - 1] + sorted_sess[halved]) / 2.0\n\t\ttotal_two_user_pms = len(two_user_pms)\n\t\tavg_users_per_two_user_pm = \"{0:.2f}\".format(float(total_two_user_pm_users)/total_two_user_pms)\n\t\tsorted_users = sorted(all_two_user_pm_users)\n\t\tbisect, isodd = divmod(len(sorted_users), 2)\n\t\tif isodd:\n\t\t\tmed_users_per_two_user_pm = sorted_users[bisect]\n\t\telse:\n\t\t\tmed_users_per_two_user_pm = (sorted_users[bisect - 1] + sorted_users[bisect]) / 2.0\n\t\t# we now have avg and median sessions per user per two user pm\n\n\t\t# finding overall median\n\t\tarray_pm = sorted(pm_users_set)\n\t\thalf, odd = divmod(len(array_pm), 2)\n\t\tif odd:\n\t\t\tmed_users_per_pm = array_pm[half]\n\t\telse:\n\t\t\tmed_users_per_pm = (array_pm[half - 1] + array_pm[half]) / 2.0\n\t\tavg_sess_per_user_per_pg = \"{0:.2f}\".format(float(pg_sessions)/pg_sample_size)\n\t\tavg_sess_per_user_per_pm = \"{0:.2f}\".format(float(pm_sessions)/pm_sample_size)\n\t\tavg_users_per_pg = \"{0:.2f}\".format(float(total_pg_users)/pgs_sampled)\n\t\tavg_users_per_pm = \"{0:.2f}\".format(float(total_pm_users)/pms_sampled)\n\t\tmed_sess_per_user_per_pg = my_server.zrange('pg_sess',pg_med_idx,pg_med_idx+1,withscores=True)[0]\n\t\tmed_sess_per_user_per_pm = my_server.zrange('pm_sess',pm_med_idx,pm_med_idx+1,withscores=True)[0]\n\n\t\t# caching the results\n\t\tpipeline1 = 
my_server.pipeline()\n\t\tpipeline1.setex('pgs_sampled_for_sess',pgs_sampled,TEN_MINS)\n\t\tpipeline1.setex('pms_sampled_for_sess',pms_sampled,TEN_MINS)\n\t\tpipeline1.setex('avg_users_per_pm',avg_users_per_pm,TEN_MINS)\n\t\tpipeline1.setex('avg_users_per_pg',avg_users_per_pg,TEN_MINS)\n\t\tpipeline1.setex('med_users_per_pm',med_users_per_pm,TEN_MINS)\n\t\tpipeline1.setex('med_users_per_pg',med_users_per_pg,TEN_MINS)\n\t\tpipeline1.setex('total_two_user_pms',total_two_user_pms,TEN_MINS)\n\t\tpipeline1.setex('avg_sess_per_user_per_pg',avg_sess_per_user_per_pg,TEN_MINS)\n\t\tpipeline1.setex('avg_sess_per_user_per_pm',avg_sess_per_user_per_pm,TEN_MINS)\n\t\tpipeline1.setex('med_sess_per_user_per_pg',med_sess_per_user_per_pg,TEN_MINS)\n\t\tpipeline1.setex('med_sess_per_user_per_pm',med_sess_per_user_per_pm,TEN_MINS)\n\t\tpipeline1.setex('avg_users_per_two_user_pm',avg_users_per_two_user_pm,TEN_MINS)\n\t\tpipeline1.setex('med_users_per_two_user_pm',med_users_per_two_user_pm,TEN_MINS)\n\t\tpipeline1.setex('avg_sess_per_user_per_two_user_pm',avg_sess_per_user_per_two_user_pm,TEN_MINS)\n\t\tpipeline1.setex('med_sess_per_user_per_two_user_pm',med_sess_per_user_per_two_user_pm,TEN_MINS)\n\t\tpipeline1.execute()\n\t\treturn pgs_sampled, pms_sampled, med_sess_per_user_per_pg, med_sess_per_user_per_pm, avg_sess_per_user_per_pg, avg_sess_per_user_per_pm,\\\n\t\tavg_users_per_pm, med_users_per_pm, avg_users_per_pg, med_users_per_pg, avg_sess_per_user_per_two_user_pm, med_sess_per_user_per_two_user_pm,\\\n\t\ttotal_two_user_pms, avg_users_per_two_user_pm, med_users_per_two_user_pm\n\t\"\"\"\n\tMeasure 2-user pms vs 2-user pgs\n\tGet 2-user pms and 2-user pgs from pm_sess and pg_sess (i.e. 
where sessions for 2 participants were logged)\n\n\tGet rid of less than 2 user cases to make it like for like\n\t\"\"\"\n\ndef avg_num_of_switchovers_per_type():\n\t\"\"\"\n\tWhat are avg number of chats produced per type of chat?\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\ttotal_pms = my_server.get('total_pms_sw')\n\tif total_pms:\n\t\tresults = my_server.mget(['median_pm_idx_sw','median_pm_tuple_sw','aggregate_pm_sws','avg_sw_per_pm','total_pgs_sw','median_pg_idx_sw',\\\n\t\t\t'median_pg_tuple_sw','aggregate_pg_sws','avg_sw_per_pg'])\n\t\treturn total_pms, results[0], results[1], results[2], results[3], results[4], results[5], results[6], results[7], results[8]\n\telse:\n\t\tpm_data = my_server.zrange('pm_sw',0,-1,withscores=True)\n\t\ttotal_pms = len(pm_data)\n\t\tmedian_pm_idx = int(total_pms/2)\n\t\tmedian_pm_tuple = my_server.zrange('pm_sw',median_pm_idx,median_pm_idx+1,withscores=True)[0]\n\n\t\tpg_data = my_server.zrange('pg_sw',0,-1,withscores=True)\t\n\t\ttotal_pgs = len(pg_data)\n\t\tmedian_pg_idx = int(total_pgs/2)\n\t\tmedian_pg_tuple = my_server.zrange('pg_sw',median_pg_idx,median_pg_idx+1,withscores=True)[0]\n\n\t\t# data is list of (group_id,chat_num) type tuples\n\t\taggregate_pm_sws, aggregate_pg_sws = 0, 0\n\t\tfor tup in pm_data:\n\t\t\taggregate_pm_sws += int(tup[1])\n\t\tfor tup in pg_data:\n\t\t\taggregate_pg_sws += int(tup[1])\n\t\tavg_sw_per_pm = \"{0:.2f}\".format(aggregate_pm_sws/float(total_pms))\n\t\tavg_sw_per_pg = \"{0:.2f}\".format(aggregate_pg_sws/float(total_pgs))\n\n\t\t# caching the results\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.setex('total_pms_sw',total_pms,TEN_MINS)\n\t\tpipeline1.setex('median_pm_idx_sw',median_pm_idx,TEN_MINS)\n\t\tpipeline1.setex('median_pm_tuple_sw',median_pm_tuple,TEN_MINS)\n\t\tpipeline1.setex('aggregate_pm_sws',aggregate_pm_sws,TEN_MINS)\n\t\tpipeline1.setex('avg_sw_per_pm',avg_sw_per_pm,TEN_MINS)\n\t\tpipeline1.setex('total_pgs_sw',total_pgs,TEN_MINS)\n\t\tpipeline1.setex('median_pg_idx_sw',median_pg_idx,TEN_MINS)\n\t\tpipeline1.setex('median_pg_tuple_sw',median_pg_tuple,TEN_MINS)\n\t\tpipeline1.setex('aggregate_pg_sws',aggregate_pg_sws,TEN_MINS)\n\t\tpipeline1.setex('avg_sw_per_pg',avg_sw_per_pg,TEN_MINS)\n\t\tpipeline1.execute()\n\t\treturn total_pms, median_pm_idx, median_pm_tuple, aggregate_pm_sws, avg_sw_per_pm, total_pgs, median_pg_idx, median_pg_tuple, \\\n\t\t\taggregate_pg_sws, avg_sw_per_pg\n\n\t\"\"\"\n\tDivide green mehfils into 2 person and 2+ person groups. 
Only 2 person green groups can be compared to private chat\n\t\"\"\"\n\n\ndef avg_num_of_chats_per_type():\n\t\"\"\"\n\tWhat are avg number of chats produced per type of chat?\n\t\"\"\"\n\tmy_server = redis.Redis(connection_pool=POOL)\n\ttotal_pms = my_server.get('total_pms')\n\tif total_pms:\n\t\tresults = my_server.mget(['median_pm_idx','median_pm_tuple','aggregate_pm_chats','avg_chat_per_pm','total_pgs','median_pg_idx','median_pg_tuple',\\\n\t\t\t'aggregate_pg_chats','avg_chat_per_pg','pms_with_sws','pgs_with_sws'])\n\t\treturn total_pms, results[0], results[1], results[2], results[3], results[4], results[5], results[6], results[7], results[8], results[9], results[10]\n\telse:\n\t\tpm_data = my_server.zrange('pm_ch',0,-1,withscores=True)\n\t\ttotal_pms = len(pm_data)\n\t\tmedian_pm_idx = int(total_pms/2)\n\t\tmedian_pm_tuple = my_server.zrange('pm_ch',median_pm_idx,median_pm_idx+1,withscores=True)[0]\n\n\t\tpg_data = my_server.zrange('pg_ch',0,-1,withscores=True)\n\t\ttotal_pgs = len(pg_data)\n\t\tmedian_pg_idx = int(total_pgs/2)\n\t\tmedian_pg_tuple = my_server.zrange('pg_ch',median_pg_idx,median_pg_idx+1,withscores=True)[0]\n\n\t\t# data is list of (group_id,chat_num) type tuples\n\t\taggregate_pm_chats, aggregate_pg_chats = 0, 0\n\t\tfor tup in pm_data:\n\t\t\taggregate_pm_chats += int(tup[1])\n\t\tfor tup in pg_data:\n\t\t\taggregate_pg_chats += int(tup[1])\n\t\tavg_chat_per_pm = \"{0:.2f}\".format(aggregate_pm_chats/float(total_pms))\n\t\tavg_chat_per_pg = \"{0:.2f}\".format(aggregate_pg_chats/float(total_pgs))\n\n\t\tpms_with_sws = \"{0:.2f}\".format(my_server.zcard('pm_sw')/float(total_pms)*100)\n\t\tpgs_with_sws = \"{0:.2f}\".format(my_server.zcard('pg_sw')/float(total_pgs)*100)\n\n\t\t# caching the results\n\t\tpipeline1 = my_server.pipeline()\n\t\tpipeline1.setex('total_pms',total_pms,TEN_MINS)\n\t\tpipeline1.setex('median_pm_idx',median_pm_idx,TEN_MINS)\n\t\tpipeline1.setex('median_pm_tuple',median_pm_tuple,TEN_MINS)\n\t\tpipeline1.setex('aggregate_pm_chats',aggregate_pm_chats,TEN_MINS)\n\t\tpipeline1.setex('avg_chat_per_pm',avg_chat_per_pm,TEN_MINS)\n\t\tpipeline1.setex('total_pgs',total_pgs,TEN_MINS)\n\t\tpipeline1.setex('median_pg_idx',median_pg_idx,TEN_MINS)\n\t\tpipeline1.setex('median_pg_tuple',median_pg_tuple,TEN_MINS)\n\t\tpipeline1.setex('aggregate_pg_chats',aggregate_pg_chats,TEN_MINS)\n\t\tpipeline1.setex('avg_chat_per_pg',avg_chat_per_pg,TEN_MINS)\n\t\tpipeline1.setex('pms_with_sws',pms_with_sws,TEN_MINS)\n\t\tpipeline1.setex('pgs_with_sws',pgs_with_sws,TEN_MINS)\n\t\tpipeline1.execute()\n\t\treturn total_pms, median_pm_idx, median_pm_tuple, aggregate_pm_chats, avg_chat_per_pm, total_pgs, median_pg_idx, median_pg_tuple, \\\n\t\taggregate_pg_chats, avg_chat_per_pg, pms_with_sws, pgs_with_sws","sub_path":"links/redis4.py","file_name":"redis4.py","file_ext":"py","file_size_in_byte":62779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
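The redis4.py record above computes the same sorted-list median four times with inline divmod logic. A minimal sketch of that idiom as a reusable helper (the function name is mine, not from the source):

# Median via the sort-and-divmod idiom repeated in redis4.py above.
def median(values):
    ordered = sorted(values)
    half, is_odd = divmod(len(ordered), 2)
    if is_odd:
        return ordered[half]
    return (ordered[half - 1] + ordered[half]) / 2.0

print(median([3, 1, 2]))     # 2
print(median([4, 1, 2, 3]))  # 2.5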
+{"seq_id":"387776652","text":"import pandas as pd\nimport os\n\nfiles = []\n\nfor dirpath, dirnames, files in os.walk('channeltranscripts'):\n for name in files:\n if name.lower().endswith('.csv'):\n print('reading', name)\n data = pd.read_csv(os.path.join(dirpath, name))\n\n","sub_path":".history/dataformatter_20210412131615.py","file_name":"dataformatter_20210412131615.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
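The .history snapshot above re-binds data on every iteration, so only the last CSV read survives the loop. A hedged sketch of the accumulate-then-concatenate pattern the walk appears to be building toward (the 'channeltranscripts' folder name comes from the record; the pd.concat step is my assumption about the intent):

import os
import pandas as pd

frames = []
for dirpath, dirnames, filenames in os.walk('channeltranscripts'):
    for name in filenames:
        if name.lower().endswith('.csv'):
            frames.append(pd.read_csv(os.path.join(dirpath, name)))

# One combined frame; ignore_index renumbers rows across files.
combined = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()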
+{"seq_id":"629169777","text":"#1723682\n#David van Vliet\n\ngetallen = [] # make a new list for the inputs\ninp = (int(input(\"Enter a number: \")))\nwhile inp != 0:\n    getallen.append(inp) # put the inputs into the list\n    inp = (int(input(\"Enter a number: \")))\n\nx = len(getallen)\ny = sum(getallen)\nprint(\"There are\", x, \"numbers entered, the sum is: \", y)\n","sub_path":"Python/les7/pe7_1.py","file_name":"pe7_1.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"179688283","text":"from alphavantage import Alphavantage\nfrom newsapi import Newsapi\nimport json\nfrom twilio.rest import Client\nSTOCK = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\nDOWN_ICON = \"🔻\"\nUP_ICON = \"🔺\"\n\nwith open(\"../cred.json\") as f:\n api_json = f.read()\ntwilio_cred = json.loads(api_json)[\"twilio\"]\naccount_sid = twilio_cred['TWILIO_ACCOUNT_SID']\nauth_token = twilio_cred['TWILIO_AUTH_TOKEN']\n\ntesla_obj = Alphavantage()\ntesla_data = tesla_obj.time_series_daily_adjusted(STOCK)\n\ndiff_per = tesla_obj.prev_diff_percentage(tesla_data)\n\nif diff_per > 5 or diff_per < -5:\n if diff_per < 0:\n icon = DOWN_ICON\n else:\n icon = UP_ICON\n newsapi = Newsapi()\n tesla_news = newsapi.get_news(keyword=COMPANY_NAME,page_size=3)\n message_body = f\"{STOCK}: {icon}{abs(diff_per)}%\\n\"\n for n in tesla_news:\n message_body += f\"\\nHeadline: {n['title']}\\nBrief: {n['description']}\\n\"\n client = Client(account_sid, auth_token)\n\n message = client.messages \\\n .create(\n body=f\"{message_body}\",\n from_=f'{twilio_cred[\"from_number\"]}',\n to=f'{twilio_cred[\"to_number\"]}'\n )\n\n print(message.status)\n","sub_path":"day-36/stock-news/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
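The stock-news record fires an SMS when the day-over-day move exceeds ±5%. The Alphavantage wrapper is local to that project, so here is only the underlying arithmetic, with made-up closing prices standing in for what prev_diff_percentage presumably returns:

# Percent change between the last two daily closes (sample values).
prev_close, close_before = 700.0, 650.0
diff_per = (prev_close - close_before) / close_before * 100
icon = "🔺" if diff_per >= 0 else "🔻"
if abs(diff_per) > 5:
    print("TSLA: {}{:.1f}%".format(icon, abs(diff_per)))  # TSLA: 🔺7.7%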
+{"seq_id":"158228009","text":"#coding=utf-8\n# @Time : 2017-10-12\n# @Author : J.sky\n# @Mail : bosichong@qq.com\n# @Site : www.17python.com\n# @Title : Creating a TCP server and client for communication in Python (part 2): combining Tk, thread and socket.\n# @Url : http://www.17python.com/blog/41\n# @Details : Creating a TCP server and client for communication in Python (part 2): combining Tk, thread and socket.\n# @Other : OS X 10.11.6 \n# Python 3.6.1\n# VSCode 1.15.1\n###################################\n# Creating a TCP server and client for communication in Python (part 2): combining Tk, thread and socket.\n###################################\nimport threading, socket, time\nimport tkinter as tk\n\n\nclass TcpClient(threading.Thread):\n    def __init__(self, addr, port):\n        threading.Thread.__init__(self)\n        self.addr = addr\n        self.port = port\n        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.s.connect((self.addr, self.port))\n        self.stop_flag = False\n        self.name = ''\n\n    def run(self):\n        self.sendName() # send the nickname for verification\n        t1 = threading.Thread(target=self.recvMsgThread)\n        t2 = threading.Thread(target=self.sendMsgThread)\n        t1.start()\n        t2.start()\n        t1.join()\n        t2.join()\n\n    # Send the client username; the server keeps it in a server-side list\n    def sendName(self):\n        while True:\n            self.name = input('Enter a nickname: ')\n            self.s.send(self.name.encode()) # send to the server\n            msg = self.s.recv(1024)\n            # must match the reply string sent by the server\n            if msg.decode('utf-8') == 'Username already exists':\n                print(msg.decode('utf-8'), 'Please enter another nickname!')\n            else:\n                break\n\n    # Send-message thread method\n    def sendMsgThread(self):\n        print('Send thread started ------------', self.stop_flag)\n        while not self.stop_flag:\n            data = input('>>>')\n            if data == 'exit': # type exit to quit the client\n                msg = self.name + ' seems to have something urgent and jogged out of the chat room'\n                self.s.send(msg.encode())\n                time.sleep(1)\n                self.stop() # stop the threads\n                print('Send thread closed')\n            elif data:\n                data = '[{0}] says: {1}'.format(self.name, data)\n                self.s.send(data.encode())\n\n    # Receive-message thread method\n    def recvMsgThread(self):\n        print('Receive thread started -------------', self.stop_flag)\n        while not self.stop_flag:\n            try:\n                msg = self.s.recv(1024)\n                if msg: print(msg.decode())\n            except Exception as e:\n                print('Receive thread closed')\n\n    def stop(self):\n        self.s.close()\n        self.stop_flag = True\n\n\ndef main():\n    ip = '192.168.0.88'\n    port = 18888\n    t3 = TcpClient(ip, port)\n    print('Entering the chat room. Pick yourself a great nickname first!')\n    t3.start()\n    t3.join()\n\n\nif __name__ == '__main__':\n    main()\n    print('Client exited')\n\n","sub_path":"TCP/GUISocket/GUI_Clisock.py","file_name":"GUI_Clisock.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
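The client above pairs one sending thread with one receiving thread on a single socket. A self-contained sketch of that two-thread pattern, using socket.socketpair() so it runs without the (unshown) chat server:

import socket
import threading

left, right = socket.socketpair()  # connected pair standing in for client/server

def recv_loop(sock):
    # Mirror of the client's receive thread: read until the peer closes.
    while True:
        data = sock.recv(1024)
        if not data:
            break
        print('received:', data.decode())

t = threading.Thread(target=recv_loop, args=(right,))
t.start()
left.send('hello'.encode())
left.close()  # recv() then returns b'' on the other end, ending the thread
t.join()
right.close()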
+{"seq_id":"474601663","text":"from PIL import ImageGrab, Image, ImageChops\nimport time\nimport cv2\nimport os\nimport shutil\nimport sys\nimport pickle\nimport time\nfrom scipy.misc import imread\nfrom scipy.linalg import norm\nfrom scipy import sum, average\nfrom skimage import color\nfrom skimage import io\nfrom sklearn.decomposition import PCA\nimport numpy as np\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# caputure image\n# ----------------------------------------------------------------------------------------------------------------------\n# for i in range(1000):\n# time.sleep(0.5)\n# im = ImageGrab.grab(bbox=(0,280,405,820))\n# #im = ImageGrab.grab() full screen\n# im.save('screen_shots/screenshot-{}.png'.format(i))\n# ----------------------------------------------------------------------------------------------------------------------\n\nclass GameFb:\n def __init__(self):\n self.pic_uuid = 0\n self.run_bbox = (748, 175, 1140, 705)\n self.end_bbox = (830, 525, 1065, 560)\n\n def _grab_save_img(self, path, bbox):\n im = ImageGrab.grab(bbox=bbox)\n im.save(path)\n self.pic_uuid += 1\n\n def get_img_feature(self, thin_factor = 1):\n im = ImageGrab.grab(bbox=self.run_bbox).convert('1')\n arr = np.array(im)\n arr = 1 * arr.flatten()\n im.save('test.png')\n #arr = arr[::thin_factor]\n pca_path = 'fb_PCA'\n pca = pickle.load(open(pca_path, \"rb\"))\n pca_arr = pca.transform(arr)[0]\n return pca_arr\n\n # path = 'running_screen_shots/{}.png'.format(self.pic_uuid)\n # self._grab_save_img(path, self.run_bbox)\n # img_vector = color.rgb2gray(io.imread(path))\n # print (\"img_vector: \".format(img_vector))\n # sys.exit()\n\n def clear_screen_shots(self):\n self.pic_uuid = 0\n clear_folder = 'running_screen_shots'\n file_list = os.listdir(clear_folder)\n for file in file_list:\n file_path = os.path.join(clear_folder, file)\n os.remove(file_path)\n\n @property\n def is_game_start(self):\n #\n GAME_START_THRESHOLD = 1.0\n #\n start_pics_folder_path = 'start_end_shots'\n start_pic_path = 'start.png'\n start_pic_path = os.path.join(start_pics_folder_path, start_pic_path)\n #\n path = 'running_screen_shots/{}.png'.format(self.pic_uuid)\n self._grab_save_img(path, self.run_bbox)\n #\n\n n_m = self._compare_images(start_pic_path, path)\n #print(\"n_m: {}\".format(n_m))\n\n if n_m <= GAME_START_THRESHOLD:\n return True\n else:\n\n return False\n\n @property\n def is_game_end(self):\n\n #\n GAME_END_THRESHOLD = 1.0\n #\n end_pics_folder_path = 'start_end_shots'\n end_pic_path = 'end.png'\n end_pic_path = os.path.join(end_pics_folder_path, end_pic_path)\n #\n path = 'running_screen_shots/{}.png'.format(self.pic_uuid)\n self._grab_save_img(path, self.end_bbox)\n #\n\n n_m = self._compare_images(end_pic_path, path)\n #print(\"n_m: {}\".format(n_m))\n\n if n_m <= GAME_END_THRESHOLD:\n return True\n else:\n\n return False\n\n\n def _compare_images(self,img1_path, img2_path):\n\n def _to_grayscale(arr):\n \"If arr is a color image (3D array), convert it to grayscale (2D array).\"\n if len(arr.shape) == 3:\n return average(arr, -1) # average over the last axis (color channels)\n else:\n return arr\n\n def _normalize(arr):\n rng = arr.max() - arr.min()\n amin = arr.min()\n return (arr - amin) * 255 / rng\n\n img1 = _to_grayscale(imread(img1_path).astype(float))\n img2 = _to_grayscale(imread(img2_path).astype(float))\n\n # normalize to compensate for exposure difference\n img1 = _normalize(img1)\n img2 = _normalize(img2)\n # 
calculate the difference and its norms\n diff = img1 - img2 # elementwise for scipy arrays\n m_norm = sum(abs(diff)) # Manhattan norm\n #z_norm = norm(diff.ravel(), 0) # Zero norm\n\n img_size = img1.size\n n_m = m_norm / img_size\n return n_m\n\n# ----------------------------------------------------------------------------------------------------------------------\n# compare image\n# ----------------------------------------------------------------------------------------------------------------------\n\n\n\n# bbox = (748, 175, 1140, 705)\n# im = ImageGrab.grab(bbox=bbox)\n# for i in range(1000):\n# img_path = 'running_screen_shots/{}.png'.format(i)\n# path = 'running_screen_shots/{}.txt'.format(i)\n# time.sleep(0.5)\n# im = ImageGrab.grab(bbox=bbox).convert('1')\n# im.save(img_path)\n# arr = np.array(im)\n# arr = 1 * arr.flatten()\n# arr = [str(x) for x in arr]\n# arr_str = ','.join(arr)\n# with open (path, 'w') as f:\n# f.write(arr_str)\n\n\n\n\n\n# file1 = \"start_shots/screenshot-20.png\"\n# file2 = \"start_shots/screenshot-21.png\"\n#\n#\n# img1 = to_grayscale(imread(file1).astype(float))\n# img2 = to_grayscale(imread(file2).astype(float))\n# # compare\n# n_m, n_0 = compare_images(img1, img2)\n# # threshold n_m: 1.6\n# print (\"n_m: {}, n_0:{}\".format(n_m/img1.size, n_0/img1.size))\n# ----------------------------------------------------------------------------------------------------------------------\n","sub_path":"screen_capturer.py","file_name":"screen_capturer.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
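GameFb decides game start/end by thresholding the normalized Manhattan distance between a screenshot and a reference frame. Note that scipy.misc.imread was removed in SciPy 1.2, so here is a NumPy-only sketch of the same metric (small arrays stand in for loaded screenshots):

import numpy as np

def manhattan_per_pixel(img1, img2):
    # Normalize both to 0..255 to compensate for exposure, then average
    # the absolute per-pixel difference (the n_m value in the record).
    def norm(a):
        a = a.astype(float)
        rng = a.max() - a.min()
        return (a - a.min()) * 255 / rng if rng else a
    diff = norm(img1) - norm(img2)
    return np.abs(diff).sum() / img1.size

blank = np.zeros((4, 4))
diag = np.eye(4) * 255
print(manhattan_per_pixel(blank, diag))  # 63.75, well above a 1.0 threshold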
+{"seq_id":"595546968","text":"from banco import Banco\nfrom cliente import Cliente\nfrom conta import ContaCorrente, ContaPoupanca\n\nbanco = Banco()\n\ncliente1 = Cliente('Pedro', 28)\ncliente2 = Cliente('Maria', 18)\ncliente3 = Cliente('Juliana', 25)\n\nconta1 = ContaPoupanca(1111, 235687, 0)\nconta2 = ContaCorrente(2222, 321687, 0)\nconta3 = ContaPoupanca(1112, 2187, 0)\n\ncliente1.inserir_contar(conta1)\ncliente2.inserir_contar(conta2)\ncliente3.inserir_contar(conta3)\n\nbanco.inserir_conta(conta1)\nbanco.inserir_cliente(cliente1)\n\n\nif banco.autenticar(cliente1):\n cliente1.conta.depositar(200)\n cliente1.conta.sacar(30)\nelse:\n print('Cliente não autenticado')","sub_path":"Desafio/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"172983874","text":"def fade(st, nd, steps=10):\n st = st.strip(\"#\")\n nd = nd.strip(\"#\")\n st_rgb = hex_to_rgb(st)\n nd_rgb = hex_to_rgb(nd)\n\n to_step = [nds - sts for sts, nds in zip(st_rgb, nd_rgb)]\n\n (rstep, gstep, bstep) = [to_step[i] // steps for i in (0, 1, 2)]\n\n color_steps = []\n for i in range(steps - 1):\n color_steps.append(\n rgb_to_hex((st_rgb[0]+i*rstep, st_rgb[1]+i*gstep, st_rgb[2]+i*bstep))\n )\n color_steps.append(\"#\" + nd)\n\n return color_steps\n\n\ndef hex_to_rgb(h):\n return [int(h[i:i + 2], 16) for i in (0, 2, 4)]\n\ndef rgb_to_hex(rgb):\n _long_hex = lambda s: ('0' if len(s) == 1 else '') + s\n _hex = lambda i: _long_hex(format(i, 'x'))\n return f'#{_hex(rgb[0])}{_hex(rgb[1])}{_hex(rgb[2])}'\n\ndef long_hex(h):\n if len(h) == 3:\n return \"\".join([ch*2 for ch in h])\n return h\n \n","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
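A quick usage check of fade(). Because to_step[i] // steps truncates, intermediate colors can undershoot slightly, and appending "#" + nd guarantees the ramp still ends exactly on the target. Note also that long_hex is defined but never called, so 3-digit inputs like "#fff" would break hex_to_rgb:

print(fade('#000000', '#ffffff', steps=4))
# ['#000000', '#3f3f3f', '#7e7e7e', '#ffffff']  (255 // 4 == 63 per channel step)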
+{"seq_id":"403390994","text":"from __future__ import print_function\nimport requests\nimport argparse\n\n# handle ModuleNotFoundError during python3 execution\ntry:\n from cli_weather.airquality_data import *\n from cli_weather.weather_data import *\n from cli_weather.weather_forecast_data import *\n from cli_weather.airquality_forecast_data import *\nexcept ModuleNotFoundError:\n from airquality_data import *\n from weather_data import *\n from weather_forecast_data import *\n from airquality_forecast_data import *\n\ndef get_by_city_args(subparsers):\n \"\"\"\n add positinal argument 'city' to argparser\n \"\"\"\n city_parser = subparsers.add_parser('city',\n formatter_class=argparse.RawTextHelpFormatter\n )\n city_parser.add_argument(\n \"city\",\n help=\"get weather by city name\"\n )\n city_parser.add_argument(\n \"-a\",\"--airquality\",\n action=\"store_true\",\n help=\"current air quality observations\"\n )\n city_parser.add_argument(\n \"-d\",\"--detailed\",\n help=\"display detailed data [not applicable for forecast]\",\n action=\"store_true\"\n )\n city_parser.add_argument(\n \"-f\",\"--forecast\",\n action=\"store_true\",\n help=\"forecast on weather or airquality\"\n )\n city_parser.add_argument(\n \"-c\", \"--country\",\n help=\"country of entered area\",\n default=\"\"\n )\n city_parser.add_argument(\n \"-u\", \"--units\",\n choices=['M','S','I'],\n help=\"M - Metric (Celcius, m/s, mm) [DEFAULT]\\nS - Scientific (Kelvin, m/s, mm)\\nI - Imperial (F, mph, in)\",\n default=\"M\"\n )\n\n\ndef city_parse(args):\n \"\"\"\n Send API request to WeatherBIT for city based input\n and call respective methods based on optional arguments\n \"\"\"\n city = args.city\n country = \"&\" + args.country\n units = args.units\n API_KEY = \"2a7db0585e7541018229c17efb2efa94\"\n\n\n if args.airquality is True and args.forecast is False:\n if args.country == \"\":\n API_URL = \"https://api.weatherbit.io/v2.0/current/airquality?city=\"+city+\"&key=\"\n else:\n API_URL = \"https://api.weatherbit.io/v2.0/current/airquality?city=\"+city+country+\"&key=\"\n\n elif args.airquality is False and args.forecast is True:\n if args.country == \"\":\n API_URL = \"https://api.weatherbit.io/v2.0/forecast/daily?city=\"+city+\"&key=\"\n else:\n API_URL = \"https://api.weatherbit.io/v2.0/forecast/daily?city=\"+city+country+\"&key=\"\n\n elif args.airquality is True and args.forecast is True:\n if args.country == \"\":\n API_URL = \"https://api.weatherbit.io/v2.0/forecast/airquality?city=\"+city+\"&key=\"\n else:\n API_URL = \"https://api.weatherbit.io/v2.0/forecast/airquality?city=\"+city+country+\"&key=\"\n\n elif args.airquality is False:\n if args.country == \"\":\n API_URL = \"https://api.weatherbit.io/v2.0/current?city=\"+city+\"&key=\"\n else:\n API_URL = \"https://api.weatherbit.io/v2.0/current?city=\"+city+country+\"&key=\"\n\n url = API_URL + API_KEY\n\n querystring = {\n \"lang\":\"en\",\n \"units\":units\n }\n\n response = requests.request(\"GET\", url, params=querystring)\n\n try:\n main_data = response.json()\n # ValueError-unable to decode json, UnboundLocalError-used var before declaring\n except (ValueError,UnboundLocalError) as err:\n print(\"Invalid city\")\n print(\"Please use format ex: $ cli-weather bengaluru [-c country_name][-a][-u M/S/I][-d]\")\n return\n\n # defalut metric values\n degree = \"celcius\"\n speed = \"m/s\"\n distance = \"mm\"\n\n if args.units == \"S\":\n degree = \"kelvin\"\n elif args.units == \"I\":\n degree = \"Fahrenheit\"\n speed = \"mph\"\n distance = \"in\"\n\n choice = 
[True, False]\n if args.airquality is False and args.detailed in choice and args.forecast is True:\n get_weather_forecast(main_data, degree, speed, distance)\n return\n elif args.airquality is True and args.detailed in choice and args.forecast is True:\n get_airquality_forecast(main_data)\n return\n\n # call respective methods based on selected combination of optional arguments in cli-weather\n if args.detailed is False and args.airquality is False:\n get_basic_temparature(main_data, degree)\n\n elif args.detailed is True and args.airquality is False:\n get_detailed_weather(main_data, degree, speed, distance)\n\n elif args.detailed is False and args.airquality is True:\n get_basic_airquality(main_data)\n\n elif args.detailed is True and args.airquality is True:\n get_detailed_airquality(main_data)\n","sub_path":"venv/Lib/site-packages/cli_weather/get_by_city.py","file_name":"get_by_city.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
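The if/else ladder in city_parse builds eight near-identical URLs by concatenation, and country = "&" + args.country drops the parameter name (producing ...&France&key= rather than a named parameter). A hedged sketch of the same routing with an endpoint map and requests params; the endpoint paths are copied from the record, while the refactor and the country parameter name are my assumptions:

import requests

BASE = "https://api.weatherbit.io/v2.0"
ENDPOINTS = {
    (False, False): "/current",
    (True, False): "/current/airquality",
    (False, True): "/forecast/daily",
    (True, True): "/forecast/airquality",
}

def fetch(city, api_key, airquality=False, forecast=False, country="", units="M"):
    params = {"city": city, "key": api_key, "lang": "en", "units": units}
    if country:
        params["country"] = country  # named, unlike the "&" + country concatenation
    return requests.get(BASE + ENDPOINTS[(airquality, forecast)], params=params)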
+{"seq_id":"200851098","text":"\"\"\"\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?\n\"\"\"\n\n\n# First 4 prime numbers\nprimes = [2,3,5,7]\n\n# Returns a list with the factors of a number\ndef get_factors(n):\n    if n in primes:\n        return [int(n)]\n    else:\n        #print(f'Calling with n={n}')\n        for i in range(2, int(n)):\n            if int(n) % i == 0:\n                #print(f'Factor found: {i}')\n                resto = get_factors(int(n)/i)\n                return [*resto, *[int(i)]]\n        return [int(n)]\n\n# Returns a dictionary with the number of occurrences of each factor\ndef dict_factor(factores):\n    dic = {}\n    for i in factores:\n        if i in dic.keys():\n            dic[i] += 1\n        else:\n            dic[i] = 1\n    return dic\n\n# From a factor/occurrences dictionary, computes the lcm (mcm)\ndef get_mcm(factores: dict):\n    resultado = 1\n    for i in factores.keys():\n        n = i**factores[i]\n        resultado = resultado * n\n    return resultado\n\n\n\nall_divisors = {}\n\nfor n in range(2,21):\n    factores = dict_factor(get_factors(n))\n    for f in factores.keys():\n        if f in all_divisors:\n            if all_divisors[f] < factores[f]:\n                all_divisors[f] = factores[f]\n        else:\n            all_divisors[f] = factores[f]\n\n\nprint(f'All divisors: {all_divisors}')\nmcm = get_mcm(all_divisors)\n\nprint(f'The mcm is: {mcm}')\n\n","sub_path":"Problem_005/problem_5.py","file_name":"problem_5.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
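The factor-exponent method above is sound; for comparison, the standard-library gcd gives the same Project Euler 5 answer in a few lines:

from functools import reduce
from math import gcd

def lcm(a, b):
    return a * b // gcd(a, b)

print(reduce(lcm, range(1, 21)))  # 232792560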
+{"seq_id":"586164884","text":"# System imports\nimport os\nimport sys\nfrom pprint import pprint as pp\nfrom time import time as tt\nimport inspect\n\n# External imports\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import auc\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport torch\nfrom torch_geometric.data import Data\nfrom torch_geometric.data import DataLoader\nfrom mpl_toolkits.mplot3d import Axes3D\nimport argparse\nfrom itertools import permutations\n\nfrom itertools import chain\nimport trackml.dataset\n\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interact_manual\n\n# Pick up local packages\nsys.path.append(\"..\")\n\n# Local imports\nfrom prepare import select_hits\nfrom toy_utils import *\nfrom models import *\nfrom trainers import *\n\n# Get rid of RuntimeWarnings, gross\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\nimport wandb\n\n\ndef parse_args():\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\"train.py\")\n add_arg = parser.add_argument\n\n add_arg(\"--hidden-dim\", type=int, default=None, help=\"Hidden layer dimension size\")\n add_arg(\n \"--n-graph-iters\", type=int, default=None, help=\"Number of graph iterations\"\n )\n add_arg(\n \"--emb-dim\",\n type=int,\n default=None,\n help=\"Number of spatial embedding dimensions\",\n )\n add_arg(\n \"--emb-hidden\",\n type=int,\n default=None,\n help=\"Number of embedding hidden dimensions\",\n )\n add_arg(\"--nb-layer\", type=int, default=None, help=\"Number of embedding layers\")\n add_arg(\"--r-val\", type=float, default=None, help=\"Radius of graph construction\")\n add_arg(\"--r-train\", type=float, default=None, help=\"Radius of embedding training\")\n add_arg(\"--margin\", type=float, default=None, help=\"Radius of hinge loss\")\n add_arg(\"--lr-1\", type=float, default=None, help=\"Embedding loss learning rate\")\n add_arg(\"--lr-2\", type=float, default=None, help=\"AGNN loss learning rate\")\n add_arg(\"--lr-3\", type=float, default=None, help=\"Weight balance learning rate\")\n add_arg(\"--weight\", type=float, default=None, help=\"Positive weight in AGNN\")\n add_arg(\"--train-size\", type=int, default=None, help=\"Number of train population\")\n add_arg(\"--val-size\", type=int, default=None, help=\"Number of validate population\")\n add_arg(\"--pt-cut\", type=float, default=None, help=\"Cutoff for momentum\")\n add_arg(\"--adjacent\", type=bool, default=False, help=\"Enforce adjacent layers?\")\n add_arg(\"--pretrain-epochs\", type=int, default=5)\n\n return parser.parse_args()\n\n\ndef build_event(event_file, pt_min, feature_scale, adjacent):\n hits, particles, truth = trackml.dataset.load_event(\n event_file, parts=[\"hits\", \"particles\", \"truth\"]\n )\n hits = select_hits(hits, truth, particles, pt_min=pt_min).assign(\n evtid=int(event_file[-9:])\n )\n layers = hits.layer.to_numpy()\n\n # Get true edge list\n records_array = hits.particle_id.to_numpy()\n idx_sort = np.argsort(records_array)\n sorted_records_array = records_array[idx_sort]\n _, idx_start, _ = np.unique(\n sorted_records_array, return_counts=True, return_index=True\n )\n # sets of indices\n res = np.split(idx_sort, idx_start[1:])\n true_edges = np.concatenate(\n [list(permutations(i, r=2)) for i in res if len(list(permutations(i, r=2))) > 0]\n )\n if adjacent:\n true_edges = true_edges[\n (layers[true_edges.T[1]] - layers[true_edges.T[0]] == 1)\n ]\n\n return 
(\n hits[[\"r\", \"phi\", \"z\"]].to_numpy() / feature_scale,\n hits.particle_id.to_numpy(),\n layers,\n true_edges,\n )\n\n\ndef prepare_event(event_file, pt_min, feature_scale, adjacent=True):\n # print(\"Preparing\",event_file)\n X, pid, layers, true_edges = build_event(\n event_file, pt_min, feature_scale, adjacent\n )\n data = Data(\n x=torch.from_numpy(X).float(),\n pid=torch.from_numpy(pid),\n layers=torch.from_numpy(layers),\n true_edges=torch.from_numpy(true_edges),\n )\n return data\n\n\ndef save_model(epoch, model, optimizer, scheduler, running_loss, config, PATH):\n torch.save(\n {\n \"epoch\": epoch,\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"scheduler_state_dict\": scheduler.state_dict(),\n \"loss\": running_loss,\n \"config\": config,\n },\n os.path.join(\"model_comparisons/\", PATH),\n )\n\n\ndef main(args):\n # print(args)\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n # Dataset processing\n\n input_dir = \"/global/cscratch1/sd/danieltm/ExaTrkX/trackml/train_all/\"\n all_events = os.listdir(input_dir)\n all_events = [input_dir + event[:14] for event in all_events]\n np.random.shuffle(all_events)\n\n train_dataset = [\n prepare_event(event_file, args.pt_cut, [1000, np.pi, 1000], args.adjacent)\n for event_file in all_events[:4000]\n ]\n test_dataset = [\n prepare_event(event_file, args.pt_cut, [1000, np.pi, 1000], args.adjacent)\n for event_file in all_events[-args.val_size :]\n ]\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n # Model config\n e_configs = {\n \"in_channels\": 3,\n \"emb_hidden\": args.emb_hidden,\n \"nb_layer\": args.nb_layer,\n \"emb_dim\": args.emb_dim,\n }\n m_configs = {\n \"in_channels\": 3,\n \"emb_hidden\": args.emb_hidden,\n \"nb_layer\": args.nb_layer,\n \"emb_dim\": args.emb_dim,\n \"r\": args.r_val,\n \"hidden_dim\": args.hidden_dim,\n \"n_graph_iters\": args.n_graph_iters,\n }\n other_configs = {\n \"weight\": args.weight,\n \"r_train\": args.r_train,\n \"r_val\": args.r_val,\n \"margin\": args.margin,\n \"reduction\": \"mean\",\n }\n\n # Create and pretrain embedding\n embedding_model = Embedding(**e_configs).to(device)\n wandb.init(group=\"EmbeddingToAGNN_PurTimesEff\", config=m_configs)\n embedding_optimizer = torch.optim.Adam(\n embedding_model.parameters(), lr=0.0005, weight_decay=1e-3, amsgrad=True\n )\n\n for epoch in range(args.pretrain_epochs):\n tic = tt()\n embedding_model.train()\n cluster_pur, train_loss = train_emb(\n embedding_model, train_loader, embedding_optimizer, other_configs\n )\n\n embedding_model.eval()\n with torch.no_grad():\n cluster_pur, cluster_eff, val_loss, av_nhood_size = evaluate_emb(\n embedding_model, test_loader, other_configs\n )\n wandb.log(\n {\n \"val_loss\": val_loss,\n \"train_loss\": train_loss,\n \"cluster_pur\": cluster_pur,\n \"cluster_eff\": cluster_eff,\n \"av_nhood_size\": av_nhood_size,\n }\n )\n\n # Create and train main model\n model = EmbeddingToAGNNPretrained(**m_configs, pretrained_model=embedding_model).to(\n device\n )\n multi_loss = MultiNoiseLoss(n_losses=2).to(device)\n m_configs.update(other_configs)\n wandb.run.save()\n print(wandb.run.name)\n model_name = wandb.run.name\n wandb.watch(model, log=\"all\")\n\n # Optimizer config\n\n optimizer = torch.optim.AdamW(\n [\n {\"params\": model.emb_network.parameters()},\n {\n \"params\": chain(\n model.node_network.parameters(),\n model.edge_network.parameters(),\n 
model.input_network.parameters(),\n )\n },\n {\"params\": multi_loss.noise_params},\n ],\n lr=0.001,\n weight_decay=1e-3,\n amsgrad=True,\n )\n\n # Scheduler config\n\n lambda1 = lambda ep: 1 / (args.lr_1 ** (ep // 10))\n lambda2 = lambda ep: 1 / (args.lr_2 ** (ep // 30))\n lambda3 = lambda ep: 1 / (args.lr_3 ** (ep // 10))\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer, lr_lambda=[lambda1, lambda2, lambda3]\n )\n\n # Training loop\n\n for epoch in range(50):\n tic = tt()\n model.train()\n if args.adjacent:\n edge_acc, cluster_pur, train_loss = balanced_adjacent_train(\n model, train_loader, optimizer, multi_loss, m_configs\n )\n else:\n edge_acc, cluster_pur, train_loss = balanced_train(\n model, train_loader, optimizer, multi_loss, m_configs\n )\n # print(\"Training loss:\", train_loss)\n\n model.eval()\n if args.adjacent:\n with torch.no_grad():\n (\n edge_acc,\n edge_pur,\n edge_eff,\n cluster_pur,\n cluster_eff,\n val_loss,\n av_nhood_size,\n ) = evaluate_adjacent(model, test_loader, multi_loss, m_configs)\n else:\n with torch.no_grad():\n (\n edge_acc,\n edge_pur,\n edge_eff,\n cluster_pur,\n cluster_eff,\n val_loss,\n av_nhood_size,\n ) = evaluate(model, test_loader, multi_loss, m_configs)\n scheduler.step()\n wandb.log(\n {\n \"val_loss\": val_loss,\n \"train_loss\": train_loss,\n \"edge_acc\": edge_acc,\n \"edge_pur\": edge_pur,\n \"edge_eff\": edge_eff,\n \"cluster_pur\": cluster_pur,\n \"cluster_eff\": cluster_eff,\n \"lr\": scheduler._last_lr[0],\n \"combined_performance\": edge_eff * cluster_eff * edge_pur + cluster_pur,\n \"combined_efficiency\": edge_eff * cluster_eff * edge_pur,\n \"noise_1\": multi_loss.noise_params[0].item(),\n \"noise_2\": multi_loss.noise_params[1].item(),\n \"av_nhood_size\": av_nhood_size,\n }\n )\n\n save_model(\n epoch,\n model,\n optimizer,\n scheduler,\n cluster_eff,\n m_configs,\n \"EmbeddingToAGNN/\" + model_name + \".tar\",\n )\n\n\n# print('Epoch: {}, Edge Accuracy: {:.4f}, Edge Purity: {:.4f}, Edge Efficiency: {:.4f}, Cluster Purity: {:.4f}, Cluster Efficiency: {:.4f}, Loss: {:.4f}, LR: {} in time {}'.format(epoch, edge_acc, edge_pur, edge_eff, cluster_pur, cluster_eff, val_loss, scheduler._last_lr, tt()-tic))\n\nif __name__ == \"__main__\":\n\n # Parse the command line\n args = parse_args()\n # print(args)\n\n main(args)\n","sub_path":"pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":10789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
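The three LambdaLR lambdas in pretrain.py encode step decay: the multiplier shrinks by a constant factor every 10 (or 30) epochs. A torch-free illustration of the first schedule, where base_lr and factor are hypothetical stand-ins for the optimizer learning rate and args.lr_1:

base_lr, factor = 0.001, 2.0
for ep in (0, 9, 10, 20, 30):
    print(ep, base_lr / factor ** (ep // 10))
# 0 0.001, 9 0.001, 10 0.0005, 20 0.00025, 30 0.000125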
+{"seq_id":"29470928","text":"import re\nimport math\nfrom collections import Counter\nfrom Project import db\nfrom flask_restful import Resource,reqparse,fields,marshal\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom Project.views import userbase,extra,home\nfrom flask import url_for,request\nfrom Project.models import User\nfrom werkzeug.urls import url_parse\nfrom Project.models import UserProfile\nfrom Project.models import Student\nfrom flask_cors import CORS,cross_origin\nfrom flask import jsonify\n\nheaders ={\"Content-Type\": \"application/json\"}\n\nclass SkillRecommender(Resource):\n\tdecorators= [login_required]\n\t@cross_origin()\n\tdef get(self,username):\n\t\t#u = User.query.filter_by(username=username).first()\n\t\t\t\t\n\t\tif username is None:\n\t\t\treturn jsonify({\"description\" : \"The username is NULL\"}),200,headers\n\t\tu = User.query.filter_by(username=username).first()\n\t\t\t\t\n\t\tu = UserProfile.query.filter_by(id=u.id).first()\n\t\t#text1 is the list of skills of the user\n\t\tprint(u)\n\t\tif u is None:\n\t\t\treturn jsonify({\"description\" : \"The username does not exist.Please enter a valid username\"}),200,headers\n\t\t\t\t\n\t\ttext1=u.skills\n\t\t#text2 is the list of skills from the group table\n\t\t#It is in the form of group_id,skills\n\t\ttext2=[[1,\"Java, C programming, R programming\"],[3,\"NodeJs, HTML, CSS\"],[5,\"C programming, Java, Python\"],[8,\"NodeJs, Java\"]]\n\t\ttext3=[[1,\"SE Project\"],[2,\"Chatbot App\"],[3,\"Proveit\"],[5,\"NGO Website\"],[8,\"Brownie\"]]\n\t\t\n\t\tWORD = re.compile(r'\\w+')\n\n\t\tdef get_cosine(vec1, vec2):\n\t\t intersection = set(vec1.keys()) & set(vec2.keys())\n\t\t numerator = sum([vec1[x] * vec2[x] for x in intersection])\n\n\t\t sum1 = sum([vec1[x]**2 for x in vec1.keys()])\n\t\t sum2 = sum([vec2[x]**2 for x in vec2.keys()])\n\t\t denominator = math.sqrt(sum1) * math.sqrt(sum2)\n\t\t if not denominator:\n\t\t return (0)\n\t\t else:\n\t\t return float(numerator) / denominator\n\n\t\tdef text_to_vector(text):\n\t\t words = WORD.findall(text)\n\t\t return Counter(words)\n\n\n\t\tvector1 = text_to_vector(text1)\n\t\tl=[]\n\t\tval=-11111\n\t\tfor i in range(len(text2)):\n\t\t\tvector2 = text_to_vector(text2[i][1])\n\t\t\tcosine = get_cosine(vector1, vector2)\n\t\t\tl.append(cosine)\n\t\t\t#print 'Project id: ',text2[i][0],'Cosine:', cosine\n\t\t\tif(cosine>val):\n\t\t\t\tval=cosine\n\t\t\t\tprojid=text2[i][0]\n\t\tfor i in range(len(text3)):\n\t\t\tif(text3[i][0]==projid):\n\t\t\t\tprojname=text3[i][1]\n\t\t\t\t#max=val\n\t\t#print 'Max Project id', projid, 'Similarity: ',val\n\t\t#print 'Recommendation for user: See project_id ', projid #Return the projid of max similarity between text1 and text2\n\t\t#projid_list=[]\n\t\t#projid_list.append(projid)\t\t\n\t\t#projid_returned = jsonify(projid_list)\n\n\t\treturn jsonify({ 'Projects recommended based on skills are' : projname}),200,headers\n\n","sub_path":"Project/Project/resources/skills.py","file_name":"skills.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
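The recommender scores each group by the cosine similarity of \w+ word-count vectors. A self-contained check of that metric on two skill strings like those in the record:

import math
import re
from collections import Counter

WORD = re.compile(r'\w+')

def cosine(text_a, text_b):
    va, vb = Counter(WORD.findall(text_a)), Counter(WORD.findall(text_b))
    num = sum(va[w] * vb[w] for w in set(va) & set(vb))
    den = math.sqrt(sum(v * v for v in va.values())) * math.sqrt(sum(v * v for v in vb.values()))
    return num / den if den else 0.0

print(round(cosine("Java, C programming", "C programming, Java, Python"), 2))  # 0.87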
+{"seq_id":"458148997","text":"# Given two vectors in three-dimensional space: (10,10,10) and (0,0,-10)\n# Write Python code that computes the length of a vector given its coordinates.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\na = np.array([10, 10, 10])\nb = np.array([0, 0, -10])\nc = a + b\n\nprint(math.sqrt(c[0]**2 + c[1]**2 + c[2]**2))\n\n# 3. Task: write Python code that plots:\n# a circle,\n# an ellipse,\n# a hyperbola.\n\n# Circle\nx = []\ny1 = []\ny2 = []\nR = 2\nfor i in range(500*2*R+1):\n    x1 = -R + i/500\n    x.append(x1)\n    y1.append(math.sqrt(R**2 - x1**2))\n    y2.append(-math.sqrt(R**2 - x1**2))\nplt.plot(x,y1)\nplt.plot(x,y2)\nplt.xlabel(\"x axis\")\nplt.ylabel(\"y axis\")\nplt.ylim(-3,3)\nplt.xlim(-4.3,4.3)\nplt.grid(True)\n\nplt.show()\n\n# Ellipse\nx = []\ny1 = []\ny2 = []\na = 4\nb = 7\nfor i in range(500*2*a+1):\n    x1 = -a + i/500\n    x.append(x1)\n    y1.append(math.sqrt(1 - x1**2/a**2) * b)\n    y2.append(-math.sqrt(1 - x1**2/a**2) * b)\nplt.plot(x,y1)\nplt.plot(x,y2)\nplt.xlabel(\"x axis\")\nplt.ylabel(\"y axis\")\nplt.ylim(-10,10)\nplt.xlim(-4.3,4.3)\nplt.grid(True)\n\nplt.show()\n\n# Hyperbola\nx = []\ny1 = []\ny2 = []\na = 1\nb = 3\nfor i in range(5000*2*a):\n    x1 = a + i/500\n    x.append(x1)\n    y1.append(math.sqrt(x1**2/a**2 - 1) * b)\n    y2.append(-math.sqrt(x1**2/a**2 - 1) * b)\nplt.plot(x,y1)\nplt.plot(x,y2)\nplt.xlabel(\"x axis\")\nplt.ylabel(\"y axis\")\nplt.ylim(-5,5)\nplt.xlim(-5,5)\nplt.grid(True)\nplt.show()\n\n# Draw a 3D plot of two parallel planes.\nfrom pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = figure()\nax = Axes3D(fig)\nX = np.arange(-10, 10, 1)\nY = np.arange(-10, 10, 1)\nX, Y = np.meshgrid(X, Y)\nZ = X + 2*Y\nZ1 = X + 2*Y + 40\nax.plot_surface(X, Y, Z)\nax.plot_surface(X, Y, Z1)\nshow()\n\n# Draw a 3D plot of any two quadric surfaces.\n\nfrom pylab import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = figure()\nax = Axes3D(fig)\nX = np.arange(-10, 10, 1)\nY = np.arange(-10, 10, 1)\nX, Y = np.meshgrid(X, Y)\nZ = - np.sqrt(X**2 + Y**2)\nZ1 = X**2/10 + Y**2/10\nax.plot_surface(X, Y, Z)\nax.plot_surface(X, Y, Z1)\nshow()","sub_path":"lesson 3.py","file_name":"lesson 3.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
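The loops above trace each curve point by point with explicit square roots, which forces plotting the two branches separately (and np.linalg.norm(a + b) would replace the hand-written length formula). The parametric form with np.linspace draws the circle and ellipse in one pass; a hyperbola works the same way with cosh/sinh:

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 2 * np.pi, 400)
R, a, b = 2, 4, 7
plt.plot(R * np.cos(t), R * np.sin(t), label='circle')
plt.plot(a * np.cos(t), b * np.sin(t), label='ellipse')
plt.gca().set_aspect('equal')
plt.legend()
plt.grid(True)
plt.show()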
+{"seq_id":"379303599","text":"# -*- encoding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nversion = '1.0.1'\n\nsetup(name='sumdir',\n version=version,\n description=\"Display sizes of the current subdirectories\",\n long_description=\"\"\"\\\nDisplay sizes of the current subdirectories\"\"\",\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Environment :: Console\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords='',\n author='Ginés Martínez Sánchez',\n author_email='ginsmar@gmail.com',\n license='MIT',\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n ],\n entry_points=\"\"\"\n [console_scripts]\n sumdir = sumdir:main\n \"\"\",\n)\n","sub_path":"pypi_install_script/sumdir-1.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"226479544","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\ndef open_files(): #Функция открывает файл tokens, groups, docs, записывает каждый размеченный текстб добавляет в общий список. На выходе размеченные тексты такие: [текст1][текст2] и тд., а каждый текст [элемент1][элемент2] и т.д.\r\n with open('Documents.txt', 'r', encoding='utf-8') as documents:\r\n docs = []\r\n for line in documents:\r\n docs.append(line.split('\\t')[:2])\r\n with open('Tokens.txt', 'r', encoding='utf-8') as tokens_file:\r\n tokens = []\r\n for line in tokens_file:\r\n line = line[:-1]\r\n tokens.append(line.split('\\t'))\r\n for t in tokens:\r\n if len(t)==1:\r\n del t\r\n del t[4]\r\n with open('Groups.txt', 'r', encoding='utf-8') as groups_file:\r\n groups = []\r\n for line in groups_file:\r\n line = line[:-2]\r\n groups.append(line.split('\\t'))\r\n for g in groups:\r\n if len(g)==1:\r\n del g\r\n del g[1]\r\n parsed_t = []\r\n for d in docs[1:]:\r\n filepath = d[1].replace('/', '\\\\')\r\n filepath = filepath.replace('txt', 'conll')\r\n try:\r\n with open('parsed_testset\\\\{0}'.format(filepath), 'r', encoding='utf-8') as parsed_texts:\r\n a = []\r\n a.append(int(d[0]))\r\n\r\n for line in parsed_texts:\r\n line = line[:-2]\r\n a.append(line.split('\\t'))\r\n parsed_t.append(a)\r\n except:\r\n pass\r\n parsed_t.insert(0, ['id', 'word', 'lemma', 'part', 'part', 'gram','to_head', 'role', ' ', ' '])\r\n #Текст распарсенный [[file1], [file2], [[el1], [el2]]]\r\n return parsed_t, tokens, groups\r\nparsed_t, tokens, groups = open_files()\r\n\r\ndef unite_info(parsed_t): #Функция добавляет к каждому элементу из parsed_t id документа в начало\r\n parsed_t[0].insert(0, 'doc_id')\r\n for p in parsed_t[1:]:\r\n for element in p[1:]:\r\n element.insert(0, p[0])\r\n return parsed_t\r\nparsed_t = unite_info(parsed_t)\r\n\r\ndef clean_info(parsed_t): #очищает лишний данные (последние две колонки, повтор части речи)\r\n del parsed_t[0][-1]\r\n del parsed_t[0][-1]\r\n del parsed_t[0][5]\r\n for p in parsed_t[1:]:\r\n del p[0]\r\n for element in p:\r\n try:\r\n del element[-1]\r\n del element[-1]\r\n del element[5]\r\n except:\r\n pass\r\n return parsed_t\r\nparsed_t = clean_info(parsed_t)\r\n#print(parsed_t[:5])\r\n\r\ndef split_on(parsed_t): #Делит parsed_t на предложения, получаем в итоге список файлов, в каждом файле список предложений, в каждом предложении список элементов.\r\n full_split = []\r\n for p in parsed_t[1:]:\r\n splitted = [[]]\r\n for item in p:\r\n if item == [' '] or len(item)==0:\r\n splitted.append([])\r\n else:\r\n splitted[-1].append(item)\r\n full_split.append(splitted)\r\n return full_split\r\nfull_split = split_on(parsed_t)\r\n\r\ndef group_NP(NP, sent):\r\n to_head_id = NP[6]\r\n head = sent[int(to_head_id)-1]\r\n new_NP = []\r\n if ((head[4] == 'N' and head[5].startswith('Np')) or head[4] == 'S') and (head[5]!= 'SENT' and head[4]!='PUNC'): #условие проверяет, явл. 
ли именем собственным\r\n #doc id, NP, veersh, len в символах, rolev, gramv, id\r\n new_NP.append(NP[0])\r\n new_NP.append(head[2]+' '+NP[2])\r\n if head[4]=='S':\r\n new_NP.append(NP[3])\r\n else:\r\n new_NP.append(head[3])\r\n new_NP.append(2)\r\n new_NP.append(head[-1])\r\n if head[4] == 'S':\r\n new_NP.append(NP[5])\r\n else:\r\n new_NP.append(head[5])\r\n else:\r\n new_NP.append(NP[0])\r\n new_NP.append(NP[2])\r\n new_NP.append(NP[3])\r\n new_NP.append(1)\r\n new_NP.append(NP[-1])\r\n new_NP.append(NP[5])\r\n new_NP.append(NP[1])\r\n return new_NP\r\ndef find_NP(sent): #ищет ИГ в предложениях.\r\n NPs = []\r\n for element in sent:\r\n if (element[4] == 'N' or element[4]=='P') and element[-1]!='ROOT':\r\n NPs.append(element)\r\n return NPs\r\ndef groupall(NP, sent): #добавлять зависимые и смотреть кол-во cлов в группе\r\n id = NP[-1]\r\n for element in sent:\r\n if int(element[-2])==int(id) and(element[-1]=='опред'):\r\n NP[3]+=1\r\n else:\r\n pass\r\n return NP\r\nfinal = []\r\nfor text in full_split:\r\n for sent in text:\r\n new_NPs = []\r\n NPs = find_NP(sent)\r\n for NP in NPs:\r\n new_NP = group_NP(NP, sent)\r\n new_NPs.append(new_NP)\r\n\r\n for NP in new_NPs:\r\n new_NP = groupall(NP, sent)\r\n final.append(new_NP)\r\nfind_NP(full_split)\r\ndef process_NP(final):\r\n for NP in final:\r\n del NP[-1]\r\n return final\r\nfinal = process_NP(final)\r\ndef base(final): #Если иг или вершина повторяетя - 1, если нет - 0\r\n have = []\r\n for element in final:\r\n new = []\r\n new.append(element[1])\r\n new.append(element[0])\r\n new2 = []\r\n new2.append(element[1])\r\n new2.append(element[0])\r\n if (new2 in have ) or (new in have):\r\n element.append(1)\r\n else:\r\n have.append(new)\r\n have.append(new2)\r\n element.append(0)\r\n return final\r\nfinal=base(final) #doc id, content. 
lemma head, len in words, role, gram, base\r\ndef part(final):\r\n for element in final:\r\n if element[-2][0]=='N':\r\n element.append(1)\r\n elif element[-2][0]=='P':\r\n element.append(2)\r\n return final\r\nfinal = part(final)#doc id, content, lemma head, len in words, role, gram, base, part\r\ndef pr_com(final):#proper - 1, common - 2\r\n for element in final:\r\n if element[-3][1]=='p':\r\n element.append(1)\r\n else:\r\n element.append(2)\r\n return final\r\nfinal = pr_com(final) #doc id, content, lemma, len(words), role, gram, base, part, proper or common\r\ndef groups_pro(groups):\r\n new_groups = []\r\n for g in groups:\r\n group = []\r\n id = g[0]\r\n cont = g[6]\r\n group.append(id)\r\n group.append(cont)\r\n new_groups.append(group)\r\n return new_groups\r\ngroups = groups_pro(groups)\r\ndef groups_check(final, groups): #1 - singlton, 0-not\r\n for f in final:\r\n id = str(f[0])\r\n cont = f[1]\r\n for g in groups[1:]:\r\n if id ==g[0] and (cont in g[1]):\r\n f.append(0)\r\n break\r\n else:\r\n pass\r\n if len(f)==9:\r\n f.append(1)\r\n return final\r\nfinal = groups_check(final, groups)\r\ndef dele(final):\r\n for f in final:\r\n del f[2]\r\n del f[-5]\r\n return final\r\nfinal = dele(final) # doc id, cont, len in words, role, base, part, proper or common, singleton or not\r\ndef del_role(final):\r\n for f in final:\r\n del f[3]\r\n return final\r\nfinal = del_role(final) #doc id, cont, len in words, base, part, proper or common, singleton or not\r\n\r\ndef group_list(lists):#группирует списки по документам, чтобы делить на обучающую и тестовую тексты, а не ИГ.\r\n finals = []\r\n d = []\r\n for e in lists:\r\n if e[0] in d:\r\n pass\r\n else:\r\n d.append(e[0])\r\n for id in d:\r\n to_final = []\r\n for el in lists:\r\n if el[0]==id:\r\n to_final.append(el)\r\n else:\r\n pass\r\n finals.append(to_final)\r\n return finals\r\nfinal = group_list(final)\r\nfinal_train, final_test = train_test_split(final, test_size = 0.3, random_state = 10)\r\ndef for_pand(final):\r\n Y = []\r\n Xs = []\r\n name = []\r\n for text in final:\r\n for element in text:\r\n Y.append(element[-1])\r\n name.append(element[1])\r\n X = element[2:-1]\r\n Xs.append(X)\r\n return Y, Xs, name\r\ny_train, X_train, name_train = for_pand(final_train)\r\ny_test, X_test, name_test = for_pand(final_test)\r\n\r\ndata = pd.DataFrame(X_train + X_test)\r\ndata.columns = ['len'] + ['base:0-n,1-y'] + ['(Noun-1/Pr-2)'] + ['Pr-1/com-2']\r\ndata.index = name_train+name_test\r\nprint(data[-5:])\r\nprint(data.shape)\r\n\r\nfrom sklearn import ensemble\r\nimport sklearn\r\nrf = ensemble.RandomForestClassifier(n_estimators=100, random_state=20) #Baseline признаки\r\nrf.fit(X_train, y_train)\r\n\r\nerr_train = np.mean(y_train != rf.predict(X_train))\r\nerr_test = np.mean(y_test != rf.predict(X_test))\r\nprint(err_train, err_test)\r\nreport = sklearn.metrics.classification_report(y_test, rf.predict(X_test), target_names=['NoSingl', 'Singleton'])\r\nprint(report)\r\nprint(sklearn.metrics.confusion_matrix(y_test, rf.predict(X_test)))\r\n\r\ndef singletones(X_test, name_test): #Вершины синглтонов в отдельный файл.\r\n m = rf.predict(X_test)\r\n Singletones = []\r\n with open('Singletones.txt', 'a', encoding='utf-8') as file:\r\n for i in range(len(m)-1):\r\n if m[i]==1:\r\n file.write(name_test[i])\r\n file.write('\\n')\r\n Singletones.append(name_test[i])\r\n return Singletones\r\nSingletones = singletones(X_test, 
name_test)\r\n","sub_path":"Singletones.py","file_name":"Singletones.py","file_ext":"py","file_size_in_byte":9899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
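group_list in Singletones.py exists so that train_test_split separates whole documents rather than individual noun phrases, which prevents leakage between train and test. scikit-learn ships the same idea as GroupShuffleSplit; a sketch with hypothetical feature rows and document ids:

import numpy as np
from sklearn.model_selection import GroupShuffleSplit

X = np.arange(12).reshape(6, 2)      # hypothetical NP feature rows
y = np.array([0, 1, 0, 1, 1, 0])
docs = np.array([1, 1, 2, 2, 3, 3])  # document id per row

splitter = GroupShuffleSplit(n_splits=1, test_size=0.3, random_state=10)
train_idx, test_idx = next(splitter.split(X, y, groups=docs))
# No document id appears on both sides of the split.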
+{"seq_id":"575613539","text":"#!/bin/env python\n# This script lets you ACK a Nagios alert \n# by replying to an email with the word \"ACK\". \n\n# Crudely read the email line by line from stdin and send an \n# acknowledgement command to Nagios if appropriate.\n# Requires a procmail recipe to feed it.\n\nimport os, re, sys, time\n\nack = None\ncmd_fifo = \"/var/lib/nagios3/rw/nagios.cmd\"\n\nfor line in sys.stdin:\n if line.startswith('Subject'):\n subject = line.strip()\n elif re.match('ack', line, re.IGNORECASE):\n ack = 'True'\n elif line.startswith('From:'):\n from_addr = line.strip()\n\nif ack is None:\n exit()\n\nmylist = subject.split(\" \")\nnow = int(time.time())\n\nif 'Host' in mylist: # Host down alerts.\n cmd = (\"[%s] ACKNOWLEDGE_HOST_PROBLEM;%s;1;1;1;email;%s;ack'd by email\" % (now, mylist[2], from_addr))\nelse:\t # Service Alerts.\n cmd = (\"[%s] ACKNOWLEDGE_SVC_PROBLEM;%s;%s;1;1;1;%s;ack'd by email\" % (now, mylist[2], mylist[3], from_addr))\n\nf = open(cmd_fifo, \"w\")\nf.write(cmd)\nf.close()\n","sub_path":"nagios/parsemail.py","file_name":"parsemail.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
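One subtlety in the Nagios script: re.match anchors at the start of the line, so only replies whose body line begins with "ack" trigger the acknowledgement; re.search would also accept "Please ACK this":

import re
print(bool(re.match('ack', 'Please ACK this', re.IGNORECASE)))   # False
print(bool(re.search('ack', 'Please ACK this', re.IGNORECASE)))  # True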
+{"seq_id":"588333929","text":"import os\nimport shutil\ndef rename(path):\n    i = 1\n    for file in os.listdir(path):\n        filedir = path + '\\\\' + file\n        for files in os.listdir(filedir):\n            filesdir = filedir + '\\\\' + files\n            print(filesdir)\n            if os.path.isdir(filesdir):\n                for video in os.listdir(filesdir):\n                    if video[-3:] == 'blv':\n                        videodir = filesdir + '\\\\' + video\n                        os.rename(videodir, str(i) + '.blv')\n                        i += 1\n        # remove the folder once its videos have been moved out; the original\n        # os.system('rd /s /q filedir') passed the literal string 'filedir'\n        # to the shell instead of the variable's value\n        shutil.rmtree(filedir)\nrename(r'C:\Users\lsjsg\Desktop\download\test')","sub_path":"python/others/get_video_from_bilibili_file.py","file_name":"get_video_from_bilibili_file.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"649804042","text":"# July 2020\r\n#Take two lists, say for example these two:\r\n#and write a program that returns a list that contains only the elements that are common between the lists (without duplicates). Make sure your program works on two lists of different sizes.\r\n\r\na = [5, 6, 5, 20, 20, 20, 3,3]\r\nb = [20, 5, 3, 4, 5, 6]\r\nc=[]\r\nfor i,vi in enumerate(a):\r\n #print(i,vi)\r\n for j,vj in enumerate(b):\r\n #print(j,vj)\r\n if vi==vj:\r\n c.append(vi)\r\nprint(c)\r\ncclean=[]\r\nfor k in c:\r\n if k not in cclean:\r\n cclean.append(k)\r\nprint(cclean)\r\n","sub_path":"PythonExercise/Ex21E.py","file_name":"Ex21E.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
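The nested loops plus the dedup pass above are O(n*m); sets produce the same unique common elements directly (result order differs from the loop version, hence the sort):

a = [5, 6, 5, 20, 20, 20, 3, 3]
b = [20, 5, 3, 4, 5, 6]
print(sorted(set(a) & set(b)))  # [3, 5, 6, 20]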
+{"seq_id":"423472505","text":"\"\"\"\nThe core Dshell library\n\nThis library contains the base level plugins that all others will inherit.\n\nPacketPlugin contains attributes and functions for plugins that work with\nindividual packets.\n\nConnectionPlugin inherits from PacketPlugin and includes additional functions\nfor handling reassembled connections.\n\nIt also contains class definitions used by the plugins, including definitions\nfor Blob, Connection, and Packet.\n\n\"\"\"\n\n# standard Python imports\nimport datetime\nimport inspect\nimport ipaddress\nimport logging\nimport os\n#import pprint\nimport struct\nimport sys\nfrom collections import defaultdict\nfrom multiprocessing import Value\n\n# Dshell imports\nfrom dshell.output.output import Output\nfrom dshell.dshellgeoip import DshellGeoIP, DshellFailedGeoIP\n\n# third-party imports\nimport pcapy\nfrom pypacker.layer12 import can, ethernet, ieee80211, linuxcc, ppp, pppoe, radiotap\nfrom pypacker.layer3 import ip, ip6, icmp, icmp6\nfrom pypacker.layer4 import tcp, udp\n\nlogging.basicConfig(format=\"%(levelname)s (%(name)s) - %(message)s\")\nlogger = logging.getLogger(\"dshell.core\")\n\n__version__ = \"3.1.3\"\n\nclass SequenceNumberError(Exception):\n    \"\"\"\n    Raised when reassembling connections and data is missing or overlapping.\n    See Blob.reassemble function\n    \"\"\"\n    pass\n\nclass DataError(Exception):\n    \"\"\"\n    Raised when any data being handled just isn't right.\n    For example, invalid headers in httpplugin.py\n    \"\"\"\n    pass\n\n\n# Create GeoIP reference object\ntry:\n    geoip = DshellGeoIP(logger=logging.getLogger(\"dshellgeoip.py\"))\nexcept FileNotFoundError:\n    logger.error(\"Could not find GeoIP data files! Country and ASN lookups will not be possible. Check README for instructions on where to find and install necessary data files.\")\n    geoip = DshellFailedGeoIP()\n\n\ndef print_handler_exception(e, plugin, handler):\n    \"\"\"\n    A convenience function to display an error message when a handler raises\n    an exception.\n\n    If using --debug, it will print a full traceback.\n\n    Args:\n        e: the exception object\n        plugin: the plugin object\n        handler: name of the handler function\n    \"\"\"\n    etype = e.__class__.__name__\n    if logger.isEnabledFor(logging.DEBUG):\n        logger.error(\"The {!s} for the {!r} plugin raised an exception and failed! ({}: {!s})\".format(handler, plugin.name, etype, e))\n        logger.exception(e)\n    else:\n        logger.error(\"The {!s} for the {!r} plugin raised an exception and failed! ({}: {!s}) Use --debug for more details.\".format(handler, plugin.name, etype, e))\n\n\nclass PacketPlugin(object):\n    \"\"\"\n    Base level class that plugins will inherit.\n\n    This plugin handles individual packets. 
To handle reconstructed\n connections, use the ConnectionPlugin.\n\n Attributes:\n name: the name of the plugin\n description: short description of the plugin (used with decode -l)\n longdescription: verbose description of the plugin (used with -h)\n bpf: default BPF to apply to traffic entering plugin\n compiled_bpf: a compiled BPF for pcapy, usually created in decode.py\n vlan_bpf: boolean that tells whether BPF should be compiled with\n VLAN support\n author: preferably, the initials of the plugin's author\n seen_packet_count: number of packets this plugin has seen\n handled_packet_count: number of packets this plugin has passed\n through a handler function\n seen_conn_count: number of connections this plugin has seen\n handled_conn_count: number of connections this plugin has passed\n through a handler function\n out: output module instance\n raw_decoder: pypacker module to use for unpacking packet\n link_layer_type: numeric label for link layer\n striplayers: number of layers to automatically strip before handling\n (such as PPPoE, IP-over-IP, etc.)\n defrag_ip: rebuild fragmented IP packets (default: True)\n \"\"\"\n\n IP_PROTOCOL_MAP = dict((v, k[9:]) for k, v in ip.__dict__.items() if type(v) == int and k.startswith('IP_PROTO_') and k != 'IP_PROTO_HOPOPTS')\n\n def __init__(self, **kwargs):\n self.name = kwargs.get('name', __name__)\n self.description = kwargs.get('description', '')\n self.longdescription = kwargs.get('longdescription', self.description)\n self.bpf = kwargs.get('bpf', '')\n self.compiled_bpf = kwargs.get('compiled_bpf', None)\n self.vlan_bpf = kwargs.get(\"vlan_bpf\", True)\n self.author = kwargs.get('author', '')\n # define overall counts as multiprocessing Values for --parallel\n self.seen_packet_count = Value('i', 0)\n self.handled_packet_count = Value('i', 0)\n self.seen_conn_count = Value('i', 0)\n self.handled_conn_count = Value('i', 0)\n # dict of options specific to this plugin in format\n # 'optname':{configdict} translates to --pluginname_optname\n self.optiondict = kwargs.get('optiondict', {})\n\n # queues used by decode.py\n # if a handler decides a packet is worth keeping, it is placed in a\n # queue and later grabbed by decode.py to pass to subplugins\n self.raw_packet_queue = []\n self.packet_queue = []\n\n # self.out holds the output plugin instance\n # can be overwritten in decode.py by user selection\n self.out = kwargs.get('output', Output(label=__name__))\n\n # capture options\n # these can be updated with set_link_layer_type function\n self.raw_decoder = ethernet.Ethernet # assumed link-layer type\n self.link_layer_type = 1 # assume Ethernet\n # strip extra layers before IP/IPv6? 
(such as PPPoE, IP-over-IP, etc..)\n self.striplayers = 0\n # rebuild fragmented IP packets\n self.defrag_ip = True\n\n # holder for the pcap file being processing\n self.current_pcap_file = None\n\n # get the list of functions for this plugin\n # this is used in decode.py\n self.members = tuple([x[0] for x in inspect.getmembers(self, inspect.ismethod)])\n\n # a holder for IP packet fragments when attempting to reassemble them\n self.packet_fragments = defaultdict(dict)\n\n def write(self, *args, **kwargs):\n \"\"\"\n Sends information to the output formatter, after adding some\n additional fields.\n \"\"\"\n if 'plugin' not in kwargs:\n kwargs['plugin'] = self.name\n if 'pcapfile' not in kwargs:\n kwargs['pcapfile'] = self.current_pcap_file\n self.out.write(*args, **kwargs)\n\n def log(self, msg, level=logging.INFO):\n '''\n logs msg argument at specified level\n (default of INFO is for -v/--verbose output)\n\n Arguments:\n msg: text string to log\n level: logging level (default: logging.INFO)\n '''\n self.out.log(msg, level=level)\n\n def debug(self, msg):\n '''logs msg argument at debug level'''\n self.log(msg, level=logging.DEBUG)\n\n def warn(self, msg):\n '''logs msg argument at warning level'''\n self.log(msg, level=logging.WARN)\n\n def error(self, msg):\n '''logs msg argument at error level'''\n self.log(msg, level=logging.ERROR)\n\n def __str__(self):\n return \"<{}: {}>\".format(\"Plugin\", self.name)\n\n def __repr__(self):\n return '<{}: {}/{}/{}>'.format(\"Plugin\", self.name, self.bpf,\n ','.join([('%s=%s' % (x, str(self.__dict__.get(x)))) for x in self.optiondict]))\n\n def set_link_layer_type(self, datalink):\n \"\"\"\n Attempts to set the raw_decoder attribute based on the capture file's\n datalink type, which is fetched by pcapy when used in decode.py. It\n takes one argument: the numeric value of the link layer.\n\n http://www.tcpdump.org/linktypes.html\n \"\"\"\n # NOTE: Not all of these have been tested\n # TODO add some more of these\n self.link_layer_type = datalink\n if datalink == 1:\n self.raw_decoder = ethernet.Ethernet\n elif datalink == 9:\n self.raw_decoder = ppp.PPP\n elif datalink == 51:\n self.raw_decoder = pppoe.PPPoE\n elif datalink == 105:\n self.raw_decoder = ieee80211.IEEE80211\n elif datalink == 113:\n self.raw_decoder = linuxcc.LinuxCC\n elif datalink == 127:\n self.raw_decoder = radiotap.Radiotap\n elif datalink == 204:\n self.raw_decoder = ppp.PPP\n elif datalink == 227:\n self.raw_decoder = can.CAN\n elif datalink == 228:\n self.raw_decoder = ip.IP\n elif datalink == 229:\n self.raw_decoder = ip6.IP6\n else:\n # by default, assume Ethernet and hope for the best\n self.link_layer_type = 1\n self.raw_decoder = ethernet.Ethernet\n self.debug(\"Datalink input: {!s}. 
Setting raw_decoder to {!r}, link_layer_type to {!s}\".format(datalink, self.raw_decoder, self.link_layer_type))\n\n def recompile_bpf(self):\n \"Compile the BPF stored in the .bpf attribute\"\n # This function is normally only called by the decode.py script,\n # but can also be called by plugins that need to dynamically update\n # their filter.\n if not self.bpf:\n logger.debug(\"Cannot compile BPF: .bpf attribute not set for plugin {!r}.\".format(self.name))\n self.compiled_bpf = None\n return\n\n # Add VLAN wrapper, if necessary\n if self.vlan_bpf:\n bpf = \"({0}) or (vlan and {0})\".format(self.bpf)\n else:\n bpf = self.bpf\n self.debug(\"Compiling BPF as {!r}\".format(bpf))\n\n # Compile BPF and handle any expected errors\n try:\n self.compiled_bpf = pcapy.compile(\n self.link_layer_type, 65536, bpf, True, 0xffffffff\n )\n except pcapy.PcapError as e:\n if str(e).startswith(\"no VLAN support for data link type\"):\n logger.error(\"Cannot use VLAN filters for {!r} plugin. Recommend running with --no-vlan argument.\".format(self.name))\n elif str(e) == \"syntax error\":\n logger.error(\"Fatal error when compiling BPF: {!r}\".format(bpf))\n sys.exit(1)\n else:\n raise e\n\n def ipdefrag(self, pkt):\n \"IP fragment reassembly\"\n if isinstance(pkt, ip.IP): # IPv4\n f = self.packet_fragments[(pkt.src, pkt.dst, pkt.id)]\n f[pkt.offset] = pkt\n\n if not pkt.flags & 0x1:\n data = b''\n for key in sorted(f.keys()):\n data += f[key].body_bytes\n del self.packet_fragments[(pkt.src, pkt.dst, pkt.id)]\n newpkt = ip.IP(pkt.header_bytes + data)\n newpkt.bin(update_auto_fields=True) # refresh checksum\n return newpkt\n\n elif isinstance(pkt, ip6.IP6): # IPv6\n # TODO handle IPv6 offsets https://en.wikipedia.org/wiki/IPv6_packet#Fragment\n return pkt\n\n def handle_plugin_options(self):\n \"\"\"\n A placeholder.\n\n This function is called immediately after plugin args are processed\n and set in decode.py. A plugin can overwrite this function to perform\n actions based on the arg values as soon as they are set, before\n decode.py does any further processing (e.g. updating a BPF based on\n provided arguments before handling --ebpf and --bpf flags).\n \"\"\"\n pass\n\n def _premodule(self):\n \"\"\"\n _premodule is called before capture starts or files are read. It will\n attempt to call the child plugin's premodule function.\n \"\"\"\n self.premodule()\n self.out.setup()\n self.debug(str(self.__dict__))\n\n def premodule(self):\n \"\"\"\n A placeholder.\n\n A plugin can overwrite this function to perform an action before\n capture starts or files are read.\n \"\"\"\n pass\n\n def _postmodule(self):\n \"\"\"\n _postmodule is called when capture ends. It will attempt to call the\n child plugin's postmodule function. 
It will also print stats if in\n debug mode.\n \"\"\"\n self.postmodule()\n self.out.close()\n self.log(\"{} seen packets, {} handled packets, {} seen connections, {} handled connections\".format(self.seen_packet_count.value, self.handled_packet_count.value, self.seen_conn_count.value, self.handled_conn_count.value))\n\n def postmodule(self):\n \"\"\"\n A placeholder.\n\n A plugin can overwrite this function to perform an action after\n capture ends or all files are processed.\n \"\"\"\n pass\n\n def _prefile(self, infile=None):\n \"\"\"\n _prefile is called just before an individual file is processed.\n Stores the current pcap file string and calls the child plugin's\n prefile function.\n \"\"\"\n self.current_pcap_file = infile\n self.prefile(infile)\n self.log('working on file \"{}\"'.format(infile))\n\n def prefile(self, infile=None):\n \"\"\"\n A placeholder.\n\n A plugin will be able to overwrite this function to perform an action\n before an individual file is processed.\n\n Arguments:\n infile: filepath or interface that will be processed\n \"\"\"\n pass\n\n def _postfile(self):\n \"\"\"\n _postfile is called just after an individual file is processed.\n It may expand some day, but for now it just calls a child's postfile\n function.\n \"\"\"\n self.postfile()\n\n def postfile(self):\n \"\"\"\n A placeholder.\n\n A plugin will be able to overwrite this function to perform an action\n after an individual file is processed.\n \"\"\"\n pass\n\n def _raw_handler(self, pktlen, pkt, ts):\n \"\"\"\n Accepts raw packet data (pktlen, pkt, ts), and handles decapsulation\n and layer stripping.\n\n Then, it passes the massaged data to the child's raw_handler function,\n if additional custom handling is necessary. The raw_handler function\n should return (pktlen, pkt, ts) if it wishes to continue with the call\n chain. Otherwise, return None.\n \"\"\"\n with self.seen_packet_count.get_lock():\n self.seen_packet_count.value += 1\n # decode with the raw decoder (probably ethernet.Ethernet)\n pkt = self.raw_decoder(pkt)\n\n # strip any intermediate layers (e.g. PPPoE, etc.)\n # NOTE: make sure only the first plugin in a chain has striplayers set\n for _ in range(self.striplayers):\n try:\n pkt = pkt.upper_layer\n except AttributeError:\n # No more layers to strip\n break\n\n # call raw_handler and check its output\n # decode.py will continue down the chain if it returns proper output or\n # display a warning if it doesn't return the correct things\n try:\n raw_handler_out = self.raw_handler(pktlen, pkt, ts)\n except Exception as e:\n print_handler_exception(e, self, 'raw_handler')\n return\n failed_msg = \"The output of {} raw_handler must be (pktlen, pkt, ts) or a list of such lists! 
Further packet refinement and plugin chaining will not be possible\".format(self.name)\n if isinstance(raw_handler_out, (list, tuple)):\n if len(raw_handler_out) == 3 and (\n isinstance(raw_handler_out[0], type(pktlen)) and\n isinstance(raw_handler_out[1], type(pkt)) and\n isinstance(raw_handler_out[2], type(ts))):\n # If it returns one properly formed response, queue and continue\n self.raw_packet_queue.append(raw_handler_out)\n else:\n # If it returns several responses, check them individually\n for rhout in raw_handler_out:\n if isinstance(rhout, (list, tuple)) and \\\n len(rhout) == 3 and \\\n isinstance(rhout[0], type(pktlen)) and \\\n isinstance(rhout[1], type(pkt)) and \\\n isinstance(rhout[2], type(ts)):\n self.raw_packet_queue.append(rhout)\n elif rhout:\n self.warn(failed_msg)\n elif raw_handler_out:\n self.warn(failed_msg)\n\n\n def raw_handler(self, pktlen, pkt, ts):\n \"\"\"\n A placeholder.\n\n Plugins will be able to overwrite this to perform custom activities on\n raw packet data, such as decapsulation or decryption, before it\n becomes further refined down the chain. It should return the same\n arguments: pktlen, pkt, ts\n\n Generally speaking, however, this should never be overwritten unless\n there is a very, very good reason for it.\n\n Arguments:\n pktlen: length of packet\n pkt: raw bytes of the packet\n ts: timestamp of packet\n \"\"\"\n return pktlen, pkt, ts\n\n def _packet_handler(self, pktlen, pkt, ts):\n \"\"\"\n Accepts the output of raw_handler, pulls out addresses, and converts\n it all into a dshell.Packet object before calling the child's\n packet_handler function.\n \"\"\"\n # Attempt to perform defragmentation\n if isinstance(pkt.upper_layer, (ip.IP, ip6.IP6)):\n ipp = pkt.upper_layer\n if self.defrag_ip:\n ipp = self.ipdefrag(ipp)\n if not ipp:\n # we do not yet have all of the packet fragments, so move\n # on to next packet for now\n return\n else:\n pkt.upper_layer = ipp\n\n # Initialize a Packet object\n # This will be populated with values as we continue through\n # the function and eventually be passed to packet_handler\n packet = Packet(self, pktlen, pkt, ts)\n\n # call packet_handler and return its output\n # decode.py will continue down the chain if it returns anything\n try:\n packet_handler_out = self.packet_handler(packet)\n except Exception as e:\n print_handler_exception(e, self, 'packet_handler')\n return\n failed_msg = \"The output from {} packet_handler must be of type dshell.Packet or a list of such objects! Handling connections or chaining from this plugin may not be possible.\".format(self.name)\n if isinstance(packet_handler_out, (list, tuple)):\n for phout in packet_handler_out:\n if isinstance(phout, Packet):\n self.packet_queue.append(phout)\n with self.handled_packet_count.get_lock():\n self.handled_packet_count.value += 1\n elif phout:\n self.warn(failed_msg)\n elif isinstance(packet_handler_out, Packet):\n self.packet_queue.append(packet_handler_out)\n with self.handled_packet_count.get_lock():\n self.handled_packet_count.value += 1\n elif packet_handler_out:\n self.warn(failed_msg)\n\n\n def packet_handler(self, pkt):\n \"\"\"\n A placeholder.\n\n Plugins will be able to overwrite this to perform custom activities on\n Packet data.\n\n It should return a Packet object for functions further down the chain\n (i.e. 
connection_handler and/or blob_handler)\n\n Arguments:\n pkt: a Packet object\n \"\"\"\n return pkt\n\n\n\nclass ConnectionPlugin(PacketPlugin):\n \"\"\"\n Base level class that plugins will inherit.\n\n This plugin reassembles connections from packets.\n \"\"\"\n\n def __init__(self, **kwargs):\n PacketPlugin.__init__(self, **kwargs)\n\n # similar to packet_queue and raw_packet_queue in superclass\n self.connection_queue = []\n\n # dictionary to store packets for connections according to addr()\n self.connection_tracker = {}\n # maximum number of blobs a connection will store before calling\n # connection_handler\n # it defaults to infinite, but this should be lowered for huge datasets\n self.maxblobs = float(\"inf\") # infinite\n # how long do we wait before deciding a connection is \"finished\"\n # time is checked by iterating over cached connections and checking if\n # the timestamp of the connection's last packet is older than the\n # timestamp of the current packet, minus this value\n self.connection_timeout = datetime.timedelta(hours=1)\n\n def _connection_handler(self, pkt):\n \"\"\"\n Accepts a single Packet object and tracks the connection it belongs to.\n\n If it is the first packet in a connection, it creates a new Connection\n object and passes it to connection_init_handler. Otherwise, it will\n find the existing Connection in self.connection_tracker.\n\n The Connection will then be passed to connection_handler.\n\n If a connection changes direction with this packet, blob_handler will\n be called.\n\n Finally, if this packet is a FIN or RST, it will determine if the\n connection should close.\n \"\"\"\n # Sort the addr value for consistent dictionary key purposes\n addr = tuple(sorted(pkt.addr))\n\n # If this is a new connection, initialize it and call the init handler\n if addr not in self.connection_tracker:\n conn = Connection(self, pkt)\n self.connection_tracker[addr] = conn\n try:\n self.connection_init_handler(conn)\n except Exception as e:\n print_handler_exception(e, self, 'connection_init_handler')\n return\n with self.seen_conn_count.get_lock():\n self.seen_conn_count.value += 1\n else:\n conn = self.connection_tracker[addr]\n\n if conn.stop:\n # This connection was flagged to not be tracked\n return\n\n # If connection data is about to change, we set it to a \"dirty\" state\n # for future calls to connection_handler\n if pkt.data:\n conn.handled = False\n\n # Check and update the connection's current state\n if pkt.tcp_flags in (tcp.TH_SYN, tcp.TH_ACK, tcp.TH_SYN|tcp.TH_ACK, tcp.TH_SYN|tcp.TH_ACK|tcp.TH_ECE):\n # if new connection and a handshake is taking place, set to \"init\"\n if not conn.client_state:\n conn.client_state = \"init\"\n if not conn.server_state:\n conn.server_state = \"init\"\n else:\n # otherwise, if the connection isn't closed, set to \"established\"\n # TODO do we care about \"listen\", \"syn-sent\", and other in-between states?\n if conn.client_state not in ('finishing', 'closed'):\n conn.client_state = \"established\"\n if conn.server_state not in ('finishing', 'closed'):\n conn.server_state = \"established\"\n\n # Add the packet to the connection\n # If the direction changed, a Blob will be returned for handling\n # Note: The Blob will not be reassembled ahead of time. 
reassemble()\n # must be run inside the blob_handler to catch any unwanted exceptions.\n previous_blob = conn.add_packet(pkt)\n if previous_blob:\n try:\n blob_handler_out = self._blob_handler(conn, previous_blob)\n except Exception as e:\n print_handler_exception(e, self, 'blob_handler')\n return\n if (blob_handler_out\n and not isinstance(blob_handler_out[0], Connection)\n and not isinstance(blob_handler_out[1], Blob)):\n self.warn(\"The output from {} blob_handler must be of type (dshell.Connection, dshell.Blob)! Chaining plugins from here may not be possible.\".format(self.name))\n blob_handler_out = None\n # If the blob_handler decides this Blob isn't interesting, it sets\n # the hidden flag, which excludes it and its packets from further\n # processing along the plugin chain\n if not blob_handler_out:\n conn.blobs[-2].hidden = True\n\n # Check if a side of the connection is attempting to close the\n # connection using a FIN or RST packet. Once both sides make a\n # closing gesture, the connection is considered closed and handled\n if pkt.tcp_flags and pkt.tcp_flags & (tcp.TH_RST | tcp.TH_FIN):\n if pkt.sip == conn.clientip:\n conn.client_state = \"closed\"\n else:\n conn.server_state = \"closed\"\n\n if conn.connection_closed:\n # Both sides have closed the connection\n self._close_connection(conn, full=True)\n\n elif len(conn.blobs) > self.maxblobs:\n # Max blobs hit, so we will run connection_handler and decode.py\n # will clear the connection's blob cache\n self._close_connection(conn)\n\n # The current connection is done processing. Now, look over existing\n # connections and look for any that have timed out.\n # This is based on comparing the time of the current packet, minus\n # self.connection_timeout, to each connection's current endtime value.\n for addr, conn in self.connection_tracker.items():\n if conn.handled:\n continue\n if conn.endtime < (pkt.dt - self.connection_timeout):\n self._close_connection(conn)\n\n\n def _close_connection(self, conn, full=False):\n \"\"\"\n Runs through some standard actions to close a connection\n \"\"\"\n try:\n connection_handler_out = self.connection_handler(conn)\n except Exception as e:\n print_handler_exception(e, self, 'connection_handler')\n return None\n conn.handled = True\n if connection_handler_out and not isinstance(connection_handler_out, Connection):\n self.warn(\"The output from {} connection_handler must be of type dshell.Connection! Chaining plugins from here may not be possible.\".format(self.name))\n connection_handler_out = None\n if connection_handler_out:\n self.connection_queue.append(connection_handler_out)\n with self.handled_conn_count.get_lock():\n self.handled_conn_count.value += 1\n if full:\n try:\n self.connection_close_handler(conn)\n except Exception as e:\n print_handler_exception(e, self, 'connection_close_handler')\n return connection_handler_out\n\n\n def _cleanup_connections(self):\n \"\"\"\n decode.py will often reach the end of packet capture before all of the\n connections are closed properly. 
This function is called at the end\n of things to process those dangling connections.\n\n NOTE: Because the connections did not close cleanly,\n connection_close_handler will not be called.\n \"\"\"\n for addr, conn in self.connection_tracker.items():\n if not conn.stop and not conn.handled:\n # try to process the final blob in the connection\n try:\n blob_handler_out = self._blob_handler(conn, conn.blobs[-1])\n except Exception as e:\n print_handler_exception(e, self, 'blob_handler')\n blob_handler_out = None\n if (blob_handler_out\n and not isinstance(blob_handler_out[0], Connection)\n and not isinstance(blob_handler_out[1], Blob)):\n self.warn(\"The output from {} blob_handler must be of type (dshell.Connection, dshell.Blob)! Chaining plugins from here may not be possible.\".format(self.name))\n blob_handler_out = None\n if not blob_handler_out:\n conn.blobs[-1].hidden = True\n\n # then, handle the connection itself\n connection_handler_out = self._close_connection(conn)\n yield connection_handler_out\n\n def _purge_connections(self):\n \"\"\"\n When finished with handling a pcap file, calling this will clear all\n caches in preparation for next file.\n \"\"\"\n self.connection_queue = []\n self.connection_tracker = {}\n\n def _blob_handler(self, conn, blob):\n \"\"\"\n Accepts a Connection and a Blob.\n\n It doesn't really do anything except call the blob_handler and is only\n here for consistency and possible future features.\n \"\"\"\n return self.blob_handler(conn, blob)\n\n def blob_handler(self, conn, blob):\n \"\"\"\n A placeholder.\n\n Plugins will be able to overwrite this to perform custom activities on\n Blob data.\n\n It should return a Connection object and a Blob object for functions\n further down the chain.\n\n Args:\n conn: Connection object\n blob: Blob object\n \"\"\"\n return conn, blob\n\n def connection_init_handler(self, conn):\n \"\"\"\n A placeholder.\n\n Plugins will be able to overwrite this to perform custom activities on\n a connection when it is first seen.\n\n Args:\n conn: Connection object\n \"\"\"\n return\n\n def connection_handler(self, conn):\n \"\"\"\n A placeholder.\n\n Plugins will be able to overwrite this to perform custom activities on\n Connection data.\n\n It should return a Connection object for functions further down the chain.\n\n Args:\n conn: Connection object\n \"\"\"\n return conn\n\n def connection_close_handler(self, conn):\n \"\"\"\n A placeholder.\n\n Plugins will be able to overwrite this to perform custom activities on\n a TCP connection when it is cleanly closed with RST or FIN.\n\n Args:\n conn: Connection object\n \"\"\"\n return\n\nclass Packet(object):\n \"\"\"\n Class for holding data of individual packets\n\n def __init__(self, plugin, pktlen, pkt, ts):\n\n Args:\n plugin: an instance of the plugin creating this packet\n pktlen: length of packet\n pkt: pypacker object for the packet\n ts: timestamp of packet\n\n Attributes:\n plugin: name of plugin creating Packet\n ts: timestamp of packet\n dt: datetime of packet\n pkt: pypacker object for the packet\n rawpkt: raw bytestring of the packet\n pktlen: length of packet\n byte_count: length of packet body\n sip: source IP\n dip: destination IP\n sip_bytes: source IP as bytes\n dip_bytes: destination IP as bytes\n sport: source port\n dport: destination port\n smac: source MAC\n dmac: destination MAC\n sipcc: source IP country code\n dipcc: dest IP country code\n siplat: source IP latitude\n diplat: dest IP latitude\n siplon: source IP longitude\n diplon: dest IP longitude\n sipasn: 
source IP ASN\n dipasn: dest IP ASN\n protocol: text version of protocol in layer-3 header\n protocol_num: numeric version of protocol in layer-3 header\n data: data of the packet after TCP layer, or highest layer\n sequence_number: TCP sequence number, or None\n ack_number: TCP ACK number, or None\n tcp_flags: TCP header flags, or None\n \"\"\"\n\n def __init__(self, plugin, pktlen, pkt, ts):\n self.plugin = plugin.name\n self.ts = ts\n self.dt = datetime.datetime.fromtimestamp(ts)\n self.pkt = pkt\n self.rawpkt = pkt.bin()\n self.pktlen = pktlen\n self.byte_count = None\n self.sip = None\n self.dip = None\n self.sport = None\n self.dport = None\n self.smac = None\n self.dmac = None\n self.sipcc = None\n self.dipcc = None\n self.siplat = None\n self.diplat = None\n self.siplon = None\n self.diplon = None\n self.sipasn = None\n self.dipasn = None\n self.protocol = None\n self.protocol_num = None\n self.data = b''\n self.sequence_number = None\n self.ack_number = None\n self.tcp_flags = None\n\n # these are the layers Dshell will help parse\n # try to find them in the packet and eventually pull out useful data\n ethernet_p = None\n ieee80211_p = None\n ip_p = None\n tcp_p = None\n udp_p = None\n current_layer = pkt\n while current_layer:\n if isinstance(current_layer, ethernet.Ethernet) and not ethernet_p:\n ethernet_p = current_layer\n elif isinstance(current_layer, ieee80211.IEEE80211) and not ieee80211_p:\n ieee80211_p = current_layer\n elif isinstance(current_layer, (ip.IP, ip6.IP6)) and not ip_p:\n ip_p = current_layer\n elif isinstance(current_layer, tcp.TCP) and not tcp_p:\n tcp_p = current_layer\n elif isinstance(current_layer, udp.UDP) and not udp_p:\n udp_p = current_layer\n try:\n current_layer = current_layer.upper_layer\n except AttributeError:\n break\n\n # attempt to grab MAC addresses\n if ethernet_p:\n # from Ethernet\n self.smac = ethernet_p.src_s\n self.dmac = ethernet_p.dst_s\n elif ieee80211_p:\n # from 802.11\n try:\n if ieee80211_p.subtype == ieee80211.M_BEACON:\n ieee80211_p2 = ieee80211_p.beacon\n elif ieee80211_p.subtype == ieee80211.M_DISASSOC:\n ieee80211_p2 = ieee80211_p.disassoc\n elif ieee80211_p.subtype == ieee80211.M_AUTH:\n ieee80211_p2 = ieee80211_p.auth\n elif ieee80211_p.subtype == ieee80211.M_DEAUTH:\n ieee80211_p2 = ieee80211_p.deauth\n elif ieee80211_p.subtype == ieee80211.M_ACTION:\n ieee80211_p2 = ieee80211_p.action\n else:\n # can't figure out how pypacker stores the other subtypes\n raise AttributeError\n self.smac = ieee80211_p2.src_s\n self.dmac = ieee80211_p2.dst_s\n except AttributeError as e:\n pass\n\n # process IP addresses and associated metadata (if applicable)\n if ip_p:\n # get IP addresses\n sip = ipaddress.ip_address(ip_p.src)\n dip = ipaddress.ip_address(ip_p.dst)\n self.sip = sip.compressed\n self.dip = dip.compressed\n self.sip_bytes = sip.packed\n self.dip_bytes = dip.packed\n\n # get protocols, country codes, and ASNs\n self.protocol_num = ip_p.p if isinstance(ip_p, ip.IP) else ip_p.nxt\n self.protocol = PacketPlugin.IP_PROTOCOL_MAP.get(self.protocol_num, str(self.protocol_num))\n self.sipcc, self.siplat, self.siplon = geoip.geoip_location_lookup(self.sip)\n self.sipasn = geoip.geoip_asn_lookup(self.sip)\n self.dipcc, self.diplat, self.diplon = geoip.geoip_location_lookup(self.dip)\n self.dipasn = geoip.geoip_asn_lookup(self.dip)\n\n if tcp_p:\n self.sport = tcp_p.sport\n self.dport = tcp_p.dport\n self.sequence_number = tcp_p.seq\n self.ack_number = tcp_p.ack\n self.tcp_flags = tcp_p.flags\n self.data = tcp_p.body_bytes\n\n elif 
udp_p:\n self.sport = udp_p.sport\n self.dport = udp_p.dport\n self.data = udp_p.body_bytes\n\n else:\n self.data = pkt.highest_layer.body_bytes\n\n self.byte_count = len(self.data)\n\n\n\n @property\n def addr(self):\n \"\"\"\n A standard representation of the address:\n ((self.sip, self.sport), (self.dip, self.dport))\n or\n ((self.smac, self.sport), (self.dmac, self.dport))\n \"\"\"\n # try using IP addresses first\n if self.sip or self.dip:\n return ((self.sip, self.sport), (self.dip, self.dport))\n # then try MAC addresses\n elif self.smac or self.dmac:\n return ((self.smac, self.sport), (self.dmac, self.dport))\n # if all else fails, return Nones\n else:\n return ((None, None), (None, None))\n\n @property\n def packet_tuple(self):\n \"\"\"\n A standard representation of the raw packet tuple:\n (self.pktlen, self.rawpkt, self.ts)\n \"\"\"\n return (self.pktlen, self.rawpkt, self.ts)\n\n def __repr__(self):\n return \"%s %16s :%-5s -> %5s :%-5s (%s -> %s)\" % (self.dt, self.sip, self.sport, self.dip, self.dport, self.sipcc, self.dipcc)\n\n def info(self):\n \"\"\"\n Provides a dictionary with information about a packet. Useful for\n calls to a plugin's write() function, e.g. self.write(\\\\*\\\\*pkt.info())\n \"\"\"\n d = dict(self.__dict__)\n del d['pkt']\n del d['rawpkt']\n del d['data']\n return d\n\n\nclass Connection(object):\n \"\"\"\n Class for holding data about connections\n\n def __init__(self, plugin, first_packet)\n\n Args:\n plugin: an instance of the plugin creating this connection\n first_packet: the first Packet object to initialize connection\n\n Attributes:\n plugin: name of the plugin that created object\n addr: .addr attribute of first packet\n sip: source IP\n smac: source MAC address\n sport: source port\n sipcc: country code of source IP\n siplat: latitude of source IP\n siplon: longitude of source IP\n sipasn: ASN of source IP\n clientip: same as sip\n clientmac: same as smac\n clientport: same as sport\n clientcc: same as sipcc\n clientlat: same as siplat\n clientlon: same as siplon\n clientasn: same as sipasn\n dip: dest IP\n dmac: dest MAC address\n dport: dest port\n dipcc: country code of dest IP\n diplat: latitude of dest IP\n diplon: longitude of dest IP\n dipasn: ASN of dest IP\n serverip: same as dip\n servermac: same as dmac\n serverport: same as dport\n servercc: same as dipcc\n serverlat: same as diplat\n serverlon: same as diplon\n serverasn: same as dipasn\n protocol: text version of protocol in layer-3 header\n clientpackets: counts of packets from client side\n clientbytes: total bytes transferred from client side\n serverpackets: counts of packets from server side\n serverbytes: total bytes transferred from server side\n ts: timestamp of first packet\n dt: datetime of first packet\n starttime: datetime of first packet\n endtime: datetime of last packet\n client_state: the TCP state on the client side (\"init\",\n \"established\", \"closed\", etc.)\n server_state: the TCP state on server side\n blobs: list of reassembled half-stream Blobs\n stop: if True, stop following connection\n handled: used to indicate if a connection was already passed through\n a plugin's connection_handler function. 
Resets when new\n data for a connection comes in.\n\n \"\"\"\n\n def __init__(self, plugin, first_packet):\n \"\"\"\n Initializes Connection object\n\n Args:\n plugin: an instance of the plugin creating this connection\n first_packet: the first Packet object to initialize connection\n \"\"\"\n self.plugin = plugin.name\n self.addr = first_packet.addr\n self.sip = first_packet.sip\n self.smac = first_packet.smac\n self.sport = first_packet.sport\n self.sipcc = first_packet.sipcc\n self.siplat = first_packet.siplat\n self.siplon = first_packet.siplon\n self.sipasn = first_packet.sipasn\n self.clientip = first_packet.sip\n self.clientmac = first_packet.smac\n self.clientport = first_packet.sport\n self.clientcc = first_packet.sipcc\n self.clientlat = first_packet.siplat\n self.clientlon = first_packet.siplon\n self.clientasn = first_packet.sipasn\n self.dip = first_packet.dip\n self.dmac = first_packet.dmac\n self.dport = first_packet.dport\n self.dipcc = first_packet.dipcc\n self.diplat = first_packet.diplat\n self.diplon = first_packet.diplon\n self.dipasn = first_packet.dipasn\n self.serverip = first_packet.dip\n self.servermac = first_packet.dmac\n self.serverport = first_packet.dport\n self.servercc = first_packet.dipcc\n self.serverlat = first_packet.diplat\n self.serverlon = first_packet.diplon\n self.serverasn = first_packet.dipasn\n self.protocol = first_packet.protocol\n self.clientpackets = 0\n self.clientbytes = 0\n self.serverpackets = 0\n self.serverbytes = 0\n self.ts = first_packet.ts\n self.dt = first_packet.dt\n self.starttime = first_packet.dt\n self.endtime = first_packet.dt\n self.client_state = None\n self.server_state = None\n self.blobs = []\n self.stop = False\n self.handled = False\n # used to determine if direction changes\n self._current_addr_pair = None\n\n @property\n def duration(self):\n \"total seconds from starttime to endtime\"\n tdelta = self.endtime - self.starttime\n return tdelta.total_seconds()\n\n @property\n def connection_closed(self):\n return self.client_state == \"closed\" and self.server_state == \"closed\"\n\n def add_packet(self, packet):\n \"\"\"\n Accepts a Packet object and attempts to push it into the current Blob.\n If the direction changes, it creates a new Blob and returns the old one\n to the caller.\n\n Args:\n packet: a Packet object to add to the connection\n\n Returns:\n Previous Blob if direction has changed\n \"\"\"\n if packet.sip == self.clientip and (not packet.sport or packet.sport == self.clientport):\n # packet moving from client to server\n direction = 'cs'\n else:\n # packet moving from server to client\n direction = 'sc'\n\n if (packet.addr != self._current_addr_pair and packet.data) or len(self.blobs) == 0:\n try:\n old_blob = self.blobs[-1]\n except IndexError:\n old_blob = None\n self.blobs.append(Blob(packet, direction))\n self._current_addr_pair = packet.addr\n else:\n old_blob = None\n\n blob = self.blobs[-1]\n blob.add_packet(packet)\n\n # Only count packets if they have data (i.e. ignore SYNs, ACKs, etc.)\n if packet.data:\n if packet.addr == self.addr:\n self.clientpackets += 1\n self.clientbytes += packet.byte_count\n else:\n self.serverpackets += 1\n self.serverbytes += packet.byte_count\n\n if packet.dt > self.endtime:\n self.endtime = packet.dt\n\n if old_blob:\n return old_blob\n\n def info(self):\n \"\"\"\n Provides a dictionary with information about a connection. Useful for\n calls to a plugin's write() function, e.g. 
self.write(\\\\*\\\\*conn.info())\n\n Returns:\n Dictionary with information\n \"\"\"\n d = dict(self.__dict__)\n d['duration'] = self.duration\n del d['blobs']\n del d['stop']\n del d['_current_addr_pair']\n del d['handled']\n return d\n\n def __repr__(self):\n return '%s %16s -> %16s (%s -> %s) %6s %6s %5d %5d %7d %7d %-.4fs' % (\n self.starttime,\n self.clientip,\n self.serverip,\n self.clientcc,\n self.servercc,\n self.clientport,\n self.serverport,\n self.clientpackets,\n self.serverpackets,\n self.clientbytes,\n self.serverbytes,\n self.duration,\n )\n\nclass Blob(object):\n \"\"\"\n Class for holding and reassembling pieces of a connection.\n\n A Blob holds the packets and reassembled data for traffic moving in one\n direction in a connection, before direction changes.\n\n def __init__(self, first_packet, direction)\n\n Args:\n first_packet: the first Packet object to initialize Blob\n direction: direction of blob -\n 'cs' for client-to-server, 'sc' for sever-to-client\n\n Attributes:\n addr: .addr attribute of the first packet\n ts: timestamp of the first packet\n starttime: datetime for first packet\n endtime: datetime of last packet\n sip: source IP\n smac: source MAC address\n sport: source port\n sipcc: country code of source IP\n sipasn: ASN of source IP\n dip: dest IP\n dmac: dest MAC address\n dport: dest port\n dipcc: country code of dest IP\n dipasn: ASN of dest IP\n protocol: text version of protocol in layer-3 header\n direction: direction of the blob -\n 'cs' for client-to-server, 'sc' for sever-to-client\n ack_sequence_numbers: set of ACK numbers from the receiver for ####################################\n collected data packets\n all_packets: list of all packets in the blob\n hidden (bool): Used to indicate that a Blob should not be passed to\n next plugin. 
Can theoretically be overruled in, say, a\n connection_handler to force a Blob to be passed to next\n plugin.\n \"\"\"\n\n # max offset before wrap, default is MAXINT32 for TCP sequence numbers\n MAX_OFFSET = 0xffffffff\n\n def __init__(self, first_packet, direction):\n self.addr = first_packet.addr\n self.ts = first_packet.ts\n self.starttime = first_packet.dt\n self.endtime = first_packet.dt\n self.sip = first_packet.sip\n self.smac = first_packet.smac\n self.sport = first_packet.sport\n self.sipcc = first_packet.sipcc\n self.sipasn = first_packet.sipasn\n self.dip = first_packet.dip\n self.dmac = first_packet.dmac\n self.dport = first_packet.dport\n self.dipcc = first_packet.dipcc\n self.dipasn = first_packet.dipasn\n self.protocol = first_packet.protocol\n self.direction = direction\n# self.ack_sequence_numbers = {}\n self.all_packets = []\n# self.data_packets = []\n self.__data_bytes = b''\n\n # Used to indicate that a Blob should not be passed to next plugin.\n # Can theoretically be overruled in, say, a connection_handler to\n # force a Blob to be passed to next plugin.\n self.hidden = False\n\n @property\n def data(self):\n \"\"\"\n Returns the reassembled byte string.\n\n If it was not already reassembled, reassemble is called with default\n arguments.\n \"\"\"\n if not self.__data_bytes:\n self.reassemble()\n return self.__data_bytes\n\n def reassemble(self, allow_padding=True, allow_overlap=True, padding=b'\\x00'):\n \"\"\"\n Rebuild the data string from the current list of data packets\n For each packet, the TCP sequence number is checked.\n\n If overlapping or padding is disallowed, it will raise a\n SequenceNumberError exception if a respective event occurs.\n\n Args:\n allow_padding (bool): If data is missing and allow_padding = True\n (default: True), then the padding argument\n will be used to fill the gaps.\n allow_overlap (bool): If data is overlapping, the new data is\n used if the allow_overlap argument is True\n (default). Otherwise, the earliest data is\n kept.\n padding: Byte character(s) to use to fill in missing data. Used\n in conjunction with allow_padding (default: b'\\\\\\\\x00')\n \"\"\"\n data = b\"\"\n unacknowledged_data = []\n acknowledged_data = {}\n for pkt in self.all_packets:\n if not pkt.sequence_number:\n # if there are no sequence numbers (i.e. not TCP), just rebuild\n # in chronological order\n data += pkt.data\n continue\n\n if pkt.data:\n if pkt.sequence_number in acknowledged_data:\n continue\n unacknowledged_data.append(pkt)\n\n elif pkt.tcp_flags and pkt.tcp_flags & tcp.TH_ACK:\n ackpkt = pkt\n for i, datapkt in enumerate(unacknowledged_data):\n if (datapkt.ack_number == ackpkt.sequence_number\n and ackpkt.ack_number == (datapkt.sequence_number + len(datapkt.data))):\n # if the seq/ack numbers align, this is the data packet\n # we want\n # TODO confirm this logic is correct\n acknowledged_data[datapkt.sequence_number] = datapkt.data\n unacknowledged_data.pop(i)\n break\n\n if not acknowledged_data and not unacknowledged_data:\n # For non-sequential protocols, just return what we have\n self.__data_bytes = data\n\n else:\n # Create a list of each segment of the complete data. 
Use\n # acknowledged data first, and then try to fill in the blanks with\n # unacknowledged data.\n segments = acknowledged_data.copy()\n for pkt in reversed(unacknowledged_data):\n if pkt.sequence_number in segments: continue\n segments[pkt.sequence_number] = pkt.data\n\n offsets = sorted(segments.keys())\n # iterate over the segments and try to piece them together\n # handle any instances of missing or overlapping segments\n nextoffset = offsets[0]\n startoffset = offsets[0]\n for offset in offsets:\n if offset > nextoffset:\n # data is missing\n if allow_padding:\n data += padding * (offset - nextoffset)\n else:\n raise SequenceNumberError(\"Missing data for sequence number %d %s\" % (nextoffset, self.addr))\n elif offset < nextoffset:\n # data is overlapping\n if not allow_overlap:\n raise SequenceNumberError(\"Overlapping data for sequence number %d %s\" % (nextoffset, self.addr))\n\n nextoffset = (offset + len(segments[offset])) & self.MAX_OFFSET\n data = data[:offset - startoffset] + \\\n segments[offset] + \\\n data[nextoffset - startoffset:]\n self.__data_bytes = data\n\n return data\n\n def info(self):\n \"\"\"\n Provides a dictionary with information about a blob. Useful for\n calls to a plugin's write() function, e.g. 
self.write(\\\\*\\\\*conn.info())\n\n Returns:\n Dictionary with information\n \"\"\"\n d = dict(self.__dict__)\n del d['hidden']\n del d['_Blob__data_bytes']\n del d['all_packets']\n return d\n\n def add_packet(self, packet):\n \"\"\"\n Accepts a Packet object and stores it.\n\n Args:\n packet: a Packet object\n \"\"\"\n self.all_packets.append(packet)\n\n if packet.dt > self.endtime:\n self.endtime = packet.dt\n","sub_path":"dshell/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":55911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"402264731","text":"import pandas as pd\nfrom tqdm import tqdm, trange\nimport tables\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nfrom sklearn import cross_validation\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom clean_training_set import load_clean_training_set\nfrom featurizer import *\nfrom pandas.tseries.offsets import *\nfrom learn_structure import load_structure\nimport math\nimport os, pickle\n\n\ndef update_data(df, sums, numbers):\n for index, row in df.iterrows():\n triplet = (row.DAY_WE_DS, row.ASS_ASSIGNMENT, row.DATE.time())\n if triplet not in sums:\n sums[triplet] = 0\n numbers[triplet] = 0\n sums[triplet] += row.CSPL_RECEIVED_CALLS\n numbers[triplet] += 1\n\n\ndef load_data(df):\n sums = {}\n numbers = {}\n\n update_data(df, sums, numbers)\n\n pickle.dump((sums, numbers), open(\"files/time_series_data.pkl\", \"wb\"))\n return sums, numbers\n\n\ntraining_df = load_clean_training_set(\"files/train_clean.pkl\")\n\nsubmission_df = load_submission(\"files/submission_test.txt\")\n\ny_true = np.copy(submission_df['prediction'].as_matrix())\ny_pred = np.zeros(len(y_true))\n\nmin_date = submission_df['DATE'].min()\nmax_date = submission_df['DATE'].max()\n\nold_df = training_df[training_df.DATE < min_date - DateOffset(days=3)]\n\nsums, numbers = load_data(old_df)\n\nprev_date = min_date\ncurrent_date = min_date + DateOffset(days=7)\n\nn = 0\nwith tqdm(total=((max_date - min_date).days / 7) + 1) as pbar:\n while prev_date < max_date:\n sub_df = submission_df[(submission_df.DATE >= prev_date) & (submission_df.DATE < current_date)]\n for index, row in sub_df.iterrows():\n triplet = (row.DAY_WE_DS, row.ASS_ASSIGNMENT, row.DATE.time())\n if triplet in sums:\n y_pred[index] = float(sums[triplet]) / numbers[triplet]\n\n old_df = training_df[(training_df.DATE >= prev_date - DateOffset(days=3)) &\n (training_df.DATE < current_date - DateOffset(days=3))]\n update_data(old_df, sums, numbers)\n prev_date = current_date\n current_date = prev_date + DateOffset(days=7)\n pbar.update(1)\n\nprint(n)\ny_pred_round = [int(math.ceil(x)) if x > 0 else 0 for x in y_pred]\nsubmission_df.prediction = y_pred_round\nprint(submission_df)\nsubmission_df.DATE = submission_df.DATE.apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])\nsubmission_df[['DATE', 'ASS_ASSIGNMENT', 'prediction']].to_csv('results/submission_real.txt', sep='\\t', index=False)\n#\nprint('MSE round: '),\nprint(mean_squared_error(y_true, y_pred_round))\n#\nprint('MSE not round: '),\nprint(mean_squared_error(y_true, y_pred))\n","sub_path":"classifier_time_series_optimized.py","file_name":"classifier_time_series_optimized.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"33915077","text":"\"\"\"The Vulcan component.\"\"\"\nimport asyncio\nimport logging\n\nfrom aiohttp import ClientConnectorError\nfrom homeassistant.exceptions import ConfigEntryNotReady\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.typing import ConfigType\nfrom vulcan import Account, Keystore, Vulcan\nfrom vulcan._utils import VulcanAPIException\n\nfrom .const import DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nPLATFORMS = [\"sensor\", \"calendar\"]\n\n\nasync def async_setup(hass, config) -> bool:\n hass.data.setdefault(DOMAIN, {})\n\n return True\n\n\nasync def async_setup_entry(hass, config_entry):\n try:\n with open(f\".vulcan/keystore-{config_entry.data.get('login')}.json\") as f:\n keystore = Keystore.load(f)\n with open(f\".vulcan/account-{config_entry.data.get('login')}.json\") as f:\n account = Account.load(f)\n client = Vulcan(keystore, account)\n await client.select_student()\n students = await client.get_students()\n for student in students:\n if str(student.pupil.id) == str(config_entry.data.get(\"student_id\")):\n client.student = student\n break\n except VulcanAPIException as err:\n if str(err) == \"The certificate is not authorized.\":\n _LOGGER.error(\n \"The certificate is not authorized, please authorize integration again.\"\n )\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": \"reauth\"},\n )\n )\n else:\n _LOGGER.error(\"Vulcan API error: %s\", err)\n return False\n except FileNotFoundError as err:\n _LOGGER.error(\n \"The certificate is not authorized, please authorize integration again.\"\n )\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": \"reauth\"},\n )\n )\n return False\n except ClientConnectorError as err:\n if \"connection_error\" not in hass.data[DOMAIN]:\n _LOGGER.error(\n \"Connection error - please check your internet connection: %s\", err\n )\n hass.data[DOMAIN][\"connection_error\"] = True\n await client.close()\n raise ConfigEntryNotReady\n num = 0\n for _ in hass.config_entries.async_entries(DOMAIN):\n num += 1\n hass.data[DOMAIN][\"students_number\"] = num\n hass.data[DOMAIN][config_entry.entry_id] = client\n\n if not config_entry.update_listeners:\n update_listener = config_entry.add_update_listener(_async_update_options)\n\n for platform in PLATFORMS:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(config_entry, platform)\n )\n\n return True\n\n\nasync def async_unload_entry(hass, entry):\n \"\"\"Unload a config entry.\"\"\"\n for platform in PLATFORMS:\n await hass.config_entries.async_forward_entry_unload(entry, platform)\n\n return True\n\n\nasync def _async_update_options(hass, entry):\n \"\"\"Update options.\"\"\"\n await hass.config_entries.async_reload(entry.entry_id)\n\n\nclass VulcanEntity(Entity):\n @property\n def name(self):\n return self._name\n\n @property\n def icon(self):\n return self._icon\n\n @property\n def unique_id(self):\n return self._unique_id\n\n @property\n def state(self):\n return self._state\n","sub_path":"custom_components/vulcan/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"23797646","text":"# coding: utf-8\n\nfrom io import BytesIO\nimport uuid\n\nfrom base64 import b64encode, b64decode\nfrom flask import request, jsonify\n\nimport config\nfrom main import conn\nfrom common import verify_helper\nfrom libs.image_storage import bucket\nfrom . import api\n\n@api.route('/api/profile', methods=['GET'])\ndef profile():\n\tsess = request.cookies.get('s_', '')\n\tsessionid = b64decode(sess)\n\tuser_mobile = conn.hget('login:', sessionid)\n\tif not user_mobile:\n\t\tdata = {'errno': -1, 'errmsg': '获取用户失败'}\n\t\treturn jsonify(data)\n\n\tuser = conn.hgetall('user:' + user_mobile)\n\tif user:\n\t\tuser_info = {}\n\t\tuser_info['name'] = user.get('uname', '')\n\t\tuser_info['mobile'] = user.get('phone', '')\n\t\tavatar_ = user.get('avatar', '')\n\t\tuser_info['avatar'] = config.OSS_PREFIX + str(avatar_) if avatar_ else ''\n\t\tdata = {'errno': 0, 'errmsg': '获取用户信息成功', 'data': user_info}\n\telse:\n\t\tdata = {'errno': 0, 'errmsg': '获取用户信息失败'}\n\treturn jsonify(data)\n\n\n@api.route('/api/profile/avatar', methods=['POST'])\ndef avatar():\n\tsess = request.cookies.get('s_', '')\n\tsessionid = b64decode(sess)\n\tuser_mobile = conn.hget('login:', sessionid)\n\tif not user_mobile:\n\t\tdata = {'errno': -1, 'errmsg': '获取用户失败'}\n\t\treturn jsonify(data)\n\n\n\t# print dir(request.files)\n\t# print type(request.files)\n\t# print request.files\n\tfile = request.files.get('avatar')\n\t# print file \n\t# print dir(file)\n\t# print file.stream\n\t# # print dir(file)\n\tsuffix = '.' + file.filename.rpartition('.')[-1]\n\t# print name\n\tfilename = 'img_' + str(uuid.uuid4()) + suffix\n\tbuffer = BytesIO()\n\tfile.save(buffer)\n\tfile.close()\n\t# print buffer.getvalue()\n\t# filename有些多余,真正文件名由bucket产生\n\tf = bucket.upload(filename, buffer.getvalue())\n\t# file.save('./html/static/images/' + name)\n\t# print filename\n\t\n\n\ttry:\n\t\tavatar = conn.hset('user:' + user_mobile, 'avatar', f)\n\t\tdata = {'errno': 0, 'errmsg': '上传图像成功', 'data': config.OSS_PREFIX + f}\n\texcept:\n\t\tdata = {'errno': 0, 'errmsg': '上传图像失败'}\n\treturn jsonify(data)\n\n\t\n\n@api.route('/api/profile/name', methods=['POST'])\ndef profile_name():\n\tname = request.json.get('name', '')\n\tsess = request.cookies.get('s_', '')\n\tsessionid = b64decode(sess)\n\tuser_mobile = conn.hget('login:', sessionid)\n\tif not user_mobile:\n\t\tdata = {'errno': -1, 'errmsg': '获取用户失败'}\n\t\treturn jsonify(data)\n\t# 比较是否与前一次名字相同和直接修改一次名字,操作量一样 因此,不作比较\n\ttry:\n\t\tconn.hset('user:' + user_mobile, 'uname', name)\n\t\tdata = {'errno': 0, 'msg': '重设用户名成功'}\n\texcept:\n\t\tdata = {'errno': -1, 'msg': '重设用户名失败'}\n\t\n\treturn jsonify(data)\n\n\n@api.route('/api/profile/auth', methods=['GET'])\ndef get_auth():\n\tsess = request.cookies.get('s_', '')\n\tsessionid = b64decode(sess)\n\tuser_mobile = conn.hget('login:', sessionid)\n\tif not user_mobile:\n\t\tdata = {'errno': -1, 'errmsg': '获取用户失败'}\n\t\treturn jsonify(data)\n\tuser_info = conn.hgetall('user:' + user_mobile)\n\treal_name = user_info.get('real_name', '')\n\tid_card = user_info.get('id_card', '')\n\tdata = {'errno': 0, 'errmsg': '获取信息成功', \n\t\t'data': {'real_name': real_name, 'id_card': id_card}}\n\treturn jsonify(data)\n\n\n@api.route('/api/profile/auth', methods=['POST'])\ndef post_auth():\n\tsess = request.cookies.get('s_', '')\n\tsessionid = b64decode(sess)\n\tuser_mobile = conn.hget('login:', sessionid)\n\tif not user_mobile:\n\t\tdata = {'errno': -1, 'errmsg': '获取用户失败'}\n\t\treturn jsonify(data)\n\n\tdata = request.json\n\t# real_name = 
data.get('real_name', '')\n\t# id_card = data.get('id_card', '')\n\tuser_info = {}\n\tuser_info['real_name'] = data.get('real_name', '')\n\tuser_info['id_card'] = data.get('id_card', '')\n\n\tconn.hmset('user:' + user_mobile, user_info)\n\t\t\n\tres = {'errno': 0, 'errmsg': '获取信息成功', \n\t\t'data': {'real_name': user_info['real_name'], 'id_card': user_info['id_card']}}\n\treturn jsonify(res)\n\n\n","sub_path":"api/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
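Every view above repeats the same cookie-to-Redis session lookup before doing its real work. One way to factor that out is a decorator; the sketch below reuses the module's conn and the same cookie and key names, while the decorator itself (require_user) is a hypothetical helper, not part of the original file:

from functools import wraps
from base64 import b64decode
from flask import request, jsonify
from main import conn  # the same Redis connection used above

def require_user(view):
    # resolve the session cookie to a user mobile, or short-circuit with an error
    @wraps(view)
    def wrapper(*args, **kwargs):
        sessionid = b64decode(request.cookies.get('s_', ''))
        user_mobile = conn.hget('login:', sessionid)
        if not user_mobile:
            return jsonify({'errno': -1, 'errmsg': 'failed to get user'})
        return view(user_mobile, *args, **kwargs)
    return wrapper

# Usage sketch:
# @api.route('/api/profile/name', methods=['POST'])
# @require_user
# def profile_name(user_mobile):
#     ...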
+{"seq_id":"520547623","text":"#!/usr/bin/env python3\n#\n# A script that scrapes a web page for a blog post etc, then uses AWS Polly to\n# convert the content text to an audio file suitable for a private podcast.\n#\n# Usage:\n# as_audio.py \n\nimport os\nimport sys\nimport re\nimport requests\nimport argparse\nimport boto3\nimport html2text\nfrom markdown import markdown\nfrom bs4 import BeautifulSoup\nfrom readability import Document\nimport pydub\n\ndef extract_doi_from_string(s):\n \"\"\"\n Extract the DOI from a string.\n \"\"\"\n doi = re.search(r'(10\\.\\d{4,9}/[-._;()/:A-Za-z0-9]+|10.1002/[^\\s]+|10.\\d{4}/\\d+-\\d+X?(\\d+)\\d+<[\\d\\w]+:[\\d\\w]*>\\d+.\\d+.\\w+;\\d|10.1021/\\w\\w\\d+|10.1207/[\\w\\d]+\\&\\d+_\\d+)', s)\n if doi:\n return doi.group()\n else:\n return None\n\ndef extract_isbn_from_string(s):\n \"\"\"\n Extract the ISBN from a string.\n \"\"\"\n isbn = re.search(r'(?=(?:\\D*\\d){10}(?:(?:\\D*\\d){3})?$)[\\d-]+', s)\n if isbn:\n return str(isbn.group()).replace('-',\"\")\n else:\n return None\n\ndef get_article_doc(url: str) -> Document:\n \"\"\"\n Get the Document version of the article from the given URL.\n \"\"\"\n response = requests.get(url)\n if response.status_code != 200:\n print(\"Error: {}\".format(response.status_code))\n raise Exception(\"Failed to get article text from URL: {}\".format(url))\n html = response.text\n doc = Document(html)\n return doc\n\ndef convert_doc_to_dict(doc: Document, strip_html=True) -> dict:\n \"\"\"\n Convert the Document to a dictionary of the article's text and title.\n \"\"\"\n if strip_html:\n h = html2text.HTML2Text()\n # ignore links\n h.ignore_links = True\n text = h.handle(doc.summary())\n else:\n text = doc.summary()\n return {\n \"title\": doc.title(),\n \"text\": text\n }\n\ndef split_article_text(text: str, max_length=3000) -> list:\n \"\"\"\n Split the article text into paragraphs as best as possible.\n \"\"\"\n if '\\n\\n' in text:\n split_text = text.split('\\n\\n')\n split_text = [p.replace('\\n', ' ').strip() for p in split_text if p.strip()]\n else:\n split_text = text.split('\\n')\n clean_split_text = []\n split_text = [p.strip() for p in split_text if len(p.strip()) > 0]\n for p in split_text:\n if p.startswith('>'):\n p = p[1:].strip()\n if len(p) >= max_length:\n # find a space to split on\n sentences = p.split('.')\n parts = [\"\"]\n for s in sentences:\n latest_part = parts[-1] if len(parts) > 0 else \"\"\n if len(latest_part) + len(s) + 1 >= max_length:\n parts.append(s)\n else:\n parts[-1] = \"{}.{}\".format(latest_part, s)\n clean_split_text.extend(parts)\n else:\n clean_split_text.append(p)\n return clean_split_text\n\n\ndef strip_markdown(text: str) -> str:\n \"\"\"\n Strip out markdown from the given text snippet.\n \"\"\"\n html = markdown(text)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.get_text()\n\ndef build_polly_client():\n \"\"\"\n Build a Polly client.\n \"\"\"\n return boto3.client('polly')\n\ndef generate_temp_id():\n \"\"\"\n Generate a temporary ID for a file.\n \"\"\"\n return os.urandom(16).hex()\n\ndef get_and_prep_for_polly(url: str, remove_html=True, remove_markdown=True):\n \"\"\"Take a url; prep a dict containing a list of polly-suitable texts.\"\"\"\n try:\n raw_doc = get_article_doc(url)\n clean_doc = convert_doc_to_dict(raw_doc, strip_html=remove_html)\n title = clean_doc['title']\n text = clean_doc['text']\n if remove_markdown:\n paragraphs = [strip_markdown(p) for p in split_article_text(text)]\n else:\n paragraphs = split_article_text(text)\n paragraphs = 
[p.strip().replace('>','').replace(' ',' ') for p in paragraphs if len(p.strip()) > 0]\n # add the title to the first paragraph\n if len(title.strip()) > 0:\n paragraphs.insert(0, title.strip())\n return {\n \"title\": title,\n \"text\": paragraphs,\n \"url\": url,\n \"id\": generate_temp_id()\n }\n except Exception as e:\n print(\"Error: {}\".format(e))\n return None\n\ndef send_and_save(text:str, polly_client, outdir, id, n, voice=\"Matthew\"):\n \"\"\"\n Send a short string to a polly client, then save the resulting file.\n \"\"\"\n response = polly_client.synthesize_speech(\n OutputFormat='mp3',\n Text=text,\n VoiceId=voice,\n TextType='text',\n Engine='neural'\n )\n if 'AudioStream' in response:\n with open(\"{}/{}-{}.mp3\".format(outdir, id, str(n).zfill(6)), 'wb') as f:\n f.write(response['AudioStream'].read())\n\ndef list_temp_audio_files(outdir, id):\n \"\"\"\n Find all the temporary audio files for a given id; return a list in order.\n \"\"\"\n files = [os.path.join(outdir, f) for f in os.listdir(outdir) if id in f]\n files.sort()\n return files\n\ndef generate_combined_audio_outpath(outdir, id):\n \"\"\"\n Generate the output path for the combined audio file.\n \"\"\"\n return os.path.join(outdir, \"{}.mp3\".format(id))\n\ndef join_audio_files(files, outdir, id, buffer_silence=1000):\n \"\"\"\n Join all the audio files into a single file.\n \"\"\"\n audio_files = [pydub.AudioSegment.from_mp3(f) for f in files]\n silence = pydub.AudioSegment.silent(duration=buffer_silence)\n joined_audio = pydub.AudioSegment.empty()\n for i, audio in enumerate(audio_files):\n joined_audio += audio\n joined_audio += silence\n combined_outpath = generate_combined_audio_outpath(outdir, id)\n joined_audio.export(combined_outpath, format=\"mp3\")\n if not os.path.exists(combined_outpath):\n print(\"Attempt to saved combined audio to {} failed for an unknown reason!\".format(combined_outpath))\n\n\ndef build_final_outpath(outdir, document_dict):\n \"\"\"\n Build the final output path for the given document.\n \"\"\"\n if 'title' in document_dict.keys() and len(document_dict['title']) > 0:\n final_name = \"{}.mp3\".format(document_dict['title'].strip())\n final_outpath = os.path.join(outdir, final_name)\n return final_outpath\n else:\n return None\n\ndef rename_combined_file(temp_outdir, id, final_outdir, document_dict):\n \"\"\"\n Rename the combined file to the final name.\n \"\"\"\n combined_outpath = generate_combined_audio_outpath(temp_outdir, id)\n final_outpath = build_final_outpath(final_outdir, document_dict)\n if final_outpath is not None:\n os.rename(src=combined_outpath, dst=final_outpath)\n else:\n final_name = \"{}.mp3\".format(id)\n final_outpath = os.path.join(final_outdir, final_name)\n os.rename(src=combined_outpath, dst=final_outpath)\n\ndef make_audible(url, temp_dir=\"/Users/g/Desktop\", final_dir=\"/Users/g/Desktop\", remove_html=True, remove_markdown=True, buffer_silence=1000, voice=\"Matthew\"):\n \"\"\"\n Take a url and generate an audio file from it.\n \"\"\"\n document = get_and_prep_for_polly(url)\n if document is not None:\n final_outpath = build_final_outpath(final_dir, document) # use to check if it already exists!\n if final_outpath is not None and os.path.exists(final_outpath):\n print(\"{} already exists! 
Skipping.\".format(final_outpath))\n return None\n else:\n client = build_polly_client()\n for n, text in enumerate(document['text']):\n send_and_save(text, client, temp_dir, document['id'], n, voice=voice)\n temp_files = list_temp_audio_files(temp_dir, document['id'])\n join_audio_files(temp_files, temp_dir, document['id'])\n rename_combined_file(temp_dir, document['id'], final_dir, document)\n # remove temp files\n for f in temp_files:\n os.remove(f)\n\ndef get_args(sysargs = sys.argv[1:]):\n \"\"\"\n Get the command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Make an audio file from a given URL.\")\n parser.add_argument(\"url\", help=\"The URL to make an audio file from.\")\n parser.add_argument(\"--temp_dir\", help=\"The temporary directory to save the in-process audio files.\", default=\"/Users/g/Desktop\")\n parser.add_argument(\"--final_dir\", help=\"The directory to save the final audio file.\", default=\"/Users/g/Desktop\")\n parser.add_argument(\"--remove_html\", help=\"Remove HTML from the text.\", action=\"store_true\", default=True)\n parser.add_argument(\"--remove_markdown\", help=\"Remove markdown from the text.\", action=\"store_true\", default=True)\n parser.add_argument(\"--buffer_silence\", help=\"The amount of silence to add between each audio file.\", default=1000, type=int)\n parser.add_argument(\"--voice\", help=\"The Polly voice to use.\", default=\"Matthew\")\n return parser.parse_args(sysargs)\n\ndef main():\n \"\"\"\n Get the arguments and make an audio file.\n \"\"\"\n args = get_args(sys.argv[1:])\n if args.url is not None and args.url != \"\":\n # make sure the link doesn't have a doi or isbn\n doi = extract_doi_from_string(args.url)\n isbn = extract_isbn_from_string(args.url)\n if doi is None and isbn is None and not str(args.url).endswith('.pdf'):\n make_audible(args.url,\n args.temp_dir,\n args.final_dir,\n args.remove_html,\n args.remove_markdown,\n args.buffer_silence,\n args.voice)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"scripts/as_audio.py","file_name":"as_audio.py","file_ext":"py","file_size_in_byte":9600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"501175939","text":"import numpy as np\n\ndef eval_perplexity(model, corpus, batch_size=10, time_size=35):\n print('=== Evaluating perplexity... ===')\n corpus_size = len(corpus)\n total_loss, loss_cnt = 0, 0\n max_iters = (corpus_size - 1) // (batch_size * time_size)\n jump = (corpus_size - 1) // batch_size\n\n for iters in range(max_iters):\n xs = np.zeros((batch_size, time_size), dtype=np.int32)\n ts = np.zeros((batch_size, time_size), dtype=np.int32)\n time_offset = iters * time_size\n offsets = [time_offset + (i * jump) for i in range(batch_size)]\n for t in range(time_size):\n for i, offset in enumerate(offsets):\n xs[i, t] = corpus[(offset + t) % corpus_size]\n ts[i, t] = corpus[(offset + t + 1) % corpus_size]\n try:\n loss = model.forward(xs, ts, train_flag=False)\n except TypeError:\n loss = model.forward(xs, ts)\n total_loss += loss\n sys.stdout.write('\\r%d / %d' % (iters, max_iters))\n sys.stdout.flush()\n print('')\n ppl = np.exp(total_loss / max_iters)\n return ppl\n","sub_path":"src/concerns/eval_perplexity.py","file_name":"eval_perplexity.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"563563308","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom iccpy.gadget import load_snapshot\nfrom iccpy.gadget.labels import cecilia_labels\nfrom iccpy.gadget.subfind import SubfindCatalogue\nfrom iccpy.utils import match\n\nimport time, pickle\n\n\n################################################################################################\n################################################################################################\n\n\ndef M_vir_crit(snap, subh, layers, rmax):\n \"\"\"\n Returns M_vir, Mstar_vir, r_200_crit\n \"\"\"\n hubble0 = snap.header.hubble0[0]\n ind_gas = np.concatenate([match(subh.ids, snap['ID '][0]), match(subh.ids + 2**31, snap['ID '][0])])\n ind_gas = ind_gas[ind_gas != -1]\n ind_stars = match(subh.ids, snap['ID '][4])\n ind_stars = ind_stars[ind_stars != -1]\n ind_DM = match(subh.ids, snap['ID '][1])\n ind_DM = ind_DM[ind_DM != -1]\n\n CM = subh.pot_min\n\n pos = np.concatenate((snap['POS '][0][ind_gas] - CM, snap['POS '][4][ind_stars] - CM, snap['POS '][1][ind_DM] - CM))\n pos = pos / hubble0 * 1000\n r = np.linalg.norm(pos, axis=1)\n\n\n masses = np.concatenate((snap['MASS'][0][ind_gas], snap['MASS'][4][ind_stars], snap['MASS'][1][ind_DM])) * 1e10 / hubble0\n\n # We make a mass histogram with radial bins:\n mass, radius = np.histogram(r, bins=layers, range=(0, rmax), weights=masses)\n\n inner_mass = np.cumsum(mass)\n rho = inner_mass / (4/3 * np.pi * radius[1:]**3)\n rho_crit = 126.7 # solar masses per kpc^3, from Planck\n\n ind_200 = (np.abs(rho - 200*rho_crit)).argmin() # This gives the index of the bin where rho is closest to 200*rho_crit\n r_200 = radius[ind_200]\n print('r_200 is {}'.format(r_200))\n M_vir = np.sum(masses[r < r_200])\n\n pos = (snap['POS '][4][ind_stars] - CM) / hubble0 * 1000\n r = np.linalg.norm(pos, axis=1)\n\n star_mass = snap['MASS'][4][ind_stars] * 1e10 / hubble0\n Mstar_vir = np.sum(star_mass[r < r_200])\n\n return M_vir, Mstar_vir, r_200\n\n\n################################################################################################\n################################################################################################\n\n\ndef get_inner_inds(snap, subh, component):\n \"\"\"\n Finds the indexes of the particles of\n a given component which lie within r_200 / 10 from the subhalo CM.\n This will return the indexes for the particles of the given type so one can\n then use them with snap['XXXX'][component][indexes]\n \"\"\"\n CM = subh.pot_min\n hubble0 = snap.header.hubble0[0]\n pos = (snap['POS '][component] - CM) * 1000 / hubble0\n r = np.linalg.norm(pos, axis=1)\n r_200 = M_vir_crit(snap, subh, 5000, 300)[2]\n ind = r < r_200 / 10\n # r_half = .05\n print('r_200 is {} kpc'.format(r_200))\n \n ind = np.nonzero(ind)[0]\n\n return ind\n\n\n################################################################################################\n################################################################################################\n\n\ndef most_massives(cat, n):\n \"\"\"\n This gets the n most massive subhalos in the catalogue\n \"\"\"\n import heapq\n n_most_massive = []\n masses = []\n for subh in cat.subhalo[:]:\n masses.append(subh.mass)\n thresh = np.min(heapq.nlargest(n, masses))\n for subh in cat.subhalo[:]:\n if subh.mass >= thresh:\n n_most_massive.append(subh)\n\n return 
n_most_massive\n\n\n################################################################################################\n################################################################################################\n\n\ndef get_massive(snap, cat, M):\n \"\"\"\n Returns a list with subhalo objects with more mass than M (given in solar masses)\n TAKES TOO MUCH TIME TO RUN, BUT IF WE TAKE SUBFIND MASS IT'S TOO BIG\n MAYBE FILTER OUT LOW TOTAL MASSES BEFORE FILTERING BY VIRIAL MASS????\n \"\"\"\n hubble0 = snap.header.hubble0[0]\n massives = []\n i = 0\n for subh in cat.subhalo[:]:\n \tif subh.mass > M * hubble0 / 1e10:\n \t\tmassives.append(subh)\n \ti += 1\n return massives\n\n\n################################################################################################\n################################################################################################\n\n\ndef PCA_matrix(snap, subh):\n CM = subh.pot_min\n # ind = match(subh.ids, snap['ID '][4])\n # ind = ind[ind != -1]\n\n ind = get_inner_inds(snap, subh, 4)\n print('{} stars for PCA'.format(np.size(ind)))\n pos = (snap['POS '][4][ind] - CM)\n # We calculate covariance matrix and diagonalize it. The eigenvectors are the galaxy's principal axes\n covMatrix = np.cov(np.transpose(pos))\n eigenval, eigenvect = np.linalg.eig(covMatrix)\n\n # eigenvalues are not ordered; we make it so rot_matrix has eigenvectors as columns ordered from highest eigenvalue to lowest:\n eig1 = eigenval.argmax()\n eig3 = eigenval.argmin()\n eig2 = 3 - eig1 - eig3\n\n rot_matrix = np.array([eigenvect[:, eig1], eigenvect[:, eig2], eigenvect[:, eig3]])\n rot_matrix = np.transpose(rot_matrix)\n\n # Now we check if the total angular momentum is antiparallel to z; if it is we flip the galaxy\n vel = snap['VEL '][4][ind]\n V_cm = Vcm(snap, subh)\n vel = vel - V_cm\n vel = np.dot(vel, rot_matrix)\n pos = np.dot(pos, rot_matrix)\n\n pos_x = pos[:, 0]\n pos_y = pos[:, 1]\n vel_x = vel[:, 0]\n vel_y = vel[:, 1]\n\n jz = pos_x * vel_y - pos_y * vel_x\n\n if np.sum(jz) < 0:\n # We invert first and last row (x and z) from the rot_matrix which is equivalent to rotating around the y axis\n rot_matrix[:, 0] = - rot_matrix[:, 0]\n rot_matrix[:, 2] = - rot_matrix[:, 2]\n\n return rot_matrix\n\n\n################################################################################################\n################################################################################################\n\n\ndef Vcm(snap, subh):\n \"\"\"\n Computes the Vcm using only star particles\n \"\"\"\n ind = match(subh.ids, snap['ID '][4])\n ind = ind[ind != -1]\n vel = snap['VEL '][4][ind]\n masses = snap['MASS'][4][ind]\n masses_reshaped = np.transpose(np.array([masses, masses, masses]))\n\n V_cm = np.sum(vel * masses_reshaped, axis=0) / np.sum(masses)\n\n return V_cm\n\n\n################################################################################################\n################################################################################################\n\n\ndef grid_maker(snap, subh, quantity, component, axis1, axis2, length, res, use_subf_ids):\n \"\"\"\n Returns a res*res 2darray with the projected quantity (e.g. 
'MASS') for the\n desired component (0 for gas, 1 for DM, 4 for stars)\n subfind: True if the subfind can be trusted, False if we want to use\n get_subh_from_CM to get all the particles inside r_200 / 10\n \"\"\"\n hubble0 = snap.header.hubble0[0]\n CM = subh.pot_min\n\n if use_subf_ids:\n if component == 0:\n \tind = np.concatenate([match(subh.ids, snap['ID '][0]), match(subh.ids + 2**31, snap['ID '][0])])\n else:\n \tind = match(subh.ids, snap['ID '][component])\n ind = ind[ind != -1]\n positions = (snap['POS '][component][ind] - CM) / hubble0\n else:\n positions = (snap['POS '][component] - CM) / hubble0\n ind = (abs(positions[:, 0]) < length/2) & (abs(positions[:, 1]) < length/2) & (abs(positions[:, 2]) < length/2)\n positions = positions[ind]\n\n # We rotate the positions so that the galactic angular momentum is parallel to the z axis:\n rot_matrix = PCA_matrix(snap, subh)\n positions = np.dot(positions, rot_matrix)\n\n pos_1 = positions[:, axis1]\n pos_2 = positions[:, axis2]\n # axis3 = 3 - axis2 - axis1\n # pos_3 = (snap['POS '][component][index] - CM)[axis3] * 1000 / hubble0\n magnitude = snap[quantity][component][ind] * 1e10 / hubble0 # unit conversion for the mass\n\n # # Here we smooth the mass distribution, averaging with 32 nearest neighbors:\n # nbrs = NearestNeighbors(n_neighbors=32, algorithm='auto').fit(positions)\n # indices = nbrs.kneighbors(positions)[1]\n # mag_smoothed = []\n #\n # for i in range(np.size(magnitude)):\n # # print(indices[i])\n # mag = np.sum(magnitude[indices[i]]) / 32\n # mag_smoothed.append(mag)\n\n # hist = np.histogram2d(pos_1, pos_2, bins=res, range=[[-length/2, length/2], [-length/2, length/2]], weights=mag_smoothed)\n hist = np.histogram2d(pos_1, pos_2, bins=res, range=[[-length/2, length/2], [-length/2, length/2]], weights=magnitude)\n\n return hist[0]\n\n\n################################################################################################\n################################################################################################\n\n\ndef grid_maker_SPH(snap, subh, quantity, component, axis1, axis2, length, res, use_subf_ids):\n \"\"\"\n Returns a res*res 2darray with the projected quantity (e.g. 
'MASS') for the\n desired component (0 for gas, 1 for DM, 4 for stars)\n subfind: True if the subfind can be trusted, False if we want to use\n get_subh_from_CM to get all the particles inside r_200 / 10\n \"\"\"\n hubble0 = snap.header.hubble0[0]\n CM = subh.pot_min\n\n if use_subf_ids:\n if component == 0:\n \tind = np.concatenate([match(subh.ids, snap['ID '][0]), match(subh.ids + 2**31, snap['ID '][0])])\n else:\n \tind = match(subh.ids, snap['ID '][component])\n ind = ind[ind != -1]\n positions = (snap['POS '][component][ind] - CM) / hubble0\n else:\n positions = (snap['POS '][component] - CM) / hubble0\n\n # We rotate the positions so that the galactic angular momentum is parallel to the z axis:\n rot_matrix = PCA_matrix(snap, subh)\n positions = np.dot(positions, rot_matrix)\n\n # We exclude all the particles laying outside the box post-rotation:\n ind = np.all([[abs(positions[:, 0]) < length/2], [abs(positions[:, 1]) < length/2], [abs(positions[:, 2]) < length/2]], axis=0)[0]\n positions = positions[ind]\n print('{} particles inside the grid'.format(np.sum(ind)))\n # We take HSML from snap for gas and we calculate the mean distance of the 32 nearest neighbors for other components:\n if component == 0:\n hsml = snap['HSML'][0][ind] / hubble0\n else:\n hsml = []\n nbrs = NearestNeighbors(n_neighbors=32, algorithm='auto').fit(positions)\n distances = nbrs.kneighbors(positions)[0]\n for d in distances:\n hsml.append(d.mean())\n hsml = np.array(hsml)\n\n magnitude = snap[quantity][component][ind] * 1e10 / hubble0 # cambio de unidades para masa\n\n grid3d = np.zeros((res, res, res))\n # Here we write the hsml and positions in grid units:\n h_grid = (2 * hsml * res / length).astype(int)\n # print(np.min(h_grid))\n # print(np.max(h_grid))\n\n pos_grid = (positions * res / length + res / 2).astype(int)\n\n # We depickle the kernels previously computed:\n pickle_in = open('kernel_list', 'rb')\n kernels = pickle.load(pickle_in)\n pickle_in.close()\n\n def addAtPos(mat1, mat2, pos):\n \"\"\"\n Add two 3-arrays of different sizes in place, offset by xyz coordinates\n Usage:\n - mat1: base matrix\n - mat2: add this matrix to mat1\n - pos: [x,y,z] containing coordinates\n \"\"\"\n x, y, z = pos[0], pos[1], pos[2]\n x1, y1, z1 = mat1.shape\n if np.size(mat2) == 1:\n mat1[x, y, z] += mat2\n else:\n x2, y2, z2 = mat2.shape\n\n # get slice ranges for matrix1\n x1min = max(0, x)\n y1min = max(0, y)\n z1min = max(0, z)\n x1max = max(min(x + x2, x1), 0)\n y1max = max(min(y + y2, y1), 0)\n z1max = max(min(z + z2, z1), 0)\n\n # get slice ranges for matrix2\n x2min = max(0, -x)\n y2min = max(0, -y)\n z2min = max(0, -z)\n x2max = min(-x + x1, x2)\n y2max = min(-y + y1, y2)\n z2max = min(-z + z1, z2)\n\n mat1[x1min:x1max, y1min:y1max, z1min:z1max] += mat2[x2min:x2max, y2min:y2max, z2min:z2max]\n return mat1\n\n l = 0\n for pos, h, mag in zip(pos_grid, h_grid, magnitude):\n l += 1\n # if l%(int(np.size(hsml)/10)) == 0:\n # print('Currently {}0%'.format(int(l//(np.size(hsml)/10))))\n # We just add the contribution of the particle:\n if h < res:\n # If hsml exceeds the maximum kernel available, we take the maximum kernel instead:\n kernel = kernels[min(h, np.size(kernels) - 1)]\n # If h = 0 we just add a point to the grid:\n grid3d += addAtPos(np.zeros((res, res, res)), mag * kernel, pos - 2*h + 1)\n\n axis3 = 3 - axis2 - axis1\n grid = np.sum(grid3d, axis=axis3)\n grid = np.transpose(grid)\n print('Finished!!!!!!!!!!!!!')\n return 
grid\n\n\n################################################################################################\n################################################################################################\n\n\ndef V_i_grid(snap, subh, component, axis1, axis2, length, res, i):\n \"\"\"\n Returns a res*res 2darray with the mean velocity in the i direction (0 is x, 1 is y and 2 is z) for the desired matter component (0 for gas, 1 for DM, 4 for stars)\n \"\"\"\n hubble0 = snap.header.hubble0[0]\n if component == 0:\n \tind = np.concatenate([match(subh.ids, snap['ID '][0]), match(subh.ids + 2**31, snap['ID '][0])])\n else:\n \tind = match(subh.ids, snap['ID '][component])\n ind = ind[ind != -1]\n\n CM = subh.pot_min\n\n # We rotate the positions so that the galactic angular momentum is parallel to the z axis:\n positions = (snap['POS '][component][ind] - CM) / hubble0\n\n rot_matrix = PCA_matrix(snap, subh)\n positions = np.dot(positions, rot_matrix)\n\n pos_1 = positions[:, axis1]\n pos_2 = positions[:, axis2]\n # We rotate the velocities:\n vel = snap['VEL '][component][ind] - Vcm(snap, subh)\n vel = np.dot(vel, rot_matrix)\n # and take the i-th component:\n v_i = vel[:, i]\n hist = np.histogram2d(pos_1, pos_2, bins=res, range=[[-length/2, length/2], [-length/2, length/2]], weights=v_i)\n return hist[0]\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":14317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
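The PCA_matrix function above diagonalises the covariance of the star positions and uses the eigenvectors as a rotation. A self-contained sketch of the same idea on random points (using np.linalg.eigh, a symmetric-matrix variant of the np.linalg.eig call in the record):

import numpy as np

rng = np.random.default_rng(0)
pos = rng.normal(size=(1000, 3)) * np.array([5.0, 2.0, 0.5])  # a flattened blob

cov = np.cov(pos.T)                   # 3x3 position covariance
eigval, eigvec = np.linalg.eigh(cov)  # eigh sorts eigenvalues ascending
rot = eigvec[:, ::-1]                 # columns: largest variance first
aligned = pos @ rot                   # z is now the shortest axis
print(aligned.std(axis=0))            # roughly [5.0, 2.0, 0.5]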
+{"seq_id":"10471611","text":"from collections import deque\nimport numpy as np\nimport random\n\nLAYERS = 4\nINSANE_MUTATIONS = False\nCELL_FUNCTIONS = [\n lambda x: x,\n lambda x: (x*2)**3,\n lambda x: 1/(10*x+1) if x >= 0 else 1/(10*x-1),\n]\n\nclass Neuron(object):\n def __init__(self):\n self.name = \"\"\n self.signal = 0\n# self.factor = random.random() * 10 - 5\n self.factor = 1\n self.rating = 1\n\n self.function = random.choice(CELL_FUNCTIONS)\n\n def set_inputs(self, inputs):\n # should be a tuple of (connection-strength, neuron, signal) items\n self.inputs = inputs\n\n def copy(self):\n result = Neuron()\n result.signal = self.signal\n result.rating = self.rating\n result.factor = self.factor\n\n def calculate(self):\n cumulative = 0\n count = 0\n for strength, neuron, signal in self.inputs:\n if signal and strength:\n cumulative += signal * strength\n count += 1\n if count:\n self.signal = cumulative / count * self.factor\n else:\n self.signal = 0\n\n def connect(self, neuron, strength):\n neuron.inputs.add((self, strength))\n self.outputs.add(neuron)\n\n def __repr__(self):\n return \"<%s signal=%f>\" % (self.name, self.signal)\n\n\nclass Sensor(Neuron):\n def __init__(self, name, function):\n Neuron.__init__(self)\n self.name = name\n self.sensor_function = function\n\n def calculate(self):\n self.signal = self.sensor_function()\n\n def copy(self):\n raise NotImplemented()\n\n\nclass Actuator(Neuron):\n def __init__(self, name, function):\n Neuron.__init__(self)\n self.name = name\n self.actuator_function = function\n\n def calculate(self):\n Neuron.calculate(self)\n self.actuator_function(self.signal)\n\n def copy(self):\n raise NotImplemented()\n\n\nclass NeuronCluster(object):\n def __init__(self, count, sensors, actuators):\n self.currentNeuron = 0\n self.sensors = [Sensor(name, function) \\\n for name, function in sensors]\n self.actuators = [Actuator(name, function) \\\n for name, function in actuators]\n self.neurons = [Neuron() for i in range(count)]\n self.sinks = self.actuators + self.neurons\n self.sources = self.sensors + self.neurons\n\n xpad = len(self.sensors)\n ypad = len(self.actuators)\n\n self.width = len(self.sources)\n self.height = len(self.sinks)\n\n perLayer = count // LAYERS\n self.connections = np.zeros([self.height, self.width])\n\n # Connect neurons in layers\n for layer in range(LAYERS):\n yStep = perLayer\n yStart = ypad + perLayer * layer\n yEnd = yStart + yStep\n xStep = (xpad if layer == 0 else perLayer)\n xStart = 0 if layer == 0 else xpad + (layer - 1) * perLayer\n xEnd = xStart + xStep\n self.connections[yStart:yEnd, xStart:xEnd] = np.random.rand(yStep, xStep)\n self.connections[0:ypad, xpad+(LAYERS-1)*perLayer:xpad+LAYERS*perLayer] = np.random.rand(ypad, perLayer)\n\n for y in range(self.connections.shape[0]):\n for x in range(self.connections.shape[1]):\n if self.connections[y][x]:\n self.connections[y][x] = self.connections[y][x] * 2 - 1\n\n# print(\"\")\n# print(self.connections)\n\n #self.connections[0:ypad, 0:xpad] = np.zeros([ypad, xpad])\n #for i in range(len(self.neurons)):\n #self.connections[ypad + i][xpad + i] = 0\n\n # Set random factors\n# for i in range(len(self.neurons)):\n# self.connections[ypad + i][xpad + i] = random.random() * 2 - 1\n\n def copy(self, neuronCluster):\n self.connections = neuronCluster.connections.copy()\n for this, other in zip(self.neurons, neuronCluster.neurons):\n this.function = other.function\n\n def sense(self):\n for i, sensor in enumerate(self.sensors):\n sensor.calculate()\n\n def step(self):\n 
i = self.currentNeuron\n neuron = self.sinks[i]\n# for i, neuron in enumerate(self.sinks):\n# neuron.set_inputs(tuple(zip(self.connections[i], self.sources,\n# [neuron.signal for neuron in self.sources])))\n neuron.set_inputs(tuple(zip(self.connections[i], self.sources,\n [neuron.signal for neuron in self.sources])))\n\n# xpad = len(self.sensors)\n# ypad = len(self.actuators)\n\n# for i, neuron in enumerate(self.neurons):\n# neuron.factor = self.connections[ypad + i][xpad + i]\n\n neuron.calculate()\n# for neuron in self.sinks:\n# neuron.calculate()\n\n self.currentNeuron = (self.currentNeuron + 1) % len(self.sinks)\n\n def mutate(self):\n for _ in range(20):\n y = random.randint(0, self.connections.shape[0] - 1)\n x = random.randint(0, self.connections.shape[1] - 1)\n if INSANE_MUTATIONS or self.connections[y][x]:\n self.connections[y][x] = random.random()\n break\n","sub_path":"cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
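NeuronCluster.__init__ above wires hidden layers through sub-diagonal blocks of a single connection matrix. A small sketch of that block layout with toy sizes (1 marks a connection; the record fills the blocks with random weights instead):

import numpy as np

layers, per_layer, n_sensors, n_actuators = 3, 2, 2, 1
width = n_sensors + layers * per_layer     # sources: sensors then neurons
height = n_actuators + layers * per_layer  # sinks: actuators then neurons
conn = np.zeros((height, width))
for layer in range(layers):
    y = n_actuators + layer * per_layer
    x = 0 if layer == 0 else n_sensors + (layer - 1) * per_layer
    x_w = n_sensors if layer == 0 else per_layer
    conn[y:y + per_layer, x:x + x_w] = 1  # each layer reads the previous layer
conn[:n_actuators, n_sensors + (layers - 1) * per_layer:] = 1  # last layer feeds actuators
print(conn)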
+{"seq_id":"386933329","text":"\"\"\"\nTesting for Extreme Learning Machine module (pyrcn.extreme_learning_machine)\n\"\"\"\nimport scipy\nimport numpy as np\n\nimport pytest\n\nfrom sklearn.datasets import load_iris, load_digits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\nfrom pyrcn.base import InputToNode\nfrom pyrcn.linear_model import IncrementalRegression\nfrom pyrcn.extreme_learning_machine import ELMClassifier, ELMRegressor\n\n\nX_iris, y_iris = load_iris(return_X_y=True)\n\n\ndef test_elm_regressor_jobs():\n print('\\ntest_elm_regressor_jobs():')\n X = np.linspace(0, 10, 2000)\n y = np.hstack((np.sin(X).reshape(-1, 1), np.cos(X).reshape(-1, 1)))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, random_state=42)\n param_grid = {\n 'input_to_node': [\n [('default', InputToNode(bias_scaling=10., hidden_layer_size=20, random_state=42))],\n [('default', InputToNode(bias_scaling=10., hidden_layer_size=50, random_state=42))]],\n 'regressor': [\n IncrementalRegression(alpha=.0001),\n IncrementalRegression(alpha=.01)],\n 'random_state': [42]\n }\n elm = GridSearchCV(ELMRegressor(), param_grid)\n elm.fit(X_train.reshape(-1, 1), y_train, n_jobs=2)\n y_elm = elm.predict(X_test.reshape(-1, 1))\n print(\"tests - elm:\\n sin | cos \\n {0}\".format(y_test-y_elm))\n print(\"best_params_: \".format(elm.best_params_))\n print(\"best_score: \".format(elm.best_score_))\n np.testing.assert_allclose(y_test, y_elm, atol=1e-1)\n\n\ndef test_elm_regressor_chunk():\n print('\\ntest_elm_regressor_chunk():')\n X = np.linspace(0, 10, 2000)\n y = np.hstack((np.sin(X).reshape(-1, 1), np.cos(X).reshape(-1, 1)))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, random_state=42)\n param_grid = {\n 'input_to_node__hidden_layer_size': [20, 50],\n 'input_to_node__input_scaling': [1.],\n 'input_to_node__bias_scaling': [10.],\n 'input_to_node__activation': ['tanh'],\n 'input_to_node__random_state': [42],\n 'chunk_size': [500],\n 'regressor__alpha': [1e-2, 1e-5],\n 'random_state': [42]\n }\n elm = GridSearchCV(ELMRegressor(), param_grid)\n elm.fit(X_train.reshape(-1, 1), y_train, n_jobs=2)\n y_elm = elm.predict(X_test.reshape(-1, 1))\n print(\"tests - elm:\\n sin | cos \\n {0}\".format(y_test-y_elm))\n print(\"best_params_: \".format(elm.best_params_))\n print(\"best_score: \".format(elm.best_score_))\n np.testing.assert_allclose(y_test, y_elm, atol=1e-1)\n\n\ndef test_iris_ensemble_iterative_regression():\n print('\\ntest_iris_ensemble_iterative_regression():')\n X_train, X_test, y_train, y_test = train_test_split(X_iris, y_iris, test_size=5, random_state=42)\n cls = ELMClassifier(\n input_to_node=[\n ('tanh', InputToNode(hidden_layer_size=10, random_state=42, activation='tanh')),\n ('bounded_relu', InputToNode(hidden_layer_size=10, random_state=42, activation='bounded_relu'))],\n regressor=IncrementalRegression(alpha=.01),\n random_state=42)\n\n for samples in np.split(np.arange(0, X_train.shape[0]), 5):\n cls.partial_fit(X_train[samples, :], y_train[samples], classes=np.arange(3, dtype=int))\n y_predicted = cls.predict(X_test)\n\n for record in range(len(y_test)):\n print('predicted: {0} \\ttrue: {1}'.format(y_predicted[record], y_test[record]))\n\n print('score: {0}'.format(cls.score(X_test, y_test)))\n print('proba: {0}'.format(cls.predict_proba(X_test)))\n print('log_proba: {0}'.format(cls.predict_log_proba(X_test)))\n assert cls.score(X_test, y_test) >= 4./5.\n\n\nif __name__ == \"__main__\":\n 
test_elm_regressor_jobs()\n test_elm_regressor_chunk()\n test_iris_ensemble_iterative_regression()\n","sub_path":"pyrcn/extreme_learning_machine/tests/test_elm.py","file_name":"test_elm.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"581530034","text":"from EmergencyGame.models import Option, Role, Task\n\nr1 = Role(role_name=\"Room coordinator\")\nr1.save()\nr2 = Role(role_name=\"People's safety\")\nr2.save()\n\no1 = Option(option_text=\"Yes\")\no1.save()\no2 = Option(option_text=\"No\")\no2.save()\no3 = Option(option_text=\"Maybe\")\no3.save()\no4 = Option(option_text=\"Only if it is really dirty\")\no4.save()\no5 = Option(option_text=\"Run\")\no5.save()\no6 = Option(option_text=\"Walk\")\no6.save()\no7 = Option(option_text=\"Skipping\")\no7.save()\no8 = Option(option_text=\"The classroom next door \")\no8.save()\no9 = Option(option_text=\"Home\")\no9.save()\no10 = Option(option_text=\"To designated area for emergencies outside\")\no10.save()\no11 = Option(option_text=\"110\")\no11.save()\no12 = Option(option_text=\"112\")\no12.save()\no13 = Option(option_text=\"113\")\no13.save()\no14 = Option(option_text=\"Open\")\no14.save()\no15 = Option(option_text=\"Closed\")\no15.save()\no16 = Option(option_text=\"Open and go through\")\no16.save()\no17 = Option(option_text=\"Keep closed and find another exit\")\no17.save()\no18 = Option(option_text=\"Try to cool it down\")\no18.save()\no19 = Option(option_text=\"crawl out of the building\")\no19.save()\no20 = Option(option_text=\"Run to the nearest exit\")\no20.save()\no21 = Option(option_text=\"Hold your breath and keep walking\")\no21.save()\no22 = Option(option_text=\"..put books in alphabetical order\")\no22.save()\no23 = Option(option_text=\"..find a book to take home\")\no23.save()\no24 = Option(option_text=\"..just leave\")\no24.save()\no25 = Option(option_text=\"Only if really valuable\")\no25.save()\n\n\nt1 = Task(role=r1, description=\"Keep the windows open or closed?\", max_score=100, correct_option=o15)\nt1.save()\nt1.options.add(o14)\nt1.options.add(o15)\nt2 = Task(role=r1, description=\"Clean the board now?\", max_score=100, correct_option=o2)\nt2.save()\nt2.options.add(o1)\nt2.options.add(o2)\nt2.options.add(o4)\nt3 = Task(role=r2, description=\"In what manner should the children leave the room?\", max_score=100, correct_option=o6)\nt3.save()\nt3.options.add(o6)\nt3.options.add(o5)\nt3.options.add(o7)\nt4 = Task(role=r1, description=\"Do you take your things with you?\", max_score=100, correct_option=o2)\nt4.save()\nt4.options.add(o1)\nt4.options.add(o2)\nt4.options.add(o25)\nt5 = Task(role=r2, description=\"Where do you go when you leave the classroom?\", max_score=100, correct_option=o10)\nt5.save()\nt5.options.add(o8)\nt5.options.add(o9)\nt5.options.add(o10)\nt6 = Task(role=r1, description=\"Which number do you call when a fire occurs?\", max_score=100, correct_option=o11)\nt6.save()\nt6.options.add(o11)\nt6.options.add(o12)\nt6.options.add(o13)\nt7 = Task(role=r2, description=\"What do you do if you meet a closed door that is warm?\", max_score=100, correct_option=o17)\nt7.save()\nt7.options.add(o16)\nt7.options.add(o17)\nt7.options.add(o18)\nt8 = Task(role=r2, description=\"If there is smoke in the room, what do you do?\", max_score=100, correct_option=o19)\nt8.save()\nt8.options.add(o19)\nt8.options.add(o20)\nt8.options.add(o21)\nt9 = Task(role=r1, description=\"Finish your homework before reacting to the fire alarm?\", max_score=100, correct_option=o2)\nt9.save()\nt9.options.add(o1)\nt9.options.add(o2)\nt10 = Task(role=r1, description=\"Should you..?\", max_score=100, correct_option=o24)\nt10.save()\nt10.options.add(o22)\nt10.options.add(o23)\nt10.options.add(o24)\nt11 = Task(role=r2, description=\"Close the window?\", max_score=100, 
correct_option=o1)\nt11.save()\nt11.options.add(o1)\nt11.options.add(o2)\nt11.options.add(o16)\n","sub_path":"DjangoApp/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"651115972","text":"import time\nimport threading\nfrom queue import Queue\nimport re\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n# from sys import argv\n\nimport pyperclip\nimport gallery_get\n\n\ndef is_imagefap(url):\n if url.startswith(\"https://www.imagefap.com\"):\n print(\"Found url: %s\" % str(url))\n return True\n return False\n\n\nDOWNLOAD_QUEUE = Queue()\nEXTRACTION_QUEUE = Queue()\n\n\ndef sort_url(url):\n if bool(re.match(\"(?s)^https://www.imagefap.com/pictures/[0-9]+/.*\", url)):\n DOWNLOAD_QUEUE.put(url)\n if bool(re.match(\"(?s)^https://www.imagefap.com/(?:profile|organizer)/[0-9]+/.*\", url)):\n EXTRACTION_QUEUE.put(url)\n\n\nclass ClipboardWatcher(threading.Thread):\n def __init__(self, predicate, callback, pause=5.):\n super(ClipboardWatcher, self).__init__()\n self._predicate = predicate\n self._callback = callback\n self._pause = pause\n self._stopping = False\n\n def run(self):\n recent_value = \"\"\n\n while not self._stopping:\n tmp_value = pyperclip.paste()\n if tmp_value != recent_value:\n recent_value = tmp_value\n\n for url in recent_value.splitlines():\n if self._predicate(url):\n self._callback(url)\n time.sleep(self._pause)\n\n def stop(self):\n self._stopping = True\n\n\nclass LinkExtractor(threading.Thread):\n def __init__(self, pause=5.):\n super().__init__()\n self._stopping = False\n self._pause = pause\n\n @staticmethod\n def get_links(url):\n html_page = urlopen(url)\n soup = BeautifulSoup(html_page, features=\"lxml\")\n links = []\n\n for link in soup.findAll('a', attrs={'href': re.compile(\"^/gallery\")}):\n links.append(\"https://www.imagefap.com\" + link.get('href'))\n\n return links\n\n def run(self):\n while not self._stopping:\n for gallery_url in self.get_links(EXTRACTION_QUEUE.get()):\n DOWNLOAD_QUEUE.put(gallery_url)\n time.sleep(self._pause)\n\n def stop(self):\n self._stopping = True\n\n\nclass Downloader(threading.Thread):\n def __init__(self, location, pause=5.):\n super().__init__()\n self._stopping = False\n self._pause = pause\n self._location = location\n\n def run(self):\n while not self._stopping:\n try:\n gallery_url = DOWNLOAD_QUEUE.get()\n print(\"Downloading \" + gallery_url)\n gallery_get.run(gallery_url, self._location)\n except Exception as e:\n print(e)\n time.sleep(self._pause)\n\n def stop(self):\n self._stopping = True\n\n\ndef main():\n # download_location = argv[1]\n download_location = \"V:\\\\imagefap\\\\test\"\n\n downloader = Downloader(download_location, 5)\n extractor = LinkExtractor(5)\n watcher = ClipboardWatcher(\n is_imagefap,\n sort_url,\n 3.\n )\n\n downloader.start()\n extractor.start()\n watcher.start()\n\n while True:\n try:\n # print(\"Waiting for changed clipboard...\")\n time.sleep(3)\n except KeyboardInterrupt:\n watcher.stop()\n extractor.stop()\n downloader.stop()\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"429134069","text":"import aiohttp\nimport discord\nfrom discord.ext import commands\n\nclass Bus(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession(loop=self.bot.loop)\n\n def cog_unload(self):\n self.bot.loop.create_task(self.session.detach())\n\n async def httpget(self, url):\n async with self.session.get(url) as response:\n return await response.json()\n\n @commands.command(aliases=[\"bus\", \"stop\"])\n async def realtime(self, message, stopnumber: int):\n body = await self.httpget(\n f\"https://data.smartdublin.ie/cgi-bin/rtpi/realtimebusinformation?stopid={stopnumber}&format=json\"\n )\n error = body[\"errormessage\"]\n response = body[\"results\"]\n departure_data = []\n\n if not error:\n for bus in response:\n departure_data.append(f\"*{bus['route']}* to {bus['destination']} - in **{bus['duetime']}** min(s). \\n\")\n\n InfoEmbed = discord.Embed(\n colour=0x36393e,\n title=f\"Departues for stop: {stopnumber}\",\n description=\"\\n\".join(departure_data),\n )\n await message.send(embed=InfoEmbed)\n else:\n await message.send(error)\n\n\ndef setup(bot):\n bot.add_cog(Bus(bot))","sub_path":"commands/bus.py","file_name":"bus.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"364408221","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport sys\nsys.path.append('../')\nfrom package import data as datagen\n\ndef load_train_images():\n (train_data, val_data, test_data) = datagen.get_data_web([6,7], 0, [8,8], 2, sample_size=1000)\n \n (train_images, train_labels) = train_data\n train_images = np.reshape(train_images, [1000, 1, 8, 8])\n train_images = torch.from_numpy(train_images).to(dtype=torch.float32)\n train_labels = torch.from_numpy(train_labels).to(dtype=torch.int64)\n \n (test_images, test_labels) = test_data\n test_images = np.reshape(test_images, [1000, 1, 8, 8])\n test_images = torch.from_numpy(test_images).to(dtype=torch.float32)\n test_labels = torch.from_numpy(test_labels).to(dtype=torch.int64)\n \n return (train_images, train_labels, test_images, test_labels)\n \nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, kernel_size=2)\n self.bn1 = nn.BatchNorm2d(20)\n self.conv1_drop = nn.Dropout2d()\n self.conv2 = nn.Conv2d(20, 40, kernel_size=2)\n self.bn2 = nn.BatchNorm2d(40)\n self.conv2_drop = nn.Dropout2d()\n self.conv3 = nn.Conv2d(40, 80, kernel_size=2) \n self.bn3 = nn.BatchNorm2d(80)\n self.conv3_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 80)\n self.fc2 = nn.Linear(80, 10)\n self.fc3 = nn.Linear(10, 2)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x): #[samp, 1, 8, 8]\n x = self.conv1(x) #[samp, 20, 7, 7]\n self.bn1(x)\n self.conv1_drop(x)\n x = self.conv2(x) #[samp, 40, 6, 6]\n self.bn2(x)\n self.conv2_drop(x)\n x = F.relu(F.max_pool2d(x, 2)) #[samp, 80, 3, 3]\n x = self.conv3(x) #[samp, 80, 2, 2]\n self.bn3(x)\n self.conv3_drop(x) #[samp, 80, 2, 2]\n x = F.relu(x)\n x = x.view(-1, 320) #[samp, 320]\n x = self.fc1(x) #[samp, 80]\n x = F.dropout(x, training=self.training)\n x = F.relu(self.fc2(x)) #[samp, 10]\n x = F.dropout(x, training=self.training)\n x = self.fc3(x) #[samp, 2]\n x = self.sigmoid(x)\n return x\n\ndef train(network, optimizer, train_images, train_labels):\n network.train()\n batch_idx = 0\n batch_iter_train = datagen.batch_generator(train_images, train_labels, 100)\n for (data, target) in batch_iter_train:\n batch_idx += 1\n optimizer.zero_grad()\n output = network(data)\n loss = ((output - target)**2).mean()\n loss.backward()\n optimizer.step()\n \ndef test(network, test_images, test_labels):\n network.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n batch_iter_test = datagen.batch_generator(test_images, test_labels, 1000)\n for data, target in batch_iter_test:\n output = network(data)\n test_loss += ((output - target)**2).mean()\n correct += get_accuracy(output, target)\n\n return correct\n\ndef get_accuracy(output, target):\n output_index = np.argmax(output, axis=1)\n target_index = np.argmax(target, axis=1)\n compare = output_index - target_index\n compare = compare.numpy()\n num_correct = float(np.sum(compare == 0))\n total = float(output_index.shape[0])\n accuracy = num_correct / total\n return accuracy\n\ndef main():\n (train_images, train_labels, test_images, test_labels) = load_train_images()\n network = Net()\n optimizer = optim.Adam(network.parameters())\n \n print('Test Accuracy: %.3f'%test(network, test_images, test_labels))\n for epoch in range(1, 101):\n print('Epoch: %s'%epoch)\n train(network, optimizer, train_images, train_labels)\n print('Test Accuracy: %.3f'%test(network, test_images, test_labels))\n \n torch.save(network.state_dict(), 
'../trained_models/samp1000_size8_dig67.pth')\n print('Model saved')\n \nif __name__ == \"__main__\":\n main()","sub_path":"package/train_cnn_size8.py","file_name":"train_cnn_size8.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
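The layer-shape comments in Net.forward follow from the stride-1, zero-padding convolution formula out = in - kernel + 1, with max-pooling floor-dividing the spatial size; a quick check that the flattened size matches fc1:

def conv_out(size, kernel, stride=1, padding=0):
    return (size + 2 * padding - kernel) // stride + 1

s = conv_out(8, 2)    # conv1: 8 -> 7
s = conv_out(s, 2)    # conv2: 7 -> 6
s = s // 2            # max_pool2d(2): 6 -> 3
s = conv_out(s, 2)    # conv3: 3 -> 2
print(s, 80 * s * s)  # 2 320 -> matches x.view(-1, 320) and nn.Linear(320, 80)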
+{"seq_id":"98628304","text":"from django.shortcuts import get_object_or_404\nfrom products.models import Product\n\n\ndef get_cart_items_and_total(cart):\n total = 0\n cart_items = []\n for product_id, quantity in cart.items():\n product = get_object_or_404(Product, pk=product_id)\n item_total = quantity * product.bruto_price\n total += item_total\n cart_items.append({'product':product, 'quantity': quantity, 'total': item_total}) \n \n \n return {'cart_items' : cart_items, 'total': total}","sub_path":"cart/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"260135602","text":"#\n# API ROUTES\n#\n\nfrom flask import redirect, jsonify, request\n\nimport storage\n\nfrom main import app\nfrom main import verify_login\n\n@app.route(\"/content/\")\n@app.route(\"/content/\")\ndef get_content(search=None):\n response = verify_login(request)\n if response:\n return response\n items = storage.get_notes(search)\n data = { \"data\": items }\n return jsonify(data)\n\n@app.route(\"/remove/\")\ndef get_remove(id):\n response = verify_login(request)\n if response:\n return response\n storage.delete_note(id)\n return redirect(\"/notes\")\n","sub_path":"api_routes.py","file_name":"api_routes.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"208759228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"ROS2 Twist to Jetbot Move.\n\nThis script subscribes to \"/cmd_vel\" topic, reads Twist message, and moves the\nJetbot.\n\nRevision History:\n 2021-08-18 (Animesh): Baseline Software.\n\nExample:\n $ colcon build && source install/setup.bash && ros2 run ros2_twist_message_to_robot_motion execute\n $ source install/setup.bash && ros2 run ros2_twist_message_to_robot_motion execute\n $ ros2 run ros2_twist_message_to_robot_motion execute\n\n\"\"\"\n\n\n#___Import Modules:\n\nimport atexit\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT\nimport traitlets\nfrom traitlets.config.configurable import Configurable, SingletonConfigurable\n\nimport rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Twist\n\n\n#___Global Variables:\nSUBSCRIBE_TOPIC = '/cmd_vel'\nXCAL = 0.50 #Calibration X\nZCAL = 0.25 #Calibration Z\n\n\n#__Classes\nclass Twist_to_Motion(Node):\n \"\"\"TWIST to Jetbot Move Class.\n \n This class contains all methods to read TWIST message and move the Jetbot. \n \n \"\"\"\n\n def __init__(self):\n \n super().__init__('twist_to_motion')\n \n # initialize robot\n self.robot = Robot()\n \n # initialize subscriber\n self.subscription = self.create_subscription(\n Twist,\n SUBSCRIBE_TOPIC,\n self.listener_callback,\n 10)\n self.subscription # prevent unused variable warning\n\n\n def listener_callback(self, msg):\n \n # parses data from subscribed topic message\n x = XCAL*float(msg.linear.x)\n z = ZCAL*float(msg.angular.z)\n \n # control robot movement\n # both wheel same state\n if z == 0:\n # total stop\n if x == 0:\n self.robot.stop()\n else:\n self.robot.set_motors(-x, -x)\n \n # one wheel moving\n if x == 0:\n # rotate right\n if z > 0:\n self.robot.set_motors(0, -z)\n # rotate left\n elif z < 0:\n self.robot.set_motors(-z, 0)\n \n # moving forward\n elif x > 0:\n # rotate right\n if z > 0:\n self.robot.set_motors(-x/2, -(x+z)/2)\n # rotate left\n elif z < 0:\n self.robot.set_motors(-(x-z)/2, -x/2)\n \n # moving backward\n elif x < 0:\n # rotate right\n if z > 0:\n self.robot.set_motors(-x/2, -(x-z)/2)\n # rotate left\n elif z < 0:\n self.robot.set_motors(-(x+z)/2, -x/2)\n\n\n\n# Jetbot Motor Driver Classes From NVIDIA-AI-IOT\n# Ref: https://github.com/NVIDIA-AI-IOT/jetbot/blob/master/jetbot/motor.py\nclass Motor(Configurable):\n\n value = traitlets.Float()\n \n # config\n alpha = traitlets.Float(default_value=1.0).tag(config=True)\n beta = traitlets.Float(default_value=0.0).tag(config=True)\n\n def __init__(self, driver, channel, *args, **kwargs):\n super(Motor, self).__init__(*args, **kwargs) # initializes traitlets\n\n self._driver = driver\n self._motor = self._driver.getMotor(channel)\n if(channel == 1):\n self._ina = 1\n self._inb = 0\n else:\n self._ina = 2\n self._inb = 3\n atexit.register(self._release)\n \n @traitlets.observe('value')\n def _observe_value(self, change):\n self._write_value(change['new'])\n\n def _write_value(self, value):\n \"\"\"Sets motor value between [-1, 1]\"\"\"\n mapped_value = int(255.0 * (self.alpha * value + self.beta))\n speed = min(max(abs(mapped_value), 0), 255)\n self._motor.setSpeed(speed)\n if mapped_value < 0:\n self._motor.run(Adafruit_MotorHAT.FORWARD)\n # The two lines below are required for the Waveshare JetBot Board only\n self._driver._pwm.setPWM(self._ina,0,0)\n self._driver._pwm.setPWM(self._inb,0,speed*16)\n else:\n self._motor.run(Adafruit_MotorHAT.BACKWARD)\n # The two lines below are required for the Waveshare JetBot Board only\n 
self._driver._pwm.setPWM(self._ina,0,speed*16)\n self._driver._pwm.setPWM(self._inb,0,0)\n\n def _release(self):\n \"\"\"Stops motor by releasing control\"\"\"\n self._motor.run(Adafruit_MotorHAT.RELEASE)\n # The two lines below are required for the Waveshare JetBot Board only\n self._driver._pwm.setPWM(self._ina,0,0)\n self._driver._pwm.setPWM(self._inb,0,0)\n\n\n# Jetbot Robot Driver Classes From NVIDIA-AI-IOT\n# Ref: https://github.com/NVIDIA-AI-IOT/jetbot/blob/master/jetbot/robot.py\nclass Robot(SingletonConfigurable):\n \n left_motor = traitlets.Instance(Motor)\n right_motor = traitlets.Instance(Motor)\n\n # config\n i2c_bus = traitlets.Integer(default_value=1).tag(config=True)\n left_motor_channel = traitlets.Integer(default_value=1).tag(config=True)\n left_motor_alpha = traitlets.Float(default_value=1.0).tag(config=True)\n right_motor_channel = traitlets.Integer(default_value=2).tag(config=True)\n right_motor_alpha = traitlets.Float(default_value=1.0).tag(config=True)\n \n def __init__(self, *args, **kwargs):\n super(Robot, self).__init__(*args, **kwargs)\n self.motor_driver = Adafruit_MotorHAT(i2c_bus=self.i2c_bus)\n self.left_motor = Motor(self.motor_driver, channel=self.left_motor_channel, alpha=self.left_motor_alpha)\n self.right_motor = Motor(self.motor_driver, channel=self.right_motor_channel, alpha=self.right_motor_alpha)\n \n def set_motors(self, left_speed, right_speed):\n self.left_motor.value = left_speed\n self.right_motor.value = right_speed\n \n def forward(self, speed=1.0, duration=None):\n self.left_motor.value = speed\n self.right_motor.value = speed\n\n def backward(self, speed=1.0):\n self.left_motor.value = -speed\n self.right_motor.value = -speed\n\n def left(self, speed=1.0):\n self.left_motor.value = -speed\n self.right_motor.value = speed\n\n def right(self, speed=1.0):\n self.left_motor.value = speed\n self.right_motor.value = -speed\n\n def stop(self):\n self.left_motor.value = 0\n self.right_motor.value = 0\n\n\n#___Main Method:\ndef main(args=None):\n rclpy.init(args=args)\n\n twist_to_motion = Twist_to_Motion()\n\n rclpy.spin(twist_to_motion)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n twist_to_motion.destroy_node()\n rclpy.shutdown()\n\n\n#___Driver Program:\nif __name__ == '__main__':\n main()\n\n\n# \n# end of file\n\"\"\"ANI717\"\"\"\n","sub_path":"robot_ws/src/ros2-twist-message-to-robot-motion/ros2_twist_message_to_robot_motion/jetbot_motion.py","file_name":"jetbot_motion.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
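listener_callback above mixes linear.x and angular.z into left/right motor values (negative values drive forward on this board). The same mixing as a pure function, checked on two inputs:

XCAL, ZCAL = 0.50, 0.25  # same calibration constants as the record

def mix(linear_x, angular_z):
    x, z = XCAL * linear_x, ZCAL * angular_z
    if z == 0:
        return (0.0, 0.0) if x == 0 else (-x, -x)
    if x == 0:
        return (0.0, -z) if z > 0 else (-z, 0.0)
    if x > 0:
        return (-x / 2, -(x + z) / 2) if z > 0 else (-(x - z) / 2, -x / 2)
    return (-x / 2, -(x - z) / 2) if z > 0 else (-(x + z) / 2, -x / 2)

print(mix(1.0, 0.0))  # straight ahead: (-0.5, -0.5)
print(mix(0.0, 1.0))  # rotate right:   (0.0, -0.25)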
+{"seq_id":"636497890","text":"import pyperclip\nimport unittest\nfrom contact import Contact\n\n\nclass TestContact(unittest.TestCase):\n\n '''\n Test class that defines test cases for the contact class behaviours\n\n Args:\n unittest.TestCase: Testcase class that helps in creating test cases\n\n '''\n\n def setUp(self):\n '''\n Set up method to run before each test cases.\n '''\n self.new_contact = Contact(\"Irene\", \"Adler\", \"98378973\", \"iadler@gmail.com\")\n\n def test_init(self):\n '''\n test_init test case to test if the object is initialized properly\n '''\n\n self.assertEqual(self.new_contact.first_name, \"Irene\")\n self.assertEqual(self.new_contact.last_name, \"Adler\")\n self.assertEqual(self.new_contact.phone_number, \"98378973\")\n self.assertEqual(self.new_contact.email, \"iadler@gmail.com\")\n\n def test_save_contact(self):\n '''\n test_save_contact test case to test if the contact object is saved into\n the contact list\n '''\n self.new_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 1)\n\n def tearDown(self):\n '''\n tearDown method that does cleanup after each test case has run.\n '''\n Contact.contact_list = []\n\n def test_save_multiple_contact(self):\n '''\n test_save_multiple to check if we can save multiple contact\n objects to our contact_list\n '''\n self.new_contact.save_contact()\n test_contact = Contact(\n \"Irene\", \"Adler\", \"98378973\", \"iadler@gmail.com\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)\n\n def test_delete_contact(self):\n '''\n test_delete_contact to test if we can remove a contact from our contact list\n '''\n self.new_contact.save_contact()\n test_contact = Contact(\n \"Irene\", \"Adler\", \"98378973\", \"iadler@gmail.com\")\n test_contact.save_contact()\n\n self.new_contact.delete_contact()\n\n self.assertEqual(len(Contact.contact_list), 1)\n\n def test_find_contact_by_number(self):\n '''\n test to check if we can find a contact by phone number and display information\n '''\n\n self.new_contact.save_contact()\n test_contact = Contact(\n \"Irene\", \"Adler\", \"98378973\", \"iadler@gmail.com\")\n test_contact.save_contact()\n\n found_contact = Contact.find_by_number(\"98378973\")\n\n self.assertEqual(found_contact.email, test_contact.email)\n\n def test_contact_exists(self):\n '''\n test to check if we can return a Boolean if we cannot find the contact.\n '''\n\n self.new_contact.save_contact()\n test_contact = Contact(\n \"Irene\", \"Adler\", \"98378973\", \"iadler@gmail.com\")\n test_contact.save_contact()\n\n contact_exists = Contact.contact_exists(\"98378973\")\n\n self.assertTrue(contact_exists)\n\n def test_display_all_contacts(self):\n '''\n method that returns a list of all contacts saved\n '''\n\n self.assertEqual(Contact.display_contacts(), Contact.contact_list)\n\n def test_copy_email(self):\n '''\n Test to confirm thet we are copying the email address from a found contact\n '''\n\n self.new_contact.save_contact()\n Contact.copy_email(\"98378973\")\n\n self.assertEqual(self.new_contact.email,pyperclip.paste())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"contact_test.py","file_name":"contact_test.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"555256301","text":"import numpy as np\nimport pandas as pd\n\nfrom scipy.stats import f_oneway\n\nfrom tqdm import tqdm\n\nclass MABFramework(object):\n available_strategies = ['static-one-fits-all', 'dynamic-one-fits-all','contextual-one-fits-one']\n \n def __init__(self,strategy,n_actions,\n rstate=42,\n static_min_steps = 1,\n alphas=[],betas=[],\n modelling_approach=None,\n modelling_approach_pms=None\n ):\n if n_actions <= 0:\n raise ValueError('Invalid number of actions, it should be a positive number')\n elif strategy not in MABFramework.available_strategies:\n raise ValueError('Unrecognised MAB strategy, available strategies are {}'.format(MABFramework.available_strategies))\n elif (strategy=='dynamic-one-fits-all') and ((n_actions != len(alphas)) or (n_actions != len(betas))):\n raise ValueError('Cannot run a dynamic strategy without specified priors')\n elif (strategy=='contextual-one-fits-all') and ((modelling_approach is None) or (modelling_approach_pms is None)):\n raise ValueError('Cannot run a contextual strategy if a modelling approach and parameters are not provided') \n\n \n self.strategy = strategy\n self.n_actions = n_actions\n \n if self.strategy == 'static-one-fits-all':\n self.static_status = None\n self.static_min_steps = static_min_steps\n elif self.strategy == 'dynamic-one-fits-all':\n self.alphas = alphas\n self.betas = betas\n self.thompson_pms = pd.DataFrame(columns=['alphas','betas'])\n else: \n self.predictive_units = [modelling_approach(**modelling_approach_pms)]*self.n_actions\n \n self.current_data = pd.DataFrame()\n np.random.seed(rstate)\n \n def append_data(self, new_data_batch):\n \n if not len(self.current_data):\n self.current_data = pd.concat([self.current_data,new_data_batch])\n else:\n column_check = self.current_data.columns.intersection(new_data_batch.columns)\n if not len(column_check):\n raise ValueError('The new data batch has not the same column names as current data, stopping experiment')\n else:\n self.current_data = pd.concat([self.current_data,new_data_batch])\n \n def observe_rewards(self, new_data_batch, reward_columns):\n \n nrows=len(new_data_batch)\n new_data_batch['action_code'] = self.best_actions\n self.append_data(new_data_batch.drop(columns = reward_columns))\n \n def warm_up(self,incoming_data_batch):\n if self.strategy == 'static-one-fits-all':\n self.best_actions = np.random.choice(range(self.n_actions),size=len(incoming_data_batch))\n elif self.strategy == 'dynamic-one-fits-all':\n arms_average_rewards = np.random.beta(self.alphas,self.betas,[len(incoming_data_batch),self.n_actions])\n self.best_actions = np.argmax(arms_average_rewards,axis=1).tolist()\n elif self.strategy == 'contextual-one-fits-one':\n self.best_actions = np.random.choice(range(self.n_actions),size=len(incoming_data_batch))\n \n def apply_decision_policy(self, incoming_data_batch,step):\n \n if not(len(self.current_data)):\n self.warm_up(incoming_data_batch)\n else:\n if self.strategy == 'static-one-fits-all':\n if self.static_status != 'converged':\n self.static_one_fits_all('action_code','reward',incoming_data_batch,step)\n elif self.strategy == 'dynamic-one-fits-all':\n self.dynamic_one_fits_all('action_code','reward',incoming_data_batch,step)\n elif self.strategy == 'contextual-one-fits-one':\n self.contextual_one_fits_one('action_code','reward',incoming_data_batch,step)\n \n def static_one_fits_all(self, actions_column, reward_column, incoming_data_batch, step):\n \n n_choices = len(incoming_data_batch)\n \n grouped_dataset = 
self.current_data.groupby(by=[actions_column])[reward_column].agg([('n_trials','count'),('p','mean')])\n grouped_dataset['std_err'] = np.sqrt(grouped_dataset['p']*(1-grouped_dataset['p']))/np.sqrt(grouped_dataset['n_trials'])\n \n list_of_samples = []\n for idx in grouped_dataset.index:\n list_of_samples.append(np.random.normal(loc=grouped_dataset.loc[idx,'p'],scale=grouped_dataset.loc[idx,'std_err'],size=grouped_dataset.loc[idx,'n_trials']))\n \n pvalue = f_oneway(*list_of_samples)[1]\n if pvalue <= .05 and step>self.static_min_steps:\n self.static_status = 'converged'\n self.best_actions = [np.argmax(grouped_dataset['p'].values)]*n_choices\n \n def dynamic_one_fits_all(self, actions_column, reward_column, incoming_data_batch, step):\n \n n_choices = len(incoming_data_batch)\n \n grouped = self.current_data.groupby(by=[actions_column])[reward_column]\n self.alphas = grouped.sum().values.ravel()\n mask = self.alphas == 0.\n self.alphas[mask] = 1. \n self.betas = grouped.count().values.ravel()-self.alphas\n arms_average_rewards = np.random.beta(self.alphas,self.betas,[n_choices,self.n_actions])\n \n self.best_actions = np.argmax(arms_average_rewards,axis=1).tolist()\n self.thompson_pms.loc[step,'alphas'] = self.alphas\n self.thompson_pms.loc[step,'betas'] = self.betas\n \n def contextual_one_fits_one(self, actions_column, reward_column, incoming_data_batch, step):\n \n predictors = self.current_data.drop(columns=['reward','action_code']).columns\n sampled_probs = np.zeros([len(incoming_data_batch),self.n_actions])\n \n for action_id in range(self.n_actions):\n subset = self.current_data[self.current_data['action_code']==action_id]\n X = pd.get_dummies(subset[predictors],drop_first=True).values\n y = subset['reward'].values\n self.predictive_units[action_id].fit(X,y)\n \n X_in = pd.get_dummies(incoming_data_batch[predictors],drop_first=True).values\n predicted_probs = self.predictive_units[action_id].predict(X_in)\n sampling_indices = np.random.choice(range(predicted_probs.shape[1]),size=len(predicted_probs))\n sampled_probs[:,action_id] = np.array([predicted_probs[row,sampling_indices[row]] for row in range(predicted_probs.shape[0])])\n\n self.best_actions = np.argmax(sampled_probs,axis=1).tolist()\n \ndef run_experiment(experiment_data, batch_size, exp_class, exp_pms, return_exp_obj=False):\n \n n_steps = experiment_data.shape[0]//batch_size\n uneven = experiment_data.shape[0]%batch_size\n \n exp_obj = exp_class(**exp_pms)\n action_cols = [column for column in experiment_data.columns if 'action' in column]\n for step in tqdm(range(n_steps)):\n incoming_data = experiment_data[step*batch_size:(step+1)*batch_size].copy()\n exp_obj.apply_decision_policy(incoming_data,step)\n rewards = incoming_data[action_cols].values\n incoming_data['reward'] = [rewards[idx,exp_obj.best_actions[idx]] for idx in range(len(incoming_data))]\n incoming_data['action_code'] = exp_obj.best_actions\n exp_obj.append_data(incoming_data.drop(columns = action_cols))\n \n if uneven:\n incoming_data = experiment_data[(step+1)*batch_size:].copy()\n exp_obj.apply_decision_policy(incoming_data,step)\n rewards = incoming_data[action_cols].values\n incoming_data['reward'] = [rewards[idx,exp_obj.best_actions[idx]] for idx in range(len(incoming_data))]\n incoming_data['action_code'] = exp_obj.best_actions[:len(incoming_data)]\n exp_obj.append_data(incoming_data.drop(columns = action_cols))\n \n if return_exp_obj:\n return exp_obj\n else:\n return 
exp_obj.current_data['reward'].mean()","sub_path":"contextual_mab/experiments/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":8105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
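dynamic_one_fits_all above is Thompson sampling: one Beta draw per arm from observed successes (alphas) and failures (betas), then argmax. The core of that policy with hypothetical counts:

import numpy as np

rng = np.random.default_rng(42)
alphas = np.array([12.0, 30.0, 5.0])          # hypothetical rewards per arm
betas = np.array([88.0, 70.0, 95.0])          # hypothetical non-rewards per arm
draws = rng.beta(alphas, betas, size=(4, 3))  # one Beta sample per visitor and arm
print(draws.argmax(axis=1))                   # arm played for each of 4 visitors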
+{"seq_id":"149081797","text":"import datetime\nimport schema\nfrom collections import namedtuple, defaultdict\nfrom itertools import compress\nfrom dbmodel import Rss, Data, Tokenized, SQL_DB\n\nSQL_SCRIPT = 'db.sql'\nDbRow = namedtuple('DbRow', 'source_name title published data')\n\n\ndef create():\n \"\"\"Creates db from model and executes sql script to fill tables\"\"\"\n Rss.create_table()\n Data.create_table()\n Tokenized.create_table()\n with open(SQL_SCRIPT) as f:\n for line in f:\n SQL_DB.execute_sql(line.strip())\n\n\ndef get_feeds():\n \"\"\"Returns list of feeds\"\"\"\n return [i for i in Rss.select()]\n\n\ndef get_descriptions(date_point=None, date_range=None):\n \"\"\"Returns list of descriptions within given date range,\n from given date point\"\"\"\n if not date_point:\n date_point = datetime.datetime.now()\n if not date_range:\n date_range = datetime.timedelta(hours=12)\n dt = get_datetime(date_point, date_range)\n rows = Tokenized.select().join(Data).where(\n (Data.published <= date_point) &\n (Data.published >= dt)).order_by(Tokenized.data_id.desc())\n result = QueryResult('data_id', 'name', 'title', 'published', 'data')\n for row in rows:\n result.add_bunch(data_id=row.data_id.data_id,\n name=row.data_id.rss_id.name,\n title=row.data_id.title,\n published=row.data_id.published.isoformat(),\n data=schema.deserialize_plain(str(row.tokenized_plain)))\n return result\n\n\nclass QueryResult(object):\n\n def __init__(self, *args):\n self.keys = set(args)\n self.storage = defaultdict(list)\n self.processed = None\n\n def __nonzero__(self):\n return any(self.storage.itervalues())\n\n def __getitem__(self, key):\n return self.storage[key]\n\n def add(self, key, value):\n self.storage[key].append(value)\n\n def add_bunch(self, **kwargs):\n for key, val in kwargs.iteritems():\n self.add(key, val)\n\n def process(self, process_fun, **kwargs):\n return process_fun(self.storage['data'], **kwargs)\n\n def filter(self, data, mask_fun):\n mask = mask_fun(data)\n for key, val in self.storage.iteritems():\n self.storage[key] = [item for item in compress(val, mask)]\n tmp = data[mask, :]\n return tmp[:, mask]\n\n def __repr__(self):\n return \"\".format(self.keys)\n\n\ndef get_datetime(date_point=None, time_delta=None):\n if not date_point:\n date_point = datetime.datetime.now()\n if not time_delta:\n time_delta = datetime.timedelta(hours=36)\n return date_point - time_delta\n\n\ndef insert_data(data):\n new_row = Data()\n new_row.rss_id = data['rss_id']\n new_row.identifier = data['identifier']\n new_row.published = data['published']\n new_row.last_modified = datetime.datetime.now()\n new_row.description = data['description']\n new_row.description_hash = data['description_hash']\n new_row.title = data['title']\n new_row.link = data['link']\n new_row.save()\n return new_row.data_id\n\n\ndef update_data(row, data):\n row.published = data['published']\n row.last_modified = datetime.datetime.now()\n row.description = data['description']\n row.description_hash = data['description_hash']\n row.title = data['title']\n row.save()\n return row.data_id\n\n\ndef insert_update_tokenized(data):\n \"\"\"Inserts or updates tokenized data\"\"\"\n if Tokenized.select().where(\n Tokenized.data_id == data['data_id']).exists():\n row = Tokenized.get(\n Tokenized.data_id == data['data_id'])\n row.tokenized_count = data['tokenized_count']\n row.tokenized_plain = data['tokenized_plain']\n row.save()\n result = row.tokenized_id\n else:\n new_row = Tokenized()\n new_row.data_id = data['data_id']\n 
new_row.tokenized_count = data['tokenized_count']\n new_row.tokenized_plain = data['tokenized_plain']\n new_row.save()\n result = new_row.tokenized_id\n return result\n\n\ndef insert_update_data(data, cache_time_delta=None):\n \"\"\"Inserts new parsed feed or updates existing parsed feed\"\"\"\n dt = get_datetime(time_delta=cache_time_delta)\n if Data.select().where(\n (Data.last_modified > dt) &\n (Data.identifier == data['identifier'])).exists():\n row = Data.get(\n (Data.last_modified > dt) &\n (Data.identifier == data['identifier']))\n result = update_data(row, data)\n elif Data.select().where(\n (Data.last_modified > dt) &\n (Data.rss_id == data['rss_id']) &\n (Data.description_hash == data['description_hash'])).exists():\n row = Data.get(\n (Data.last_modified > dt) &\n (Data.rss_id == data['rss_id']) &\n (Data.description_hash == data['description_hash']))\n result = update_data(row, data)\n else:\n result = insert_data(data)\n return result\n\n\ndef insert_update_feed(data):\n \"\"\"Updates existing feed data\"\"\"\n if Rss.select().where(Rss.rss_id == data['rss_id']).exists():\n row = Rss.get(Rss.rss_id == data['rss_id'])\n row.last_access = datetime.datetime.now()\n row.etag = data['etag']\n row.modified_parsed = data['modified_parsed']\n row.save()\n","sub_path":"rss/collector/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
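The QueryResult.filter method in the record above keeps the columnar storage and a pairwise matrix aligned by applying one boolean mask to both: itertools.compress for the per-key lists, boolean indexing for the matrix. A minimal Python 3 sketch of the same pattern with made-up data (the original class targets Python 2, hence iteritems/itervalues/__nonzero__):

from collections import defaultdict
from itertools import compress

import numpy as np

# Columnar storage keyed by field name, as in QueryResult.
storage = defaultdict(list)
for name, title in [("feedA", "t1"), ("feedB", "t2"), ("feedC", "t3")]:
    storage["name"].append(name)
    storage["title"].append(title)

# A pairwise matrix whose rows and columns line up with the stored items.
data = np.arange(9).reshape(3, 3)

# One boolean mask drives both: compress() filters every column list,
# and boolean indexing drops the same rows and columns from the matrix.
mask = [True, False, True]
for key, val in storage.items():
    storage[key] = list(compress(val, mask))
data = data[mask, :][:, mask]

print(dict(storage))  # {'name': ['feedA', 'feedC'], 'title': ['t1', 't3']}
print(data)           # [[0 2] [6 8]]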
+{"seq_id":"45252494","text":"\"\"\"\nAt the core of a rating and of Nordic Credit Rating's business model is the\nissuer. This model defines an issuer, including meta data linked to the\nissuer.\n\"\"\"\nfrom django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator, validate_email\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom tinymce import HTMLField\n\nfrom a_helper.static_database_table.models.gics import GICSSubIndustry\nfrom datalake.gleif.command import GLEIFEditor\nfrom a_helper.other.sql import SQL\n\nfrom simple_history.models import HistoricalRecords\n\nALLOWED_ISSUER_TYPES = (\n (1, 'Corporate'),\n (2, 'Financial institution'),\n (3, 'Real estate corporate')\n)\n\n\nclass ProcessQuerySet(models.QuerySet):\n \"\"\"ProcessQuerySet class.\"\"\"\n\n def is_live(self):\n \"\"\"Return all decisions that have not been deleted.\"\"\"\n return self.filter(inactivated__isnull=True)\n\n def list_eligible_for_assessment(self):\n \"\"\"List all issuers that are eligible for getting a credit\n assessment.\"\"\"\n\n sql = '''\n SELECT i.id\n , i.legal_name\n FROM public.issuer_issuer as i\n WHERE i.issuer_type_id IN (1, 3)\n AND i.gics_sub_industry_id IS NOT NULL\n AND i.inactivated IS NULL\n AND i.id NOT IN (SELECT issuer_id\n FROM public.rating_process_ratingdecision)\n AND i.id NOT IN (SELECT issuer_id\n FROM public.credit_assessment_assessmentjob)\n ORDER BY i.legal_name\n ''' # noqa: E501\n\n return self.raw(sql)\n\n def list_all(self):\n \"\"\"Get all companies using an optimized query.\"\"\"\n\n sql = '''\n SELECT i.id\n , gs.name as sector_name\n , country.name as country\n , i.legal_name as legal_name\n , CASE WHEN it.description = 1 THEN 'Corporate'\n WHEN it.description = 2 THEN 'Financial'\n WHEN it.description = 3 THEN 'Real estate'\n END as issuer_type_name\n , t9.first_name || ' ' || t9.last_name as p_analyst\n , t10.first_name || ' ' || t10.last_name as s_analyst\n , au.first_name || ' ' || au.last_name as c_manager\n , decision.decided_lt as current_long_term\n , decision.decided_lt_outlook as current_outlook\n , CASE WHEN iob.date_time_onboarding_completed IS NULL THEN 'Not Onboarded'\n WHEN iob.date_time_onboarding_completed IS NOT NULL AND decision_ongoing.id IS NOT NULL THEN 'Rating job in progress'\n WHEN iob.date_time_onboarding_completed IS NOT NULL AND decision.id IS NOT NULL THEN 'Ongoing surveillance'\n WHEN iob.date_time_onboarding_completed IS NOT NULL AND decision_ongoing.id IS NULL THEN 'Onboarded'\n END AS status\n , CASE WHEN decision_ongoing.process_step = 1 THEN 'Rating job started'\n WHEN decision_ongoing.process_step = 2 THEN 'Pre-committee'\n WHEN decision_ongoing.process_step = 3 THEN 'Analytical phase'\n WHEN decision_ongoing.process_step = 4 THEN 'Post committee'\n WHEN decision_ongoing.process_step = 5 THEN 'Editing'\n WHEN decision_ongoing.process_step = 6 THEN 'Issuer confirmation'\n WHEN decision_ongoing.process_step = 7 THEN 'Analyst final approval'\n WHEN decision_ongoing.process_step = 8 THEN 'Chair final approval'\n WHEN decision_ongoing.process_step = 9 THEN 'Ready to be published'\n END AS status_text\n , iob.engagement_letter_signed\n , COALESCE(class.peer_free_text, gsi.name) AS internal_peer\n\n FROM issuer_issuer as i\n INNER JOIN issuer_issuertype as it ON 1 = 1\n AND i.issuer_type_id = it.id\n INNER JOIN issuer_onboardingprocess as iob ON 1 = 1\n AND iob.issuer_id = i.id\n LEFT OUTER JOIN 
auth_user as au ON 1 = 1\n AND i.relationship_manager_id = au.id\n LEFT OUTER JOIN static_database_table_gicssubindustry as gsi\n ON 1 = 1\n AND i.gics_sub_industry_id = gsi.id\n LEFT OUTER JOIN static_database_table_gicsindustry as gi\n ON 1 = 1\n AND gsi.industry_id = gi.id\n LEFT OUTER JOIN static_database_table_gicsindustrygroup as gig\n ON 1 = 1\n AND gi.industry_group_id = gig.id\n LEFT OUTER JOIN static_database_table_gicssector as gs\n ON 1 = 1\n AND gig.sector_id = gs.id\n LEFT OUTER JOIN issuer_analyst as ia\n ON 1 = 1\n AND i.id = ia.issuer_id\n LEFT OUTER JOIN auth_user as t9\n ON 1 = 1\n AND ia.primary_analyst_id = t9.id\n LEFT OUTER JOIN auth_user as t10\n ON 1 = 1\n AND ia.secondary_analyst_id = t10.id\n LEFT OUTER JOIN issuer_address as iadd\n ON 1 = 1\n AND i.id = iadd.issuer_id\n LEFT OUTER JOIN static_database_table_countryregion as country\n ON 1 = 1\n AND iadd.country_id = country.id\n LEFT OUTER JOIN rating_process_ratingdecision AS decision\n ON 1 = 1\n AND i.id = decision.issuer_id\n AND decision.is_current = True\n LEFT OUTER JOIN rating_process_ratingdecision AS decision_ongoing\n ON 1 = 1\n AND i.id = decision_ongoing.issuer_id\n AND decision_ongoing.date_time_published IS NULL\n AND decision_ongoing.date_time_deleted IS NULL\n LEFT OUTER JOIN issuer_classification as class\n ON 1 = 1\n AND i.id = class.issuer_id\n ORDER BY COALESCE(class.peer_free_text, gsi.name || ': ' || CAST(gsi.code AS TEXT))\n , i.legal_name\n ''' # noqa: E501\n\n return self.raw(sql)\n\n def list_all_rating(self):\n \"\"\"Get all companies with a rating using an optimized query.\"\"\"\n\n sql = '''\n SELECT i.id\n , gs.name as sector_name\n , country.name as country\n , i.legal_name as legal_name\n , CASE WHEN it.description = 1 THEN 'Corporate'\n WHEN it.description = 2 THEN 'Financial'\n WHEN it.description = 3 THEN 'Real estate'\n END as issuer_type_name\n , t9.first_name || ' ' || t9.last_name as p_analyst\n , t10.first_name || ' ' || t10.last_name as s_analyst\n , au.first_name || ' ' || au.last_name as c_manager\n , decision.decided_lt as current_long_term\n , decision.decided_lt_outlook as current_outlook\n , CASE WHEN iob.date_time_onboarding_completed IS NULL THEN 'Not Onboarded'\n WHEN iob.date_time_onboarding_completed IS NOT NULL AND decision_ongoing.id IS NOT NULL THEN 'Rating job in progress'\n WHEN iob.date_time_onboarding_completed IS NOT NULL AND decision.id IS NOT NULL THEN 'Ongoing surveillance'\n WHEN iob.date_time_onboarding_completed IS NOT NULL AND decision_ongoing.id IS NULL THEN 'Onboarded'\n END AS status\n , CASE WHEN decision_ongoing.process_step = 1 THEN 'Rating job started'\n WHEN decision_ongoing.process_step = 2 THEN 'Pre-committee'\n WHEN decision_ongoing.process_step = 3 THEN 'Analytical phase'\n WHEN decision_ongoing.process_step = 4 THEN 'Post committee'\n WHEN decision_ongoing.process_step = 5 THEN 'Editing'\n WHEN decision_ongoing.process_step = 6 THEN 'Issuer confirmation'\n WHEN decision_ongoing.process_step = 7 THEN 'Analyst final approval'\n WHEN decision_ongoing.process_step = 8 THEN 'Chair final approval'\n WHEN decision_ongoing.process_step = 9 THEN 'Ready to be published'\n END AS status_text\n , iob.engagement_letter_signed\n , COALESCE(class.peer_free_text, gsi.name) AS internal_peer\n FROM issuer_issuer as i\n INNER JOIN issuer_issuertype as it ON 1 = 1\n AND i.issuer_type_id = it.id\n INNER JOIN issuer_onboardingprocess as iob ON 1 = 1\n AND iob.issuer_id = i.id\n LEFT OUTER JOIN auth_user as au ON 1 = 1\n AND i.relationship_manager_id = 
au.id\n LEFT OUTER JOIN static_database_table_gicssubindustry as gsi\n ON 1 = 1\n AND i.gics_sub_industry_id = gsi.id\n LEFT OUTER JOIN static_database_table_gicsindustry as gi\n ON 1 = 1\n AND gsi.industry_id = gi.id\n LEFT OUTER JOIN static_database_table_gicsindustrygroup as gig\n ON 1 = 1\n AND gi.industry_group_id = gig.id\n LEFT OUTER JOIN static_database_table_gicssector as gs\n ON 1 = 1\n AND gig.sector_id = gs.id\n LEFT OUTER JOIN issuer_analyst as ia\n ON 1 = 1\n AND i.id = ia.issuer_id\n LEFT OUTER JOIN auth_user as t9\n ON 1 = 1\n AND ia.primary_analyst_id = t9.id\n LEFT OUTER JOIN auth_user as t10\n ON 1 = 1\n AND ia.secondary_analyst_id = t10.id\n LEFT OUTER JOIN issuer_address as iadd\n ON 1 = 1\n AND i.id = iadd.issuer_id\n LEFT OUTER JOIN static_database_table_countryregion as country\n ON 1 = 1\n AND iadd.country_id = country.id\n LEFT OUTER JOIN rating_process_ratingdecision AS decision\n ON 1 = 1\n AND i.id = decision.issuer_id\n AND decision.is_current = True\n LEFT OUTER JOIN rating_process_ratingdecision AS decision_ongoing\n ON 1 = 1\n AND i.id = decision_ongoing.issuer_id\n AND decision_ongoing.date_time_published IS NULL\n AND decision_ongoing.date_time_deleted IS NULL\n LEFT OUTER JOIN issuer_classification as class\n ON 1 = 1\n AND i.id = class.issuer_id\n WHERE decision.is_current = True\n ORDER BY COALESCE(class.peer_free_text, gsi.name || ': ' || CAST(gsi.code AS TEXT))\n , i.legal_name\n ''' # noqa: E501\n\n return self.raw(sql)\n\n def list_all_assessment(self, issuer_id_list):\n \"\"\"Get all companies with an assessment.\"\"\"\n\n sql = '''\n WITH t1 AS (\n SELECT i.id\n , i.legal_name\n , i.issuer_type_id\n , cr.iso_31661_alpha_2 as ccy\n , COALESCE(class.peer_free_text, gsi.name) as sort_key\n FROM issuer_issuer AS i\n LEFT OUTER JOIN issuer_address AS ia\n ON 1 = 1\n AND ia.issuer_id = i.id\n LEFT OUTER JOIN static_database_table_countryregion AS cr\n ON 1 = 1\n AND cr.id = ia.country_id\n LEFT OUTER JOIN static_database_table_gicssubindustry as gsi\n ON 1 = 1\n AND i.gics_sub_industry_id = gsi.id\n LEFT OUTER JOIN issuer_classification AS class\n ON 1 = 1\n AND i.id = class.issuer_id\n WHERE i.issuer_type_id IN %s\n ORDER BY sort_key, i.legal_name\n )\n SELECT i.id\n , i.legal_name AS i_name\n , i.ccy\n , i.sort_key AS internal_peer\n , i.issuer_type_id as i_id\n , COALESCE(dec_current.date_time_published, ass_current.date_time_approval) AS current_date_time_approval\n , ass_current.id AS current_id\n\n , ass_progress.id AS progress_id\n , ass_progress.process_step AS progress_process_step\n , ass_progress.initiated_by_id AS progress_initiated_by_id\n , ass_progress.assessment_lt AS progress_assessment_lt\n\n , dec_current.id as rating_id\n\n , COALESCE(dec_current.initiated_by_id, ass_progress.initiated_by_id, ass_current.initiated_by_id) initiated_by\n FROM t1 as i\n LEFT JOIN credit_assessment_assessmentjob AS ass_current ON 1 = 1\n AND ass_current.issuer_id = i.id\n AND ass_current.is_current = True\n LEFT JOIN credit_assessment_assessmentjob AS ass_progress ON 1 = 1\n AND ass_progress.issuer_id = i.id\n AND ass_progress.is_current = False\n AND ass_progress.date_time_approval IS NULL\n LEFT JOIN rating_process_ratingdecision as dec_current ON 1 = 1\n AND dec_current.issuer_id = i.id\n AND dec_current.is_current = True\n WHERE 1 = 1\n AND (( ass_current.id IS NOT NULL\n OR ass_progress.id IS NOT NULL ) OR dec_current.id IS NOT NULL )\n ORDER BY internal_peer\n , legal_name\n ''' # noqa: E501\n\n return self.raw(sql, [tuple(issuer_id_list)])\n\n 
def list_onboarded(self):\n \"\"\"List issuers that are onboarded.\"\"\"\n\n return self.filter(\n onboarding_issuer_link__date_time_onboarding_completed__isnull=False) # noqa: E501\n\n\nclass ProcessManager(models.Manager):\n \"\"\"Process manager class.\"\"\"\n\n def get_queryset(self):\n \"\"\"Basic query set. Always filter out those that have been deleted.\"\"\"\n return ProcessQuerySet(self.model, using=self._db) # Important!\n\n def is_live(self):\n \"\"\"Get all companies that are live (not deleted).\"\"\"\n return self.get_queryset().is_live()\n\n def list_all(self):\n \"\"\"Get all companies using an optimized query.\"\"\"\n return self.get_queryset().list_all()\n\n def list_all_rating(self):\n \"\"\"Get all companies with a rating using an optimized query.\"\"\"\n return self.get_queryset().list_all_rating()\n\n def list_eligible_for_assessment(self):\n \"\"\"List all issuers that are eligible for getting a credit\n assessment.\"\"\"\n\n return self.get_queryset().list_eligible_for_assessment()\n\n def list_all_assessment(self, issuer_id_list):\n \"\"\"Get all companies with an assessment.\"\"\"\n\n return self.get_queryset().list_all_assessment(issuer_id_list)\n\n def list_onboarded(self):\n \"\"\"List issuers that are onboarded.\"\"\"\n return self.get_queryset().list_onboarded()\n\n\nclass IssuerType(models.Model):\n \"\"\"\n Currently, Nordic Credit Rating's business model revolves around three\n types of issuers: corporates, corporates in the real estate sector and\n financials.\n\n The ID# linked to each issuer type is used throughout the web site, in\n views as well as in templates. The order and numbering may thus not be\n changed.\n \"\"\"\n date_added = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\n\n description = models.IntegerField(\n choices=ALLOWED_ISSUER_TYPES,\n unique=True)\n\n def __str__(self):\n return '{}'.format(self.get_description_display())\n\n\n# Create your models here.\nclass Issuer(models.Model):\n \"\"\"Define an issuer. The issuer is the core object\n of the analytical toolkit.\"\"\"\n\n # Add version history to the model\n history = HistoricalRecords()\n\n objects = ProcessManager()\n\n def __str__(self):\n \"\"\"Return a human readable representation of each record.\"\"\"\n return '{}'.format(self.legal_name)\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n ordering = ['legal_name']\n\n @property\n def internal_identifier(self):\n \"\"\"Internal identifier for model.\n Same as in template_tags.\"\"\"\n\n return 'I' + str(self.pk).zfill(6)\n\n @property\n def peer_sector(self):\n \"\"\"Return the NCR defined peer group.\"\"\"\n\n return self.gics_sub_industry\n\n @property\n def is_onboarded(self):\n \"\"\"Flag if the issuer has been onboarded.\"\"\"\n\n if self in Issuer.objects.list_onboarded():\n return True\n else:\n return False\n\n parent_company = models.ForeignKey(\n 'self',\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n )\n\n # The system wide unique identifier for an issuer\n # https://www.leiroc.org/lei.htm\n lei = models.CharField(\n max_length=20,\n unique=True\n )\n\n # A name in the model for the issuer's legal name\n # Is populated from GLEIF repository upon insertion\n # of issuer\n short_name = models.CharField(\n db_index=True,\n max_length=128,\n null=True,\n blank=True,\n )\n\n legal_name = models.CharField(\n db_index=True,\n max_length=128,\n unique=True\n )\n\n # Provide a description of the issuer. 
This description\n # is for internal as well as external use.\n description = HTMLField(\n null=True,\n blank=True,\n )\n\n # The person in the Commercial team responsible for\n # managing the relationship with the issuer\n relationship_manager = models.ForeignKey(\n User,\n on_delete=models.PROTECT,\n related_name=\"relationship_manager_link\",\n null=True,\n blank=True\n )\n\n # Type of issuer. Determines which methodology is\n # used\n issuer_type = models.ForeignKey(\n IssuerType,\n on_delete=models.PROTECT,\n null=False,\n blank=False\n )\n\n # Defines what sector the issuer belongs to\n # This is based on the Global Industry Classification Standard (GICS)\n gics_sub_industry = models.ForeignKey(\n GICSSubIndustry,\n on_delete=models.PROTECT,\n related_name=\"issuer_gicssubindustry_link\",\n null=True,\n blank=True\n )\n\n # This is if we want to de-activate the entity for some reason\n # Allows us to remove the issuer without physically delete it from the\n # database tables\n inactivated = models.DateTimeField(\n null=True,\n blank=True\n )\n\n\nclass Analyst(models.Model):\n \"\"\"Define a primary and, if applicable,\n secondary analyst for the issuer.\"\"\"\n\n # Add version history to the model\n history = HistoricalRecords()\n\n def __str__(self):\n \"\"\"Return a human readable representation of each record.\"\"\"\n return 'Analysts for {}'.format(self.issuer.legal_name)\n\n issuer = models.OneToOneField(\n Issuer,\n on_delete=models.PROTECT\n )\n\n primary_analyst = models.ForeignKey(\n User,\n on_delete=models.PROTECT,\n related_name=\"primary_analyst_link\",\n null=True,\n blank=True\n )\n\n secondary_analyst = models.ForeignKey(\n User,\n on_delete=models.PROTECT,\n related_name=\"secondary_analyst_link\",\n null=True,\n blank=True\n )\n\n\nCONTACT_TYPES = (\n (1, 'Primary contact'),\n (2, 'Secondary contact'),\n)\n\n\nclass InsiderList(models.Model):\n \"\"\"InsiderList class.\"\"\"\n\n # Add version history to the model\n history = HistoricalRecords()\n\n def __str__(self):\n \"\"\"Return a human readable representation of each record.\"\"\"\n return '%s %s is on insider list for %s %s' % (\n self.first_name,\n self.last_name,\n self.issuer.legal_name,\n 'as ' + self.get_contact_type_display().lower()\n if self.get_contact_type_display() else ''\n )\n\n issuer = models.ForeignKey(\n Issuer,\n on_delete=models.PROTECT,\n related_name=\"insider_list_issuer_link\",\n )\n\n company = models.CharField(\n max_length=100,\n null=True,\n blank=True,\n help_text=\"If other company than issuer.\"\n )\n\n contact_type = models.IntegerField(\n db_index=True,\n choices=CONTACT_TYPES,\n null=True,\n blank=True,\n help_text=\"Leave as '----' if not primary or secondary contact.\"\n )\n\n first_name = models.CharField(\n db_index=True,\n max_length=100,\n blank=False\n )\n\n last_name = models.CharField(\n db_index=True,\n max_length=100,\n blank=False\n )\n\n email = models.CharField(\n max_length=100,\n blank=False,\n validators=[validate_email],\n help_text=\"Eg name@host.com\"\n )\n\n phone_regex = RegexValidator(regex=r'^\\+?1?\\d{9,15}$',\n message=\"Phone number must be \"\n \"entered in the format: \"\n \"'+999999999999'. 
\"\n \"Up to 15 digits allowed.\")\n # validators should be a list\n phone_number = models.CharField(validators=[phone_regex],\n max_length=17,\n help_text=\"Eg +nnnnnnnnnn\")\n\n role = models.CharField(\n db_index=True,\n max_length=100,\n blank=False,\n help_text=\"Eg 'Debt analyst' or 'Legal counsel'\"\n )\n\n date_creation = models.DateTimeField(\n auto_now_add=True,\n )\n\n date_deletion = models.DateTimeField(\n db_index=True,\n null=True,\n blank=True\n )\n\n\nclass OnboardingProcess(models.Model):\n \"\"\"Onboarding process for Issuer.\"\"\"\n\n def __str__(self):\n \"\"\"Return a human readable representation of each record.\"\"\"\n return 'Onboarding for %s' % (\n self.issuer.legal_name,\n )\n\n issuer = models.ForeignKey(\n Issuer,\n on_delete=models.PROTECT,\n related_name=\"onboarding_issuer_link\",\n )\n\n engagement_letter_signed = models.BooleanField(\n default=False\n )\n\n date_time_engagement_letter_signed = models.DateTimeField(\n blank=True,\n null=True,\n )\n\n issuer_long_term = models.BooleanField(\n default=False\n )\n\n issuer_short_term = models.BooleanField(\n default=False\n )\n\n instrument_rating = models.BooleanField(\n default=False,\n )\n\n target_delivery_date = models.DateField(\n blank=True,\n null=True,\n )\n\n date_time_onboarding_completed = models.DateTimeField(\n blank=True,\n null=True,\n )\n\n\nclass EventType(models.Model):\n \"\"\"EventType model.\"\"\"\n\n def __str__(self):\n return self.description\n\n description = models.CharField(\n max_length=255,\n null=False,\n blank=False,\n )\n\n\nclass Event(models.Model):\n \"\"\"Log event that occur on the issuer level..\"\"\"\n\n def __str__(self):\n \"\"\"Return a human readable representation of each record.\"\"\"\n return '%s: \"%s\" on %s' % (\n self.issuer.legal_name,\n self.event_type,\n self.timestamp.strftime(\"%Y-%m-%d\")\n )\n\n issuer = models.ForeignKey(\n Issuer,\n on_delete=models.PROTECT,\n related_name=\"process_issuer_link\",\n )\n\n triggered_by_user = models.ForeignKey(\n User,\n on_delete=models.PROTECT,\n related_name=\"event_user_link\",\n null=False,\n blank=False\n )\n\n event_type = models.ForeignKey(\n EventType,\n on_delete=models.PROTECT,\n null=False,\n blank=False,\n )\n\n timestamp = models.DateTimeField(\n db_index=True,\n default=timezone.now\n )\n\n\n@receiver(post_save, sender=Issuer)\ndef create_issuer(sender, instance, created, **kwargs):\n \"\"\"\n Whenever an issuer_corporate is added to the system,\n we also want a record in the table for\n Analysts\n \"\"\"\n if created:\n Analyst.objects.create(issuer=instance)\n\n OnboardingProcess.objects.create(\n issuer=instance)\n\n # create database connection and session\n db_connection = SQL('datalake')\n\n # Get data from GLEIF database\n # If LEI does not exist, it will start with LEI_\n if not instance.lei[0:4] == 'LEI_':\n GLEIFEditor(db_connection).upsert(instance.lei)\n\n\n@receiver(post_save, sender=Issuer)\ndef save_issuer(sender, instance, created, **kwargs):\n \"\"\"\n Post save signal to check if all requirements to move to the next step in\n the onboarding process have been fulfilled.\n\n This signal does the test if the Issuer-model has been updated.\n\n If they have been fulfilled, set the 'Analyst appointed'-step in the\n OnboardingProcess-model to True.\n \"\"\"\n issuer = Issuer.objects.get(id=instance.id)\n analysts = Analyst.objects.get(issuer=issuer)\n\n if issuer.issuer_type and analysts.primary_analyst:\n onboarding_obj = OnboardingProcess.objects.get(issuer=issuer)\n 
onboarding_obj.analysts_appointed = True\n onboarding_obj.save()\n\n\n@receiver(post_save, sender=Analyst)\ndef save_analyst(sender, instance, created, **kwargs):\n \"\"\"\n Post save signal to check if all requirements to move to the next step in\n the onboarding process have been fulfilled.\n\n This signal does the test if the Analyst-model has been updated.\n\n If they have been fulfilled, set the 'Analyst appointed'-step in the\n OnboardingProcess-model to True.\n \"\"\"\n issuer = Issuer.objects.get(id=instance.issuer_id)\n analysts = Analyst.objects.get(issuer=issuer)\n\n if issuer.issuer_type and analysts.primary_analyst:\n onboarding_obj = OnboardingProcess.objects.get(issuer=issuer)\n onboarding_obj.analysts_appointed = True\n onboarding_obj.save()\n","sub_path":"ncr_website/issuer/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":26147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
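The InsiderList model in the record above validates phone numbers with RegexValidator and the pattern r'^\+?1?\d{9,15}$'. A quick standalone check of what that pattern accepts, using plain re (sample numbers made up; no Django required):

import re

# The exact pattern used by InsiderList.phone_regex above:
# optional '+', optional leading '1', then 9-15 digits.
phone_re = re.compile(r'^\+?1?\d{9,15}$')

for number in ['+4799999999', '99999999999', '+123', 'abc123']:
    print(number, bool(phone_re.match(number)))
# +4799999999 True, 99999999999 True, +123 False, abc123 False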
+{"seq_id":"155461167","text":"import sys\n\ndef parse(adj):\n for l in sys.stdin: \n o,d,w = l.split() \n if o not in adj: \n adj[o] = [] \n if d not in adj: \n adj[d] = [] \n adj[o].append((d,int(w))) \n adj[d].append((o,int(w)))\n return adj \n\ndef dijkstra(adj,o): \n queue = [] \n parent = {} \n dist = {}\n for v in adj: \n dist[v] = float(\"inf\") \n queue.append(v) \n dist[o] = 0\n while queue: \n u = min(queue,key=lambda x : dist[x]) \n queue.remove(u)\n for (v,w) in adj[u]: \n alt = dist[u] + w \n if alt < dist[v]: \n dist[v] = alt \n parent[v] = u\n return parent,dist\n\ndef main():\n origem,destino = sys.stdin.readline().split(\" \")\n origem = origem.strip()\n destino = destino.strip()\n \n adj = {}\n grafo = parse(adj)\n travessia, pesos = dijkstra(grafo, origem)\n \n \n\n custo = pesos[destino]\n print(custo)\n\nmain()\n\n\n","sub_path":"2ºAno/2ºSemestre/LA2/grafos/voos.py","file_name":"voos.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"589833211","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n\"\"\"\nCreated on 2020/04/23\nauthor: lujie\n\"\"\"\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter\n\nfrom IPython import embed\n\nclass UncertaintyHead(nn.Module):\n ''' Evaluate the log(sigma^2) '''\n \n def __init__(self, in_feat = 512):\n\n super(UncertaintyHead, self).__init__()\n self.fc1 = Parameter(torch.Tensor(in_feat, in_feat))\n self.bn1 = nn.BatchNorm1d(in_feat, affine=True)\n self.relu = nn.ReLU(in_feat)\n self.fc2 = Parameter(torch.Tensor(in_feat, in_feat))\n self.bn2 = nn.BatchNorm1d(in_feat, affine=False)\n self.gamma = Parameter(torch.Tensor([1.0]))\n self.beta = Parameter(torch.Tensor([0.0])) # default = -7.0\n \n nn.init.kaiming_normal_(self.fc1)\n nn.init.kaiming_normal_(self.fc2)\n\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.relu(self.bn1(F.linear(x, F.normalize(self.fc1))))\n x = self.bn2(F.linear(x, F.normalize(self.fc2))) # 2*log(sigma)\n x = self.gamma * x + self.beta\n x = torch.log(1e-6 + torch.exp(x)) # log(sigma^2)\n return x\n\n\nif __name__ == \"__main__\":\n\n unh = UncertaintyHead(in_feat=3)\n \n mu_data = np.array([[-1.7847768 , -1.0991699 , 1.4248079 ],\n [ 1.0405252 , 0.35788524, 0.7338794 ],\n [ 1.0620259 , 2.1341069 , -1.0100055 ],\n [-0.00963581, 0.39570177, -1.5577421 ],\n [-1.064951 , -1.1261107 , -1.4181522 ],\n [ 1.008275 , -0.84791195, 0.3006532 ],\n [ 0.31099692, -0.32650718, -0.60247767]])\n \n muX = torch.from_numpy(mu_data).float()\n log_sigma_sq = unh(muX)\n print(log_sigma_sq)\n","sub_path":"model/uncertainty_head.py","file_name":"uncertainty_head.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"240193633","text":"#!/usr/bin/python\nimport sys\nimport os\nfrom numpy import *\nfrom mod_cond import *\n\ndef read_plot3d(filename):\n\tfp = open(filename,'r')\n\t\n\tbuf=fp.readline().split()\n\tNplane=int(buf.pop(0))\n\tnijk=[]\n\tBD=[]\n\t\n\tfor i in range(Nplane):\n\t buf=fp.readline().split()\n\t for i,var in enumerate(buf):\n\t buf[i]=int(var)\n\t nijk.append(buf)\n\t\n\tfor plane in range(Nplane):\n\t ng=1\n\t for var in nijk[plane]:\n\t ng*=var\n\t\n\t ### read data ###\n\t prearr=[]\n\t for i in range(int((ng+3)/4)):\n\t buf=fp.readline().split()\n\t for j,var in enumerate(buf):\n\t buf[j]=float(var)\n\t prearr.extend(buf)\n\t prearr = array(prearr)\n\t gridx = prearr.reshape(nijk[plane][1],nijk[plane][0]).transpose()\n\t\n\t prearr=[]\n\t for i in range(int((ng+3)/4)):\n\t buf=fp.readline().split()\n\t for j,var in enumerate(buf):\n\t buf[j]=float(var)\n\t prearr.extend(buf)\n\t prearr = array(prearr)\n\t gridy = prearr.reshape(nijk[plane][1],nijk[plane][0]).transpose()\n\t\n\t prearr=[]\n\t for i in range(int((ng+3)/4)):\n\t buf=fp.readline().split()\n\t for j,var in enumerate(buf):\n\t buf[j]=float(var)\n\t prearr.extend(buf)\n\t prearr = array(prearr)\n\t buf = prearr.reshape(nijk[plane][1],nijk[plane][0]).transpose()\n\t\n\t ### get only boundaries ###\n\t ni=nijk[plane][0]\n\t nj=nijk[plane][1]\n\t # i-plane\n\t for j in range(nijk[plane][1]):\n\t BD.append(BD_elm([gridx[ 0][j],gridy[ 0][j]],plane,0,0,j))\n\t for j in range(nijk[plane][1]):\n\t BD.append(BD_elm([gridx[ni-1][j],gridy[ni-1][j]],plane,0,1,j))\n\t\n\t # j-plane\n\t for i in range(nijk[plane][0]):\n\t BD.append(BD_elm([gridx[i][ 0],gridy[i][ 0]],plane,1,0,i))\n\t for i in range(nijk[plane][0]):\n\t BD.append(BD_elm([gridx[i][nj-1],gridy[i][nj-1]],plane,1,1,i))\n\t\n\tfp.close()\n\treturn [Nplane,nijk,BD]\n","sub_path":"store/cond/core/read_plot3d.py","file_name":"read_plot3d.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"336857338","text":"class Solution:\n def isSubtree(self, s, t):\n if s == None:\n return False\n if self.isSame(s, t):\n return True\n return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)\n\n def isSame(self, s, t):\n if s == None and t == None:\n return True\n if s == None or t == None:\n return False\n if s.val != t.val:\n return False\n return self.isSame(s.left, t.left) and self.isSame(s.right, t.right)\n","sub_path":"TOP_QUESTIONS/572.Subtree_of_Another_Tree.py","file_name":"572.Subtree_of_Another_Tree.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"255516680","text":"from django.shortcuts import render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.contrib import auth\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.core.context_processors import csrf\nfrom django.forms.models import model_to_dict\n\nfrom ProfileApp.forms import ProfileForm\nfrom ProfileApp.models import UserProfile\nfrom FootballApp.views import get_articles\n\n\ndef edit_profile(request):\n if request.user == AnonymousUser():\n return redirect('/')\n args = {}\n args.update(csrf(request))\n if request.POST:\n profile_form = ProfileForm(request.POST, request.FILES)\n if profile_form.is_valid():\n profile_commit = profile_form.save(commit=False)\n profile_commit.user_id = request.user.userprofile.user_id\n if not profile_commit.photo:\n profile_commit.photo = request.user.userprofile.photo\n profile_commit.save()\n return redirect('/profile/id/%s.html' % auth.get_user(request).id)\n else:\n initial = model_to_dict(request.user.userprofile)\n profile_form = ProfileForm(initial=initial)\n args['profile_form'] = profile_form\n args['articles'] = get_articles(request.user)\n args['username'] = auth.get_user(request).username\n args['user_id'] = auth.get_user(request).id\n return render_to_response(\n \"edit_profile.html\",\n args,\n context_instance=RequestContext(request)\n )\n\n\ndef profile(request, customer_id):\n return render_to_response(\n \"profile.html\",\n {\n 'user': UserProfile.objects.get(user_id=customer_id),\n 'articles': get_articles(request.user),\n 'username': auth.get_user(request).username,\n 'my': request.user.id == int(customer_id)\n }\n )\n","sub_path":"ProfileApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"585517073","text":"\"\"\"Training script for the DeepLab-ResNet network on the PASCAL VOC dataset\n for semantic image segmentation.\n\nThis script trains the model using augmented PASCAL VOC,\nwhich contains approximately 10000 images for training and 1500 images for validation.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport os\nimport sys\nimport time\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom deeplab_resnet import DeepLabResNetModel, ImageReader, decode_labels, inv_preprocess, prepare_label\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nn_classes = 21\n\nBATCH_SIZE = 5\nDATA_DIRECTORY = '/workspace/hzl/tensorflow-program/data/VOCdevkit/voc12'\nDATA_LIST_PATH = './dataset/train.txt'\nINPUT_SIZE = '321,321'\nLEARNING_RATE = 2.5e-4\nMOMENTUM = 0.9\nNUM_STEPS = 20001\nPOWER = 0.9\nRANDOM_SEED = 1234\nRESTORE_FROM = './deeplab_resnet.ckpt'\nSAVE_NUM_IMAGES = 2\nSAVE_PRED_EVERY = 1000\nSNAPSHOT_DIR = './snapshots/'\nWEIGHT_DECAY = 0.0005\n\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"DeepLab-ResNet Network\")\n parser.add_argument(\"--batch-size\", type=int, default=BATCH_SIZE,\n help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\n parser.add_argument(\"--data-list\", type=str, default=DATA_LIST_PATH,\n help=\"Path to the file listing the images in the dataset.\")\n parser.add_argument(\"--input-size\", type=str, default=INPUT_SIZE,\n help=\"Comma-separated string with height and width of images.\")\n parser.add_argument(\"--is-training\", action=\"store_true\",\n help=\"Whether to updates the running means and variances during the training.\")\n parser.add_argument(\"--learning-rate\", type=float, default=LEARNING_RATE,\n help=\"Base learning rate for training with polynomial decay.\")\n parser.add_argument(\"--momentum\", type=float, default=MOMENTUM,\n help=\"Momentum component of the optimiser.\")\n parser.add_argument(\"--num-steps\", type=int, default=NUM_STEPS,\n help=\"Number of training steps.\")\n parser.add_argument(\"--power\", type=float, default=POWER,\n help=\"Decay parameter to compute the learning rate.\")\n parser.add_argument(\"--random-mirror\", action=\"store_true\",\n help=\"Whether to randomly mirror the inputs during the training.\")\n parser.add_argument(\"--random-scale\", action=\"store_true\",\n help=\"Whether to randomly scale the inputs during the training.\")\n parser.add_argument(\"--random-seed\", type=int, default=RANDOM_SEED,\n help=\"Random seed to have reproducible results.\")\n parser.add_argument(\"--restore-from\", type=str, default=RESTORE_FROM,\n help=\"Where restore model parameters from.\")\n parser.add_argument(\"--save-num-images\", type=int, default=SAVE_NUM_IMAGES,\n help=\"How many images to save.\")\n parser.add_argument(\"--save-pred-every\", type=int, default=SAVE_PRED_EVERY,\n help=\"Save summaries and checkpoint every often.\")\n parser.add_argument(\"--snapshot-dir\", type=str, default=SNAPSHOT_DIR,\n help=\"Where to save snapshots of the model.\")\n parser.add_argument(\"--weight-decay\", type=float, default=WEIGHT_DECAY,\n help=\"Regularisation parameter for L2-loss.\")\n return parser.parse_args()\n\ndef save(saver, sess, logdir, step):\n '''Save weights.\n \n Args:\n saver: 
TensorFlow Saver object.\n sess: TensorFlow session.\n logdir: path to the snapshots directory.\n step: current training step.\n '''\n model_name = 'model.ckpt'\n checkpoint_path = os.path.join(logdir, model_name)\n \n if not os.path.exists(logdir):\n os.makedirs(logdir)\n saver.save(sess, checkpoint_path, global_step=step)\n print('The checkpoint has been created.')\n\ndef load(saver, sess, ckpt_path):\n '''Load trained weights.\n \n Args:\n saver: TensorFlow Saver object.\n sess: TensorFlow session.\n ckpt_path: path to checkpoint file with parameters.\n ''' \n saver.restore(sess, ckpt_path)\n print(\"Restored model parameters from {}\".format(ckpt_path))\n\ndef main():\n \"\"\"Create the model and start the training.\"\"\"\n args = get_arguments()\n \n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n \n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size,\n args.random_scale,\n args.random_mirror,\n coord)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=args.is_training)\n # For a small batch size, it is better to keep \n # the statistics of the BN layers (running means and variances)\n # frozen, and to not update the values provided by the pre-trained model. \n # If is_training=True, the statistics will be updated during the training.\n # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)\n # if they are presented in var_list of the optimiser definition.\n\n # Predictions.\n raw_output = net.layers['fc1_voc12']\n # Which variables to load. 
Running means and variances are not trainable,\n # thus all_variables() should be restored.\n restore_var = tf.global_variables()\n all_trainable = [v for v in tf.trainable_variables() if 'beta' not in v.name and 'gamma' not in v.name]\n fc_trainable = [v for v in all_trainable if 'fc' in v.name]\n conv_trainable = [v for v in all_trainable if 'fc' not in v.name] # lr * 1.0\n fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0\n fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0\n assert(len(all_trainable) == len(fc_trainable) + len(conv_trainable))\n assert(len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))\n \n \n # Predictions: ignoring all predictions with labels greater or equal than n_classes\n raw_prediction = tf.reshape(raw_output, [-1, n_classes])\n label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), one_hot=False) # [batch_size, h, w]\n raw_gt = tf.reshape(label_proc, [-1,])\n indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, n_classes - 1)), 1)\n gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)\n prediction = tf.gather(raw_prediction, indices)\n \n \n # Pixel-wise softmax loss.\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Processed predictions: for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, dimension=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n # Image summary.\n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat([images_summary, labels_summary, preds_summary], 2), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n base_lr = tf.constant(args.learning_rate)\n step_ph = tf.placeholder(dtype=tf.float32, shape=())\n learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / args.num_steps), args.power))\n \n opt_conv = tf.train.MomentumOptimizer(learning_rate, args.momentum)\n opt_fc_w = tf.train.MomentumOptimizer(learning_rate * 10.0, args.momentum)\n opt_fc_b = tf.train.MomentumOptimizer(learning_rate * 20.0, args.momentum)\n\n grads = tf.gradients(reduced_loss, conv_trainable + fc_w_trainable + fc_b_trainable)\n grads_conv = grads[:len(conv_trainable)]\n grads_fc_w = grads[len(conv_trainable) : (len(conv_trainable) + len(fc_w_trainable))]\n grads_fc_b = grads[(len(conv_trainable) + len(fc_w_trainable)):]\n\n train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable))\n train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))\n train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))\n\n train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)\n \n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n \n # Saver for storing checkpoints of the model.\n saver = tf.train.Saver(var_list=restore_var, max_to_keep=10)\n \n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n loader = tf.train.Saver(var_list=restore_var)\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(saver, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)\n \nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
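The schedule built above with tf.scalar_mul and tf.pow is polynomial decay: lr = base_lr * (1 - step / num_steps) ** power. The same curve in plain Python, using the script's defaults:

BASE_LR, POWER, NUM_STEPS = 2.5e-4, 0.9, 20001  # defaults from the record above

def poly_lr(step):
    # lr = base_lr * (1 - step / num_steps) ** power
    return BASE_LR * (1 - step / NUM_STEPS) ** POWER

for step in (0, 5000, 10000, 20000):
    print(step, poly_lr(step))
# starts at 2.5e-4 and decays smoothly towards 0 at the final step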
+{"seq_id":"403006073","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/7/16 10:49\n# @Author : liugang9\n# @Email : mlcc330@hotmail.com\n# @File : demoScrapBaidu.py\n# @Software: PyCharm\n# @license: Apache Licence\n# @contact: 3323202070@qq.com\n# @Description: \n# \n# \n\nimport urllib.request\nimport re\nfrom bs4 import BeautifulSoup\n\ndef make_content_to_html(scrap_data):\n f = open('scrap_result.html','a+',encoding='utf-8')\n message = '''\n \n \n \n \n \n 爬取结果 \n \n
\"\n\n\nif __name__ == '__main__':\n app.run('127.0.0.1', port=5000)\n","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"578850875","text":"# (c) Copyright [2018-2020] Micro Focus or one of its affiliates. \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# |_ |~) _ _| _ /~\\ _ |.\n# |_)\\/ |_)(_|(_|| \\_/|_|(_|||\n# / \n# ____________ ______\n# / __ `\\ / /\n# | \\/ / / /\n# |______ / / /\n# |____/ / /\n# _____________ / /\n# \\ / / /\n# \\ / / /\n# \\_______/ / /\n# ______ / /\n# \\ / / /\n# \\ / / /\n# \\/ / /\n# / /\n# / /\n# \\ /\n# \\ /\n# \\/\n# _\n# \\ / _ __|_. _ _ |_)\n# \\/ (/_| | |(_(_|| \\/\n# / \n# VerticaPy allows user to create vDataFrames (Virtual Dataframes). \n# vDataFrames simplify data exploration, data cleaning and MACHINE LEARNING \n# in VERTICA. It is an object which keeps in it all the actions that the user \n# wants to achieve and execute them when they are needed. \t\t\t\t\t\t\t\t\t\t\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n# The purpose is to bring the logic to the data and not the opposite !\n#\n# \n# Modules\n#\n# Standard Python Modules\nimport os\n# VerticaPy Modules\nfrom verticapy.utilities import *\nfrom verticapy.toolbox import *\nfrom verticapy import vDataFrame\nfrom verticapy.connections.connect import read_auto_connect\n#---#\nclass DBSCAN:\n\t\"\"\"\n---------------------------------------------------------------------------\n[Beta Version]\nCreates a DBSCAN object by using the DBSCAN algorithm as defined by Martin \nEster, Hans-Peter Kriegel, Jörg Sander and Xiaowei Xu. This object is using \npure SQL to compute all the distances and neighbors. It is also using Python \nto compute the cluster propagation (non scalable phase). This model is using \nCROSS JOIN and may be really expensive in some cases. It will index all the \nelements of the table in order to be optimal (the CROSS JOIN will happen only \nwith IDs which are integers). As DBSCAN is using the p-distance, it is highly \nsensible to un-normalized data. However, DBSCAN is really robust to outliers \nand can find non-linear clusters. It is a very powerful algorithm for outliers \ndetection and clustering. \n\nParameters\n----------\nname: str\n\tName of the the model. As it is not a built in model, this name will be used\n\tto build the final table.\ncursor: DBcursor, optional\n\tVertica DB cursor.\neps: float, optional\n\tThe radius of a neighborhood with respect to some point.\nmin_samples: int, optional\n\tMinimum number of points required to form a dense region.\np: int, optional\n\tThe p of the p-distance (distance metric used during the model computation).\n\nAttributes\n----------\nAfter the object creation, all the parameters become attributes. 
\nThe model will also create extra attributes when fitting the model:\n\nn_cluster: int\n\tNumber of clusters created during the process.\nn_noise: int\n\tNumber of points with no clusters.\ninput_relation: str\n\tTrain relation.\nX: list\n\tList of the predictors.\nkey_columns: list\n\tColumns not used during the algorithm computation but which will be used\n\tto create the final relation.\n\t\"\"\"\n\t#\n\t# Special Methods\n\t#\n\t#---#\n\tdef __init__(self,\n\t\t\t\t name: str,\n\t\t\t\t cursor = None,\n\t\t\t\t eps: float = 0.5,\n\t\t\t\t min_samples: int = 5,\n\t\t\t\t p: int = 2):\n\t\tcheck_types([\n\t\t\t(\"name\", name, [str], False),\n\t\t\t(\"eps\", eps, [int, float], False),\n\t\t\t(\"min_samples\", min_samples, [int, float], False),\n\t\t\t(\"p\", p, [int, float], False)])\n\t\tif not(cursor):\n\t\t\tcursor = read_auto_connect().cursor()\n\t\telse:\n\t\t\tcheck_cursor(cursor)\n\t\tself.type = \"clustering\"\n\t\tself.name = name\n\t\tself.cursor = cursor\n\t\tself.eps = eps\n\t\tself.min_samples = min_samples\n\t\tself.p = p \n\t#---#\n\tdef __repr__(self):\n\t\ttry:\n\t\t\trep = \"\\nNumber of Clusters: {}\\nNumber of Outliers: {}\".format(self.n_cluster, self.n_noise)\n\t\t\treturn (rep)\n\t\texcept:\n\t\t\treturn \"\"\n\t#\n\t# Methods\n\t#\n\t#---#\n\tdef fit(self, \n\t\t\tinput_relation: str, \n\t\t\tX: list, \n\t\t\tkey_columns: list = [], \n\t\t\tindex: str = \"\"):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tTrains the model.\n\n\tParameters\n\t----------\n\tinput_relation: str\n\t\tTrain relation.\n\tX: list\n\t\tList of the predictors.\n\tkey_columns: list, optional\n\t\tColumns not used during the algorithm computation but which will be used\n\t\tto create the final relation.\n\tindex: str, optional\n\t\tIndex used to identify each row separately. 
It is highly recommanded to\n\t\thave one already in the main table to avoid creation of temporary tables.\n\n\tReturns\n\t-------\n\tobject\n \t\tself\n\t\t\"\"\"\n\t\tcheck_types([\n\t\t\t(\"input_relation\", input_relation, [str], False),\n\t\t\t(\"X\", X, [list], False),\n\t\t\t(\"key_columns\", key_columns, [list], False),\n\t\t\t(\"index\", index, [str], False)])\n\t\tX = [str_column(column) for column in X]\n\t\tself.X = X\n\t\tself.key_columns = [str_column(column) for column in key_columns]\n\t\tself.input_relation = input_relation\n\t\tschema, relation = schema_relation(input_relation)\n\t\trelation_alpha = ''.join(ch for ch in relation if ch.isalnum())\n\t\tcursor = self.cursor\n\t\tif not(index):\n\t\t\tindex = \"id\"\n\t\t\tmain_table = \"VERTICAPY_MAIN_{}\".format(relation_alpha)\n\t\t\ttry:\n\t\t\t\tcursor.execute(\"DROP TABLE IF EXISTS v_temp_schema.{}\".format(main_table))\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tsql = \"CREATE LOCAL TEMPORARY TABLE {} ON COMMIT PRESERVE ROWS AS SELECT ROW_NUMBER() OVER() AS id, {} FROM {} WHERE {}\".format(main_table, \", \".join(X + key_columns), input_relation, \" AND \".join([\"{} IS NOT NULL\".format(item) for item in X]))\n\t\t\tcursor.execute(sql)\n\t\telse:\n\t\t\tcursor.execute(\"SELECT {} FROM {} LIMIT 10\".format(\", \".join(X + key_columns + [index]), input_relation))\n\t\t\tmain_table = input_relation\n\t\tsql = [\"POWER(ABS(x.{} - y.{}), {})\".format(X[i], X[i], self.p) for i in range(len(X))] \n\t\tdistance = \"POWER({}, 1 / {})\".format(\" + \".join(sql), self.p)\n\t\tsql = \"SELECT x.{} AS node_id, y.{} AS nn_id, {} AS distance FROM {} AS x CROSS JOIN {} AS y\".format(index, index, distance, main_table, main_table)\n\t\tsql = \"SELECT node_id, nn_id, SUM(CASE WHEN distance <= {} THEN 1 ELSE 0 END) OVER (PARTITION BY node_id) AS density, distance FROM ({}) distance_table\".format(self.eps, sql)\n\t\tsql = \"SELECT node_id, nn_id FROM ({}) x WHERE density > {} AND distance < {} AND node_id != nn_id\".format(sql, self.min_samples, self.eps)\n\t\tcursor.execute(sql)\n\t\tgraph = cursor.fetchall()\n\t\tmain_nodes = list(dict.fromkeys([elem[0] for elem in graph] + [elem[1] for elem in graph]))\n\t\tclusters = {}\n\t\tfor elem in main_nodes:\n\t\t\tclusters[elem] = None\n\t\ti = 0\n\t\twhile (graph):\n\t\t\tnode = graph[0][0]\n\t\t\tnode_neighbor = graph[0][1]\n\t\t\tif (clusters[node] == None) and (clusters[node_neighbor] == None):\n\t\t\t\tclusters[node] = i \n\t\t\t\tclusters[node_neighbor] = i\n\t\t\t\ti = i + 1\n\t\t\telse:\n\t\t\t\tif (clusters[node] != None and clusters[node_neighbor] == None):\n\t\t\t\t\tclusters[node_neighbor] = clusters[node]\n\t\t\t\telif (clusters[node_neighbor] != None and clusters[node] == None):\n\t\t\t\t\tclusters[node] = clusters[node_neighbor]\n\t\t\tdel(graph[0])\n\t\ttry:\n\t\t\tf = open(\"VERTICAPY_DBSCAN_CLUSTERS_ID.csv\", 'w')\n\t\t\tfor elem in clusters:\n\t\t\t\tf.write(\"{}, {}\\n\".format(elem, clusters[elem]))\n\t\t\tf.close()\n\t\t\ttry:\n\t\t\t\tcursor.execute(\"DROP TABLE IF EXISTS v_temp_schema.VERTICAPY_DBSCAN_CLUSTERS\")\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tcursor.execute(\"CREATE LOCAL TEMPORARY TABLE VERTICAPY_DBSCAN_CLUSTERS(node_id int, cluster int) ON COMMIT PRESERVE ROWS\")\n\t\t\tif (\"vertica_python\" in str(type(cursor))):\n\t\t\t\twith open('./VERTICAPY_DBSCAN_CLUSTERS_ID.csv', \"r\") as fs:\n\t\t\t\t\tcursor.copy(\"COPY v_temp_schema.VERTICAPY_DBSCAN_CLUSTERS(node_id, cluster) FROM STDIN DELIMITER ',' ESCAPE AS '\\\\';\", fs)\n\t\t\telse:\n\t\t\t\tcursor.execute(\"COPY 
v_temp_schema.VERTICAPY_DBSCAN_CLUSTERS(node_id, cluster) FROM LOCAL './VERTICAPY_DBSCAN_CLUSTERS_ID.csv' DELIMITER ',' ESCAPE AS '\\\\';\")\n\t\t\ttry:\n\t\t\t\tcursor.execute(\"COMMIT\")\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tos.remove(\"VERTICAPY_DBSCAN_CLUSTERS_ID.csv\")\n\t\texcept:\n\t\t\tos.remove(\"VERTICAPY_DBSCAN_CLUSTERS_ID.csv\")\n\t\t\traise\n\t\tself.n_cluster = i\n\t\tcursor.execute(\"CREATE TABLE {} AS SELECT {}, COALESCE(cluster, -1) AS dbscan_cluster FROM v_temp_schema.{} AS x LEFT JOIN v_temp_schema.VERTICAPY_DBSCAN_CLUSTERS AS y ON x.{} = y.node_id\".format(self.name, \", \".join(self.X + self.key_columns), main_table, index))\n\t\tcursor.execute(\"SELECT COUNT(*) FROM {} WHERE dbscan_cluster = -1\".format(self.name))\n\t\tself.n_noise = cursor.fetchone()[0]\n\t\tcursor.execute(\"DROP TABLE IF EXISTS v_temp_schema.VERTICAPY_MAIN_{}\".format(relation_alpha))\n\t\tcursor.execute(\"DROP TABLE IF EXISTS v_temp_schema.VERTICAPY_DBSCAN_CLUSTERS\")\n\t\treturn (self)\n\t#---#\n\tdef info(self):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tDisplays some information about the model.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tprint(\"DBSCAN was successfully achieved by building {} cluster(s) and by identifying {} elements as noise.\\nIf you are not happy with the result, do not forget to normalise the data before applying DBSCAN. As this algorithm is using the p-distance, it is really sensible to the data distribution.\".format(self.n_cluster, self.n_noise))\n\t\texcept:\n\t\t\tprint(\"Please use the 'fit' method to start the algorithm.\")\n\t#---#\n\tdef plot(self):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tDraws the model is the number of predictors is 2 or 3.\n\t\t\"\"\"\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")\n\t#---#\n\tdef to_vdf(self):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tCreates a vDataFrame of the model.\n\n\tReturns\n\t-------\n\tvDataFrame\n \t\tmodel vDataFrame\n\t\t\"\"\"\n\t\treturn (vDataFrame(self.name, self.cursor))\n#---#\nclass KMeans:\n\t\"\"\"\n---------------------------------------------------------------------------\nCreates a KMeans object by using the Vertica Highly Distributed and Scalable \nKMeans on the data. K-means clustering is a method of vector quantization, \noriginally from signal processing, that aims to partition n observations into \nk clusters in which each observation belongs to the cluster with the nearest \nmean (cluster centers or cluster centroid), serving as a prototype of the \ncluster. This results in a partitioning of the data space into Voronoi cells. \n\nParameters\n----------\nname: str\n\tName of the the model. The model will be stored in the DB.\ncursor: DBcursor, optional\n\tVertica DB cursor.\nn_cluster: int, optional\n\tNumber of clusters\ninit: str/list, optional\n\tThe method used to find the initial cluster centers.\n\t\tkmeanspp : Uses the KMeans++ method to initialize the centers.\n\t\trandom : The initial centers.\n\tIt can be also a list with the initial cluster centers to use.\nmax_iter: int, optional\n\tThe maximum number of iterations the algorithm performs.\ntol: float, optional\n\tDetermines whether the algorithm has converged. 
The algorithm is considered \n\tconverged after no center has moved more than a distance of 'tol' from the \n\tprevious iteration. \n\nAttributes\n----------\nAfter the object creation, all the parameters become attributes. \nThe model will also create extra attributes when fitting the model:\n\ncluster_centers: tablesample\n\tClusters result of the algorithm.\nmetrics: tablesample\n\tDifferent metrics to evaluate the model.\ninput_relation: str\n\tTrain relation.\nX: list\n\tList of the predictors.\n\t\"\"\"\n\tdef __init__(self,\n\t\t\t\t name: str,\n\t\t\t\t cursor = None,\n\t\t\t\t n_cluster: int = 8,\n\t\t\t\t init: str = \"kmeanspp\",\n\t\t\t\t max_iter: int = 300,\n\t\t\t\t tol: float = 1e-4):\n\t\tcheck_types([\n\t\t\t(\"name\", name, [str], False),\n\t\t\t(\"n_cluster\", n_cluster, [int, float], False),\n\t\t\t(\"max_iter\", max_iter, [int, float], False),\n\t\t\t(\"tol\", tol, [int, float], False)])\n\t\tif not(cursor):\n\t\t\tcursor = read_auto_connect().cursor()\n\t\telse:\n\t\t\tcheck_cursor(cursor)\n\t\tself.type = \"clustering\"\n\t\tself.name = name\n\t\tself.cursor = cursor\n\t\tself.n_cluster = n_cluster\n\t\tif (type(init) == str):\n\t\t\tself.init = init.lower()\n\t\telse:\n\t\t\tself.init = init\n\t\tself.max_iter = max_iter \n\t\tself.tol = tol \n\t#---#\n\tdef __repr__(self):\n\t\ttry:\n\t\t\tself.cursor.execute(\"SELECT GET_MODEL_SUMMARY(USING PARAMETERS model_name = '\" + self.name + \"')\")\n\t\t\treturn (self.cursor.fetchone()[0])\n\t\texcept:\n\t\t\treturn \"\"\n\t#\n\t# Methods\n\t#\n\t#---#\n\tdef deploySQL(self):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tReturns the SQL code needed to deploy the model. \n\n\tReturns\n\t-------\n\tstr\n \t\tthe SQL code needed to deploy the model.\n\t\t\"\"\"\n\t\tsql = \"APPLY_KMEANS({} USING PARAMETERS model_name = '{}', match_by_pos = 'true')\"\n\t\treturn (sql.format(\", \".join(self.X), self.name))\n\t#---#\n\tdef drop(self):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tDrops the model from the Vertica DB.\n\t\t\"\"\"\n\t\tdrop_model(self.name, self.cursor, print_info = False)\n\t#---#\n\tdef fit(self, \n\t\t\tinput_relation: str, \n\t\t\tX: list):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tTrains the model.\n\n\tParameters\n\t----------\n\tinput_relation: str\n\t\tTrain relation.\n\tX: list\n\t\tList of the predictors.\n\n\tReturns\n\t-------\n\tobject\n \t\tself\n\t\t\"\"\"\n\t\tcheck_types([\n\t\t\t(\"input_relation\", input_relation, [str], False),\n\t\t\t(\"X\", X, [list], False)])\n\t\tself.input_relation = input_relation\n\t\tself.X = [str_column(column) for column in X]\n\t\tquery = \"SELECT KMEANS('{}', '{}', '{}', {} USING PARAMETERS max_iterations = {}, epsilon = {}\".format(self.name, input_relation, \", \".join(self.X), self.n_cluster, self.max_iter, self.tol)\n\t\tschema = schema_relation(self.name)[0]\n\t\tname = \"VERTICAPY_KMEANS_INITIAL\"\n\t\tif (type(self.init) != str):\n\t\t\ttry:\n\t\t\t\tself.cursor.execute(\"DROP TABLE IF EXISTS {}.{}\".format(schema, name))\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tif (len(self.init) != self.n_cluster):\n\t\t\t\traise ValueError(\"'init' must be a list of 'n_cluster' = {} points\".format(self.n_cluster))\n\t\t\telse:\n\t\t\t\tfor item in self.init:\n\t\t\t\t\tif (len(X) != len(item)):\n\t\t\t\t\t\traise ValueError(\"Each points of 'init' must be of size len(X) = {}\".format(len(self.X)))\n\t\t\t\tquery0 = []\n\t\t\t\tfor 
i in range(len(self.init)):\n\t\t\t\t\tline = []\n\t\t\t\t\tfor j in range(len(self.init[0])):\n\t\t\t\t\t\tline += [str(self.init[i][j]) + \" AS \" + X[j]]\n\t\t\t\t\tline = \",\".join(line)\n\t\t\t\t\tquery0 += [\"SELECT \" + line]\n\t\t\t\tquery0 = \" UNION \".join(query0)\n\t\t\t\tquery0 = \"CREATE TABLE {}.{} AS {}\".format(schema, name, query0)\n\t\t\t\tself.cursor.execute(query0)\n\t\t\t\tquery += \", initial_centers_table = '{}.{}'\".format(schema, name)\n\t\telse:\n\t\t\tquery += \", init_method = '{}'\".format(self.init)\n\t\tquery += \")\"\n\t\tself.cursor.execute(query)\n\t\ttry:\n\t\t\tself.cursor.execute(\"DROP TABLE IF EXISTS {}.{}\".format(schema, name))\n\t\texcept:\n\t\t\tpass\n\t\tself.cluster_centers = to_tablesample(query = \"SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'centers')\".format(self.name), cursor = self.cursor)\n\t\tself.cluster_centers.table_info = False\n\t\tquery = \"SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'metrics')\".format(self.name)\n\t\tself.cursor.execute(query)\n\t\tresult = self.cursor.fetchone()[0]\n\t\tvalues = {\"index\": [\"Between-Cluster Sum of Squares\", \"Total Sum of Squares\", \"Total Within-Cluster Sum of Squares\", \"Between-Cluster SS / Total SS\", \"converged\"]}\n\t\tvalues[\"value\"] = [float(result.split(\"Between-Cluster Sum of Squares: \")[1].split(\"\\n\")[0]), float(result.split(\"Total Sum of Squares: \")[1].split(\"\\n\")[0]), float(result.split(\"Total Within-Cluster Sum of Squares: \")[1].split(\"\\n\")[0]), float(result.split(\"Between-Cluster Sum of Squares: \")[1].split(\"\\n\")[0]) / float(result.split(\"Total Sum of Squares: \")[1].split(\"\\n\")[0]), result.split(\"Converged: \")[1].split(\"\\n\")[0] == \"True\"] \n\t\tself.metrics = tablesample(values, table_info = False)\n\t\treturn (self)\n\t#---#\n\tdef plot(self, \n\t\t\t voronoi: bool = False):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tDraws the KMeans clusters.\n\n\tParameters\n\t----------\n\tvoronoi: bool, optional\n\t\tIf set to true, a voronoi plot will be drawn. It is only available for\n\t\tKMeans using 2 predictors.\n\t\t\"\"\"\n\t\tif (voronoi):\n\t\t\tif (len(self.X) == 2):\n\t\t\t\tfrom verticapy.learn.plot import voronoi_plot\n\t\t\t\tquery = \"SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'centers')\".format(self.name)\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tclusters = self.cursor.fetchall()\n\t\t\t\tvoronoi_plot(clusters = clusters, columns = self.X)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Voronoi Plots are only available in 2D\")\n\t\telse:\n\t\t\tvdf = vDataFrame(self.input_relation, self.cursor)\n\t\t\tself.predict(vdf, \"kmeans_cluster\")\n\t\t\tif (len(self.X) <= 3):\n\t\t\t\tvdf.scatter(columns = self.X, catcol = \"kmeans_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")\n\t#---#\n\tdef predict(self, \n\t\t\t\tvdf, \n\t\t\t\tname: str = \"\"):\n\t\t\"\"\"\n\t---------------------------------------------------------------------------\n\tAdds the prediction in a vDataFrame.\n\n\tParameters\n\t----------\n\tvdf: vDataFrame\n\t\tObject used to insert the prediction as a vcolumn.\n\tname: str, optional\n\t\tName of the added vcolumn. 
If empty, a name will be generated.\n\n\tReturns\n\t-------\n\tvDataFrame\n\t\tthe input object.\n\t\t\"\"\"\n\t\tcheck_types([\n\t\t\t(\"name\", name, [str], False)],\n\t\t\tvdf = [\"vdf\", vdf])\n\t\tname = \"KMeans_\" + ''.join(ch for ch in self.name if ch.isalnum()) if not (name) else name\n\t\treturn (vdf.eval(name, self.deploySQL()))","sub_path":"verticapy/learn/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":18142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
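The fit() method in the record above turns a user-supplied 'init' list into a one-off initial-centers table by UNIONing one SELECT per point. A standalone sketch of that query construction (schema, table, and column names are illustrative, not taken from the record):

```python
# Rebuilds the CREATE TABLE ... UNION SELECT statement fit() generates for
# a custom 'init'; all identifiers below are placeholders.
init = [[1.0, 2.0], [3.0, 4.0]]
X = ["col1", "col2"]
selects = []
for point in init:
    line = ", ".join("{} AS {}".format(value, column)
                     for value, column in zip(point, X))
    selects.append("SELECT " + line)
query = "CREATE TABLE schema.VERTICAPY_KMEANS_INITIAL AS " + " UNION ".join(selects)
print(query)
# CREATE TABLE schema.VERTICAPY_KMEANS_INITIAL AS SELECT 1.0 AS col1, 2.0 AS col2
#   UNION SELECT 3.0 AS col1, 4.0 AS col2
```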
+{"seq_id":"389528857","text":"import os\r\n\r\n#给定一个整数数组nums和一个目标值target,找出数组中和为目标值的那两个数并返回数组下标\r\n#方法一:暴力破解法\r\ndef twoSum(nums, target):\r\n\tfor i in range(len(nums)):\r\n\t\tfor j in range(i + 1, len(nums)):\r\n\t\t\tsums = nums[i] + nums[j]\r\n\t\t\tif sums == target:\r\n\t\t\t\tprint(i, j)\r\n\t\t\t\treturn i, j\r\n\t\t\t\t\r\ndef main(*args, **kw):\r\n\ttwoSum(\t [17, 3, 8, 11, 2, 5, 19, 20], 30)\r\n\t\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"99652093","text":"class GameObject:\r\n \"\"\" A general game object with x and y coordinates representing its location on the gameboard.\r\n\r\n Attributes:\r\n x (int): Represents the x-coordinate on the gameboard.\r\n y (int): Represents the y-coordinate on the gameboard.\r\n \"\"\"\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n\r\nclass PowerUp(GameObject):\r\n \"\"\" A general object representing power-ups with x and y coordinates representing its location on the gameboard.\r\n\r\n Inherits from GameObject.\r\n\r\n Attributes:\r\n power_up_type (Enum): The type of power-up as either SHIELD, LASER, or TELEPORT .\r\n \"\"\"\r\n def __init__(self, x, y, power_up_type):\r\n super().__init__(x, y)\r\n self.power_up_type = power_up_type\r\n\r\n\r\nclass Turret(GameObject):\r\n \"\"\" An object representing a turret on the gameboard.\r\n\r\n A turret fires for fire_time consecutive turns and is inactive for cooldown_time turns.\r\n\r\n Inherits from GameObject.\r\n\r\n Attributes:\r\n is_firing_next_turn (boolean): True iff this turret is firing the next turn.\r\n is_dead (boolean): True iff this turret has been destroyed and is no longer active.\r\n did_fire (boolean): True iff this turret fired in the last turn.\r\n fire_time (int): The number of consecutive turns the turret will fire.\r\n cooldown_time (int): The number of consecutive turns the turret will not fire.\r\n \"\"\"\r\n def __init__(self, x, y, is_firing_next_turn, is_dead, did_fire, fire_time, cooldown_time):\r\n super().__init__(x, y)\r\n self.is_firing_next_turn = is_firing_next_turn\r\n self.is_dead = is_dead\r\n self.did_fire = did_fire\r\n self.fire_time = fire_time\r\n self.cooldown_time = cooldown_time\r\n\r\n\r\nclass Wall(GameObject):\r\n \"\"\" An object representing a wall and its location on the gameboard.\r\n\r\n Inherits from GameObject.\r\n \"\"\"\r\n def __init__(self, x, y):\r\n super().__init__(x, y)\r\n\r\n\r\nclass DirectionalGameObject(GameObject):\r\n \"\"\" An object representing any GameObject with a direction.\r\n\r\n Inherits from GameObject.\r\n\r\n Attributes:\r\n direction (Enum): The direction the object is facing as either UP, DOWN, LEFT, RIGHT.\r\n \"\"\"\r\n def __init__(self, x, y, direction):\r\n super().__init__(x, y)\r\n self.direction = direction\r\n\r\n\r\nclass Bullet(DirectionalGameObject):\r\n \"\"\" An object representing a bullet on the gameboard.\r\n\r\n Attributes:\r\n shooter (Combatant): A reference to the Combatant that fired the bullet.\r\n \"\"\"\r\n def __init__(self, x, y, direction, shooter):\r\n super().__init__(x, y, direction)\r\n self.shooter = shooter\r\n\r\n\r\nclass Combatant(DirectionalGameObject):\r\n \"\"\" A general object representing a player on the gameboard.\r\n\r\n Attributes:\r\n score (int): This Combatant's current score.\r\n hp (int): This Combatant's current hp (remaining lives/hit points).\r\n shield_active (boolean): True iff this Combatant's shield is currently active.\r\n laser_count (int): The number of laser power-ups this Combatant has.\r\n teleport_count (int): The number of teleport power-ups this Combatant has.\r\n shield_count (int): The number of shield power-ups this Combatant has.\r\n \"\"\"\r\n def __init__(self, x, y, direction, score, hp, shield_active, laser_count, teleport_count, shield_count):\r\n super().__init__(x, y, direction)\r\n self.score = score\r\n self.hp = hp\r\n self.shield_active = shield_active\r\n self.laser_count = laser_count\r\n self.teleport_count = teleport_count\r\n 
self.shield_count = shield_count\r\n\r\n\r\nclass Opponent(Combatant):\r\n \"\"\" An object representing a player's Opponent in the game.\r\n\r\n Inherits from Combatant.\r\n\r\n Attributes:\r\n last_move (Enum): The last move this Opponent made. All possible moves are listed in Move in Enums.py.\r\n \"\"\"\r\n\r\n def __init__(self, x, y, direction, score, hp, shield_active, last_move, laser_count, teleport_count, shield_count):\r\n super().__init__(x, y, direction, score, hp, shield_active, laser_count, teleport_count, shield_count)\r\n self.last_move = last_move\r\n\r\n\r\nclass Player(Combatant):\r\n \"\"\" An object representing the player (you) on the gameboard.\r\n\r\n Inherits from Combatant.\r\n\r\n Attributes:\r\n projectiles (List[Enum]): A list of projectiles this Player was hit by, either a LASER or BULLET.\r\n shooters (List[Combatant]): A list of Combatants that fired the projectiles in projectiles. Each index contains\r\n a reference to the combatant that fired it or None (where a Turret fired that projectile).\r\n This list is the same length as projectiles and the indices are mapped such that\r\n projectiles[x] was fired by shooters[x], where x is an index.\r\n did_make_a_move (boolean): True iff this Player made a valid move last turn.\r\n \"\"\"\r\n def __init__(self, x, y, direction, score, hp, laser_count, teleport_count, shield_count, did_make_a_move,\r\n projectiles,\r\n shooters, shield_active):\r\n super().__init__(x, y, direction, score, hp, shield_active, laser_count, teleport_count, shield_count)\r\n self.projectiles = projectiles\r\n self.shooters = shooters\r\n self.did_make_a_move = did_make_a_move\r\n\r\n def was_hit(self):\r\n return len(self.projectiles) > 0\r\n","sub_path":"orbis/starterKit/Validator/PythonClientAPI/libs/Game/GameObjects.py","file_name":"GameObjects.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
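A small usage sketch of the hierarchy in the record above, assuming its classes are in scope; 'RIGHT' and 'BULLET' merely stand in for the Move/projectile Enum members in Enums.py, which this record does not include:

```python
# Illustrative values only: the real code passes members of the game's Enums.
player = Player(x=0, y=0, direction='RIGHT', score=0, hp=3,
                laser_count=1, teleport_count=0, shield_count=2,
                did_make_a_move=True, projectiles=['BULLET'],
                shooters=[None],  # None marks a turret as the shooter
                shield_active=False)
print(player.was_hit())  # True: one projectile was recorded this turn
```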
+{"seq_id":"432089905","text":"#! /usr/bin/env python\n\n\ndef get_functions_from_module(mod, pattern=None):\n import inspect, re\n\n funcs = {}\n for name, func in inspect.getmembers(mod, inspect.isroutine):\n if pattern is None or re.match(pattern, name):\n funcs[name] = func\n return funcs\n\n\ndef add_functions_to_class(cls, funcs):\n for name, func in funcs.items():\n setattr(cls, name, func)\n\n\ndef add_module_functions_to_class(cls, module, pattern=None):\n import inspect, imp, os\n\n caller = inspect.stack()[1]\n path = os.path.join(os.path.dirname(caller[1]), os.path.dirname(module))\n\n (module, _) = os.path.splitext(os.path.basename(module))\n\n mod = imp.load_module(module, *imp.find_module(module, [path]))\n\n funcs = get_functions_from_module(mod, pattern=pattern)\n add_functions_to_class(cls, funcs)\n\n\n","sub_path":"landlab/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"209519552","text":"#!/usr/bin/env python\n# coding=utf-8\n\n# System imports\nimport threading\nimport queue\nfrom time import sleep\nfrom sys import exit\n\n# PyQt5 imports\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import QObject\n\n# PySerial imports\nimport serial\nfrom serial.serialutil import SerialException\n\nfrom config import config\n\nclass Model(threading.Thread, QObject):\n\n # This signal emitted when program fail to read serial port (self.port)\n error = pyqtSignal(object)\n # Signal emitted when port configuratin changes\n port_conf_change = pyqtSignal(object)\n\n def __init__(self):\n threading.Thread.__init__(self)\n QObject.__init__(self)\n # Queue with data (lines) received from serial port\n self.queue = queue.Queue()\n self.paused = threading.Event()\n self.paused.set()\n\n # Communications settings\n self._port = config['port']\n self._br = config['baudrate']\n self._parity = config['parity']\n self._bytesize = config['bytesize']\n self._stopbits = config['stopbits']\n self.timeout = config['timeout']\n # Line ending id\n self.eol = config['eol'][0]\n\n # PySerial object\n self.ser = None\n # Flag for main cycle\n self.running = True\n\n def run(self):\n '''\n Run thread.\n In every iteration trying to read one line from serial port and put\n it in queue.\n '''\n try:\n while self.running:\n self.paused.wait()\n if not self.ser.isOpen():\n sleep(0.05)\n continue\n\n try:\n data = self.readline()\n except SerialException as e:\n print('Error occured while reading data. ' + str(e))\n continue\n\n if data:\n\n if config['in_hex']:\n # Only for Python 3.5 and newer\n decoded = data.hex().upper()\n else:\n if config['encode'].upper() in ['ASCII', 'UTF-8']:\n try:\n decoded = data.decode(config['encode'])\n except UnicodeError as e:\n print('Fail to decode bytes. Error: {}'.format(\n e))\n else:\n print('Wrong decoding format. 
Using ASCII.')\n decoded = data.decode('ASCII')\n\n # One unformatted and one formatted string for the hex\n # representation\n hex_repr = self.add_html_colors(decoded)\n # print(hex_repr)\n result = [decoded, hex_repr]\n\n self.queue.put(result)\n\n except KeyboardInterrupt:\n if self.ser:\n self.ser.close()\n exit()\n\n def pause(self):\n self.ser.close()\n if self.paused.isSet():\n self.paused.clear()\n\n def resume(self):\n self.ser.open()\n if not self.paused.isSet():\n self.paused.set()\n\n def stop(self):\n '''\n Stop thread.\n '''\n self.running = False\n self.paused.set()\n if self.ser:\n self.ser.close()\n\n def begin(self):\n '''\n Initialize PySerial object\n '''\n try:\n self.ser = serial.Serial(\n port=self._port, baudrate=self._br, timeout=self.timeout,\n bytesize=self._bytesize, stopbits=self._stopbits\n )\n except SerialException:\n print('Failed to open default port.')\n self.ser = serial.Serial(\n baudrate=self._br, timeout=self.timeout)\n\n#==============================================================================\n# Attributes\n#==============================================================================\n\n @property\n def br(self):\n return self._br\n\n @br.setter\n def br(self, baudrate):\n self.ser.reset_input_buffer()\n if int(baudrate) in serial.Serial.BAUDRATES:\n self._br = baudrate\n self.ser.baudrate = baudrate\n\n self.emit_port_conf_change(self.port_config())\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, port):\n if self.ser and self.ser.isOpen():\n self.ser.close()\n\n if self._port != port:\n self._port = port\n else:\n return\n\n try:\n self.ser.port = port\n self.resume()\n except SerialException as e:\n self.emit_error('Can\\'t open this port: ' + str(port) + '.')\n print(e)\n self.ser.close()\n\n self.emit_port_conf_change(self.port_config())\n\n def get_queue(self):\n return self.queue\n\n def set_eol(self, index):\n if index < len(config['eol']) and index >= 0:\n self.eol = config['eol'][index]\n else:\n print('Can\\'t set this type of End Of Line: it\\'s not in the '\n 'standard list.')\n\n def get_eol(self):\n return self.eol\n\n#==============================================================================\n# PySerial communication\n#==============================================================================\n def read(self, size=1):\n '''\n Read bytes from port. \n Args:\n size: integer specifying the number of bytes to read. Default is 1.\n Returns:\n String\n '''\n data = None\n\n try:\n if self.ser.isOpen():\n try:\n data = self.ser.read(size)\n except TypeError:\n print('Strange bug in the library.')\n else:\n print('Can\\'t read from the port. Port isn\\'t open.')\n except SerialException as e:\n print('Exception occurred while reading from serial port. ' \n + str(e))\n\n return data\n\n def readline(self):\n '''\n Read line from serial port. Read byte by byte until the program gets a '\\n'\n symbol.\n Returns:\n String\n '''\n data = b''\n\n try:\n if self.ser.isOpen():\n sym = self.read()\n while sym != b'\\n' and sym and len(data) < 256:\n data += sym\n sym = self.read()\n else:\n print('Can\\'t read from the port. 
Port isn\\'t open.')\n\n except SerialException as e:\n print('Exception occurred while reading line from serial port.')\n\n # return data.decode('UTF-8')\n # return data.decode('ASCII')\n return data\n\n def write(self, data):\n '''\n Write data to serial port.\n Args:\n data: data to send\n '''\n try:\n if self.ser.isOpen():\n self.ser.write(\n bytes(data, 'ASCII') + \n bytes(self.get_eol(), 'ASCII')\n )\n else:\n print('Can\\'t write to the port. Port isn\\'t open.')\n except SerialException as e:\n print('Exception occurred while writing to serial port.')\n print(e)\n\n#==============================================================================\n# Utils\n#==============================================================================\n \n def add_html_colors(self, string):\n '''\n Use a predefined dictionary to find control characters in the string and\n wrap them in color HTML tags.\n Args:\n string: String to parse\n Returns:\n HTML parsed string.\n '''\n clr_set = {\n 0xA: '#0000AA',\n 0xD: '#00AA00'\n }\n\n line = list(string)\n result = list()\n for i, sym in enumerate(line):\n if ord(sym) in clr_set.keys():\n # opening/closing tags reconstructed; the original markup was\n # stripped during extraction\n result.append('<font color=\"{}\">'.format(\n clr_set[ord(sym)]))\n result.append('{0:02x}'.format(ord(sym)).upper())\n result.append('</font>')\n else:\n result.append('{0:02x}'.format(ord(sym)).upper())\n\n if (i + 1)%2 == 0:\n result.append(' ')\n\n return ''.join(result)\n\n\n def divide_text_in_blocks(self, string, length=4):\n \"\"\"\n Divide string into substrings of length 'length'.\n Args:\n string: string to divide.\n Returns:\n Divided string.\n \"\"\"\n if length < 0:\n return None\n\n if len(string) < length:\n return string\n\n return ' '.join(string[i:i + length] for i in range(\n 0, len(string), length))\n\n\n def port_config(self):\n '''\n Generate port configuration dictionary. (Used in view)\n Returns:\n Dictionary.\n '''\n return {'baudrate': self._br, 'num_of_bits': self._bytesize, \n 'parity': self._parity, 'num_of_stop': self._stopbits}\n\n#==============================================================================\n# Signals\n#==============================================================================\n\n def emit_error(self, value):\n self.error.emit(value)\n\n def emit_port_conf_change(self, value):\n self.port_conf_change.emit(value)\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
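readline() in the record above pulls one byte at a time; pyserial also ships a bounded reader with the same timeout behavior. A sketch (the port name is a placeholder and real hardware is needed to run it):

```python
import serial

# '/dev/ttyUSB0' is illustrative; read_until returns whatever arrived if the
# timeout expires or the size cap is reached, like the manual loop above.
ser = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=1)
line = ser.read_until(b'\n', size=256)
ser.close()
```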
+{"seq_id":"193536252","text":"class NestedIterator(object):\n\n def __init__(self, nestedList):\n # Initialize your data structure here.\n self.ds = nestedList\n self.idx = 0\n self.stack = []\n self.find_next()\n\n def find_next(self):\n while ((self.idx < len(self.ds) and isinstance(self.ds[self.idx], list)) or\n (self.idx == len(self.ds) and self.stack)):\n if self.idx == len(self.ds):\n self.ds, self.idx = self.stack.pop()\n continue\n if self.idx < len(self.ds) - 1:\n self.stack.append((self.ds, self.idx+1))\n self.ds = self.ds[self.idx]\n self.idx = 0\n\n # @return {int} the next element in the iteration\n def next(self):\n # Write your code here\n ans = self.ds[self.idx]\n self.idx += 1\n if self.idx < len(self.ds) and isinstance(self.ds[self.idx], int):\n return ans\n self.find_next()\n return ans\n\n\n # @return {boolean} true if the iteration has more element or false\n def hasNext(self):\n # Write your code here\n return self.idx < len(self.ds)\n","sub_path":"data_structures/array/nested_list_iterator.py","file_name":"nested_list_iterator.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"76794430","text":"import math\nimport numpy as np\nimport createPower as cp\nimport pylab as plt\n\ndef moSin(frequency=50.0, amplitude=5.0, phase=1, timePerDetect=1e-04, duration=0.5):\n\n sinSignal=[]\n cycleTime=1.0/frequency\n freSin=float(2*math.pi*frequency)\n for detectNum in xrange(int(cycleTime/timePerDetect+0.5)):\n value=amplitude*math.sin(freSin*detectNum*timePerDetect+phase)\n # if value<0:\n # value=0\n sinSignal.append(value)\n\n # plt.figure()\n # plt.plot(range(int(cycleTime/timePerDetect+0.5)), sinSignal, \"oc-\")\n # plt.show()\n\n return sinSignal\n\ndef FM():\n selFre = 10000.0\n selNum = 1000.0\n\n # signal1 = moSin(50, 10.0)\n signal1 = [1]\n signal2 = moSin(500, 10.0)\n\n signalTot = []\n for detectTime in xrange(int(selNum)):\n signalTot.append( signal1[detectTime % len(signal1)] * signal2[detectTime % len(signal2)] )\n\n plt.figure()\n plt.plot(range(int(selNum)), signalTot, \"oc-\")\n plt.xlim((0, 500))\n plt.show()\n\n signalTot = np.array(signalTot)\n\n freqNums = [i * selFre / selNum for i in xrange(int(selNum/2+1))]\n\n ffts = [1]\n fftNums = np.fft.fft(signalTot)\n ffts[0] = np.abs(fftNums[0]) / selNum\n ffts.extend([np.abs(fftNum) / (selNum/2) for fftNum in fftNums[1 : ]])\n\n flt = plt.figure()\n flt = plt.subplot(111)\n\n for index, frequency in enumerate(freqNums):\n flt.plot([frequency, frequency], [0, ffts[index]], \"ok-\", linewidth = 2)\n\n plt.xlim((0, 500))\n\n plt.show()\n\nif __name__==\"__main__\":\n FM()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"584048244","text":"from django.db import models\n\n\nclass Album(models.Model):\n name = models.TextField(\n blank=False,\n db_index=True,\n unique=True,\n help_text=\"Album name - can alternatively use 'id' field set to id of existing album when creating new lyrics\")\n\n objects = models.Manager()\n\n\nclass Song(models.Model):\n name = models.TextField(\n blank=False,\n db_index=True,\n unique=True,\n help_text=\"Song name - can alternatively use 'id' field set to id of existing song when creating new lyrics\")\n\n album = models.ForeignKey(\n Album,\n related_name='songs',\n null=True,\n on_delete=models.CASCADE,\n help_text=\"Album\")\n\n objects = models.Manager()\n\n\nclass Lyric(models.Model):\n text = models.TextField(\n blank=False,\n db_index=True,\n help_text=\"Lyrics from a song/album\")\n\n song = models.ForeignKey(\n Song,\n related_name='lyrics',\n null=True,\n on_delete=models.CASCADE,\n help_text=\"Song\")\n\n votes = models.IntegerField(\n default=0\n )\n\n objects = models.Manager()\n","sub_path":"lyrics_api/swift_lyrics/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"605189146","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains theme implementation\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport os\n\nfrom Qt.QtCore import *\nfrom Qt.QtGui import *\n\nfrom six import string_types\n\nimport tpDcc\nfrom tpDcc.libs import qt\nfrom tpDcc.libs.python import yamlio, color, python\nfrom tpDcc.libs.qt.core import style, qtutils, cache, color as qt_color\n\n\nclass Theme(QObject, object):\n\n class Sizes(object):\n\n TINY = 18\n SMALL = 24\n MEDIUM = 32\n LARGE = 40\n HUGE = 48\n\n class Colors(object):\n\n BLUE = '#1890FF'\n PURPLE = '#722ED1'\n CYAN = '#13C2C2'\n GREEN = '#52C41A'\n MAGENTA = '#EB2F96'\n PINK = '#EF5B97'\n RED = '#F5222D'\n ORANGE = '#FA8C16'\n YELLOW = '#FADB14'\n VOLCANO = '#FA541C'\n GEEK_BLUE = '#2F54EB'\n LIME = '#A0D911'\n GOLD = '#FAAD14'\n\n updated = Signal()\n\n EXTENSION = 'yml'\n DEFAULT_ACCENT_COLOR = QColor(0, 175, 255)\n DEFAULT_SIZE = Sizes.SMALL\n\n def __init__(self, theme_file=None, accent_color=None, dark_mode=True):\n super(Theme, self).__init__()\n\n self._name = 'Default'\n self._style = 'default'\n self._file = theme_file\n self._dpi = 1\n self._background_color = None\n self._overrides = list()\n\n self._init_colors()\n self._init_sizes()\n self._init_font()\n\n accent_color = accent_color or self.Colors.BLUE\n if dark_mode:\n self.set_dark(accent_color)\n else:\n self.set_light(accent_color)\n\n self._init_icons()\n\n self.unit = 'px'\n self.default_size = self.small\n self.text_color_inverse = '#FFF'\n\n self._load_theme_data_from_file(theme_file)\n\n def __getattr__(self, item):\n options = self.options(skip_instance_attrs=True)\n if not options or item not in options:\n return super(Theme, self).__getattribute__(item)\n\n option_value = options[item]\n if isinstance(option_value, string_types):\n if option_value.startswith('^'):\n return qtutils.dpi_scale(int(option_value[1:]))\n if color.string_is_hex(option_value):\n return color.hex_to_rgb(option_value)\n else:\n return option_value\n\n def _load_theme_data_from_file(self, theme_file):\n \"\"\"\n Internal function that laods file data from given file\n :param theme_file: str\n :return: dict\n \"\"\"\n\n if not theme_file or not os.path.isfile(theme_file):\n return\n\n try:\n theme_data = yamlio.read_file(theme_file)\n except Exception:\n qt.logger.warning('Impossible to load theme data from file: \"{}\"!'.format(theme_file))\n return None\n\n theme_name = theme_data.get('name', None)\n if not theme_name:\n qt.logger.warning('Impossible to retrieve them name from theme file: \"{}\"!'.format(theme_file))\n return None\n accent_color = theme_data.get('accent_color', None)\n if not accent_color:\n qt.logger.warning('No theme color definitions found in theme file: \"{}\"'.format(theme_file))\n return None\n self._style = theme_data.get('style', 'default.css')\n self._overrides = theme_data.get('overrides', list())\n\n self.set_name(theme_name)\n self.set_accent_color(accent_color)\n\n def name(self):\n \"\"\"\n Returns the name for this theme\n :return: str\n \"\"\"\n\n return self._name\n\n def set_name(self, name):\n \"\"\"\n Sets the name for this theme\n :param name: str\n \"\"\"\n\n self._name = name\n\n def dpi(self):\n \"\"\"\n Returns zoom amount for this theme\n :return: float\n \"\"\"\n\n return self._dpi\n\n def set_dpi(self, dpi):\n \"\"\"\n Sets the zoom amount for this theme\n :param dpi: float\n \"\"\"\n\n self._dpi = dpi\n\n def set_accent_color(self, 
accent_color):\n \"\"\"\n Sets the main/accent color of the theme\n :param accent_color:\n \"\"\"\n\n self._update_accent_color(accent_color)\n self.updated.emit()\n\n def is_dark(self):\n \"\"\"\n Returns whether the current theme is dark or not\n :return: bool\n \"\"\"\n\n bg_color = qt_color.Color(self.background_color)\n\n red = bg_color.redF() * 0.299\n green = bg_color.greenF() * 0.587\n blue = bg_color.blueF() * 0.114\n\n darkness = red + green + blue\n if darkness < 0.6:\n return True\n\n return False\n\n def set_dark(self, accent_color):\n \"\"\"\n Sets the current theme to the default dark color\n \"\"\"\n\n self.background_color = '#323232'\n self.background_selected_color = '#292929'\n self.background_in_color = '#3A3A3A'\n self.background_out_color = '#494949'\n self.mask_color = self._fade_color(self.background_color, '90%')\n self.toast_color = '#555555'\n self.title_color = \"#FFFFFF\"\n self.primary_text_color = \"#D9D9D9\"\n self.secondary_text_color = \"#A6A6A6\"\n self.disable_color = \"#737373\"\n self.border_color = \"#1E1E1E\"\n self.divider_color = \"#262626\"\n self.header_color = \"#0A0A0A\"\n self.icon_color = \"#A6A6A6\"\n self.window_dragger_color = \"#232323\"\n self.window_dragger_label_color = \"#D9D9D9\"\n\n self.set_accent_color(accent_color)\n\n def set_light(self, accent_color):\n \"\"\"\n Sets the current theme to the default light color\n \"\"\"\n\n self.background_color = '#F8F8F9'\n self.background_selected_color = '#BFBFBF'\n self.background_in_color = '#FFFFFF'\n self.background_out_color = '#EEEEEE'\n self.mask_color = self._fade_color(self.background_color, '90%')\n self.toast_color = '#333333'\n self.title_color = \"#262626\"\n self.primary_text_color = \"#595959\"\n self.secondary_text_color = \"#8C8C8C\"\n self.disable_color = \"#E5E5E5\"\n self.border_color = \"#D9D9D9\"\n self.divider_color = \"#E8E8E8\"\n self.header_color = \"#FAFAFA\"\n self.icon_color = \"#8C8C8C\"\n self.window_dragger_color = \"#f2f2fd\"\n self.window_dragger_label_color = \"#262626\"\n\n self.set_accent_color(accent_color)\n\n def _init_colors(self):\n \"\"\"\n Internal function that initializes all theme colors\n \"\"\"\n\n self.info_color = self.Colors.BLUE\n self.success_color = self.Colors.GREEN\n self.processing_color = self.Colors.BLUE\n self.error_color = self.Colors.RED\n self.warning_color = self.Colors.GOLD\n\n self.info_1 = self._fade_color(self.info_color, '15%')\n self.info_2 = qt_color.generate_color(self.info_color, 2)\n self.info_3 = self._fade_color(self.info_color, '35%')\n self.info_4 = qt_color.generate_color(self.info_color, 4)\n self.info_5 = qt_color.generate_color(self.info_color, 5)\n self.info_6 = qt_color.generate_color(self.info_color, 6)\n self.info_7 = qt_color.generate_color(self.info_color, 7)\n self.info_8 = qt_color.generate_color(self.info_color, 8)\n self.info_9 = qt_color.generate_color(self.info_color, 9)\n self.info_10 = qt_color.generate_color(self.info_color, 10)\n\n self.success_1 = self._fade_color(self.success_color, '15%')\n self.success_2 = qt_color.generate_color(self.success_color, 2)\n self.success_3 = self._fade_color(self.success_color, '35%')\n self.success_4 = qt_color.generate_color(self.success_color, 4)\n self.success_5 = qt_color.generate_color(self.success_color, 5)\n self.success_6 = qt_color.generate_color(self.success_color, 6)\n self.success_7 = qt_color.generate_color(self.success_color, 7)\n self.success_8 = qt_color.generate_color(self.success_color, 8)\n self.success_9 = 
qt_color.generate_color(self.success_color, 9)\n self.success_10 = qt_color.generate_color(self.success_color, 10)\n\n self.warning_1 = self._fade_color(self.warning_color, '15%')\n self.warning_2 = qt_color.generate_color(self.warning_color, 2)\n self.warning_3 = self._fade_color(self.warning_color, '35%')\n self.warning_4 = qt_color.generate_color(self.warning_color, 4)\n self.warning_5 = qt_color.generate_color(self.warning_color, 5)\n self.warning_6 = qt_color.generate_color(self.warning_color, 6)\n self.warning_7 = qt_color.generate_color(self.warning_color, 7)\n self.warning_8 = qt_color.generate_color(self.warning_color, 8)\n self.warning_9 = qt_color.generate_color(self.warning_color, 9)\n self.warning_10 = qt_color.generate_color(self.warning_color, 10)\n\n self.error_1 = self._fade_color(self.error_color, '15%')\n self.error_2 = qt_color.generate_color(self.error_color, 2)\n self.error_3 = self._fade_color(self.error_color, '35%')\n self.error_4 = qt_color.generate_color(self.error_color, 4)\n self.error_5 = qt_color.generate_color(self.error_color, 5)\n self.error_6 = qt_color.generate_color(self.error_color, 6)\n self.error_7 = qt_color.generate_color(self.error_color, 7)\n self.error_8 = qt_color.generate_color(self.error_color, 8)\n self.error_9 = qt_color.generate_color(self.error_color, 9)\n self.error_10 = qt_color.generate_color(self.error_color, 10)\n\n def _init_sizes(self):\n \"\"\"\n Internal function that initializes all themes sizes\n \"\"\"\n\n self.border_radius_large = 8\n self.border_radius_base = 4\n self.border_radius_small = 2\n self.tiny = self.Sizes.TINY\n self.small = self.Sizes.SMALL\n self.medium = self.Sizes.MEDIUM\n self.large = self.Sizes.LARGE\n self.huge = self.Sizes.HUGE\n self.tiny_icon = self.tiny - 8\n self.small_icon = self.small - 10\n self.medium_icon = self.medium - 12\n self.large_icon = self.large - 16\n self.huge_icon = self.huge - 20\n self.window_dragger_rounded_corners = 5\n self.window_dragger_font_size = 12\n self.window_rounded_corners = 5\n self.button_padding = 4\n\n def _init_font(self):\n \"\"\"\n Internal function that initializes all theme fonts\n \"\"\"\n\n self.font_family = 'BlinkMacSystemFont,\"Segoe UI\",\"PingFang SC\",\"Hiragino Sans GB\",\"Microsoft YaHei\",' \\\n '\"Helvetica Neue\",Helvetica,Arial,sans-serif'\n self.font_size_base = 14\n self.font_size_large = self.font_size_base + 2\n self.font_size_small = self.font_size_base - 2\n self.h1_size = int(self.font_size_base * 2.71)\n self.h2_size = int(self.font_size_base * 2.12)\n self.h3_size = int(self.font_size_base * 1.71)\n self.h4_size = int(self.font_size_base * 1.41)\n\n def _init_icons(self):\n \"\"\"\n Internal function that initializes all theme icons\n \"\"\"\n\n self.radio_checked_icon = 'radio_button_checked.png'\n self.radio_unchecked_icon = 'radio_button_unchecked.png'\n self.up_icon = 'collapse.png'\n self.down_icon = 'expand.png'\n self.up_arrow_icon = 'up_arrow.png'\n self.down_arrow_icon = 'down_arrow.png'\n self.left_icon = 'back.png'\n self.right_icon = 'next.png'\n self.calendar_icon = 'calendar.png'\n self.check_icon = 'check.png'\n self.uncheck_icon = 'uncheck.png'\n self.circle_icon = 'circle.png'\n self.splitter_icon = 'splitter.png'\n\n def _update_accent_color(self, accent_color):\n accent_color = qt_color.convert_2_hex(accent_color)\n self.accent_color = accent_color\n self.accent_color_1 = qt_color.generate_color(accent_color, 1)\n self.accent_color_2 = qt_color.generate_color(accent_color, 2)\n self.accent_color_3 = 
qt_color.generate_color(accent_color, 3)\n self.accent_color_4 = qt_color.generate_color(accent_color, 4)\n self.accent_color_5 = qt_color.generate_color(accent_color, 5)\n self.accent_color_6 = qt_color.generate_color(accent_color, 6)\n self.accent_color_7 = qt_color.generate_color(accent_color, 7)\n self.accent_color_8 = qt_color.generate_color(accent_color, 8)\n self.accent_color_9 = qt_color.generate_color(accent_color, 9)\n self.accent_color_10 = qt_color.generate_color(accent_color, 10)\n self.item_hover_background_color = self.accent_color_1\n\n def _get_color(self, color_value, alpha=None):\n \"\"\"\n Internal function that returns a color value in proper format to be handled by theme\n :param color_value: variant, str or QColor or color.Color\n \"\"\"\n\n if isinstance(color_value, string_types):\n color_value = qt_color.Color.from_string(color_value)\n elif isinstance(color_value, QColor):\n color_value = qt_color.Color.from_color(color_value)\n elif isinstance(color_value, (list, tuple)):\n color_value = qt_color.Color(*color_value)\n\n return color_value\n\n def _fade_color(self, color, alpha):\n \"\"\"\n Internal function that fades given color\n :param color: QColor\n :param alpha: float\n :return:\n \"\"\"\n\n qt_color = QColor(color)\n return 'rgba({}, {}, {}, {})'.format(qt_color.red(), qt_color.green(), qt_color.blue(), alpha)\n\n def foreground_color(self):\n \"\"\"\n Returns the foreground color for this theme\n :return: color.Color\n \"\"\"\n\n if self.is_dark():\n return qt_color.Color(250, 250, 250, 255)\n else:\n return qt_color.Color(0, 40, 80, 180)\n\n # def icon_color(self):\n # \"\"\"\n # Returns the icon color for this theme\n # :return: color.Color\n # \"\"\"\n #\n # return self.foreground_color()\n #\n # def accent_foreground_color(self):\n # \"\"\"\n # Returns the foreground color for the accent color\n # \"\"\"\n #\n # return qt_color.Color(255, 255, 255, 255)\n #\n # def item_background_color(self):\n # \"\"\"\n # Returns the item background color\n # :return: color.Color\n # \"\"\"\n #\n # if self.is_dark():\n # return qt_color.Color(255, 255, 255, 20)\n # else:\n # return qt_color.Color(255, 255, 255, 120)\n #\n # def item_background_hover_color(self):\n # \"\"\"\n # Returns the item background color when the mouse hovers over the item\n # :return: color.Color\n # \"\"\"\n #\n # return qt_color.Color(255, 255, 255, 60)\n #\n # def settings(self):\n # \"\"\"\n # Returns a dictionary of settings for the current theme\n # :return: dict\n # \"\"\"\n #\n # return {\n # 'name': self.name(),\n # 'accentColor': self.accent_color().to_string(),\n # 'backgroundColor': self.background_color().to_string()\n # }\n\n def set_settings(self, settings):\n \"\"\"\n Sets a dictionary of settings for the current theme\n :param settings: dict\n \"\"\"\n\n for theme_sett_name, theme_sett_value in settings.items():\n if hasattr(self, theme_sett_name):\n setattr(self, theme_sett_name, theme_sett_value)\n\n def get_theme_option(self, option_name, default_value=None):\n \"\"\"\n Returns option of the style\n :return: object\n \"\"\"\n\n theme_options = self.options()\n if not theme_options:\n return default_value\n\n return theme_options[option_name] if option_name in theme_options else default_value\n\n def options(self, skip_instance_attrs=False):\n \"\"\"\n Returns the variables used to customize the style sheet\n :return: dict\n \"\"\"\n if self.is_dark():\n darkness = 'white'\n else:\n darkness = 'black'\n\n theme_resources_dir = ''\n if self._file and 
os.path.isfile(self._file):\n theme_dir = os.path.dirname(self._file)\n theme_name = os.path.splitext(os.path.basename(self._file))[0]\n theme_resources_dir = os.path.join(theme_dir, 'resources', theme_name)\n\n style_resources_dir = ''\n style_path = self.stylesheet_file()\n if style_path and os.path.isfile(style_path):\n style_dir = os.path.dirname(style_path)\n style_name = os.path.splitext(os.path.basename(style_path))[0]\n style_resources_dir = os.path.join(style_dir, 'resources', style_name)\n\n options = {\n 'darkness': darkness,\n 'theme_resources': theme_resources_dir,\n 'style_resources': style_resources_dir\n }\n\n if not skip_instance_attrs:\n inst_attrs = python.get_instance_user_attributes(self)\n for attr in inst_attrs:\n if isinstance(attr[1], QColor):\n options[attr[0]] = qt_color.Color(attr[1]).to_string()\n else:\n options[attr[0]] = str(attr[1])\n\n # options = {\n # \"ACCENT_COLOR\": accent_color.to_string(),\n # \"ACCENT_COLOR_DARKER\": qt_color.Color(accent_color.darker(150)).to_string(),\n # \"ACCENT_COLOR_LIGHTER\": qt_color.Color(accent_color.lighter(150)).to_string(),\n # \"ACCENT_COLOR_R\": str(accent_color.red()),\n # \"ACCENT_COLOR_G\": str(accent_color.green()),\n # \"ACCENT_COLOR_B\": str(accent_color.blue()),\n #\n # \"ACCENT_FOREGROUND_COLOR\": accent_foreground_color.to_string(),\n # \"ACCENT_FOREGROUND_COLOR_DARKER\": qt_color.Color(accent_foreground_color.darker(150)).to_string(),\n #\n # \"FOREGROUND_COLOR\": foreground_color.to_string(),\n # \"FOREGROUND_COLOR_R\": str(foreground_color.red()),\n # \"FOREGROUND_COLOR_G\": str(foreground_color.green()),\n # \"FOREGROUND_COLOR_B\": str(foreground_color.blue()),\n #\n # \"BACKGROUND_COLOR\": background_color.to_string(),\n # \"BACKGROUND_COLOR_LIGHTER\": qt_color.Color(background_color.lighter(150)).to_string(),\n # \"BACKGROUND_COLOR_DARKER\": qt_color.Color(background_color.darker(150)).to_string(),\n # \"BACKGROUND_COLOR_R\": str(background_color.red()),\n # \"BACKGROUND_COLOR_G\": str(background_color.green()),\n # \"BACKGROUND_COLOR_B\": str(background_color.blue()),\n #\n # \"ITEM_TEXT_COLOR\": foreground_color.to_string(),\n # \"ITEM_TEXT_SELECTED_COLOR\": accent_foreground_color.to_string(),\n #\n # \"ITEM_BACKGROUND_COLOR\": item_background_color.to_string(),\n # \"ITEM_BACKGROUND_HOVER_COLOR\": item_background_hover_color.to_string(),\n # \"ITEM_BACKGROUND_SELECTED_COLOR\": accent_color.to_string(),\n # }\n #\n overrides = self._overrides or dict()\n options.update(overrides)\n #\n return options\n\n def stylesheet_file(self):\n \"\"\"\n Returns path where theme stylesheet is located\n :return: str\n \"\"\"\n\n style_name = self._style or 'default'\n style_extension = style.StyleSheet.EXTENSION\n if not style_extension.startswith('.'):\n style_extension = '.{}'.format(style_extension)\n style_file_name = '{}{}'.format(style_name, style_extension)\n style_path = tpDcc.ResourcesMgr().get('styles', style_file_name)\n\n return style_path\n\n def stylesheet(self):\n \"\"\"\n Returns the style sheet for this theme\n :return: str\n \"\"\"\n\n style_path = self.stylesheet_file()\n options = self.options()\n\n stylesheet = style.StyleSheet.from_path(style_path, options=options, theme_name=self._name, dpi=self.dpi())\n\n return stylesheet.data()\n #\n # def create_color_dialog(self, parent, standard_colors=None, current_color=None):\n # \"\"\"\n # Creates a new instance of color dialog\n # :param parent: QWidget\n # :param standard_colors: list(int)\n # :param current_color: QColor\n # :return: 
QColorDialog\n # \"\"\"\n #\n # dlg = QColorDialog(parent)\n # if standard_colors:\n # index = -1\n # for r, g, b in standard_colors:\n # index += 1\n # clr = QColor(r, g, b).rgba()\n # try:\n # clr = QColor(clr)\n # dlg.setStandardColor(index, clr)\n # except Exception:\n # clr = QColor(clr).rgba()\n # dlg.setStandardColor(index, clr)\n #\n # # PySide2 does not supports d.open(), we pass a blank slot\n # dlg.open(self, Slot('blankSlot()'))\n #\n # if current_color:\n # dlg.setCurrentColor(current_color)\n #\n # return dlg\n #\n # def browse_accent_color(self, parent=None):\n # \"\"\"\n # Shows the color dialog for changing the accent color\n # :param parent: QWidget\n # \"\"\"\n #\n # standard_colors = [\n # (230, 60, 60), (210, 40, 40), (190, 20, 20), (250, 80, 130),\n # (230, 60, 110), (210, 40, 90), (255, 90, 40), (235, 70, 20),\n # (215, 50, 0), (240, 100, 170), (220, 80, 150), (200, 60, 130),\n # (255, 125, 100), (235, 105, 80), (215, 85, 60), (240, 200, 150),\n # (220, 180, 130), (200, 160, 110), (250, 200, 0), (230, 180, 0),\n # (210, 160, 0), (225, 200, 40), (205, 180, 20), (185, 160, 0),\n # (80, 200, 140), (60, 180, 120), (40, 160, 100), (80, 225, 120),\n # (60, 205, 100), (40, 185, 80), (50, 180, 240), (30, 160, 220),\n # (10, 140, 200), (100, 200, 245), (80, 180, 225), (60, 160, 205),\n # (130, 110, 240), (110, 90, 220), (90, 70, 200), (180, 160, 255),\n # (160, 140, 235), (140, 120, 215), (180, 110, 240), (160, 90, 220),\n # (140, 70, 200), (210, 110, 255), (190, 90, 235), (170, 70, 215)\n # ]\n #\n # current_color = self.accent_color()\n #\n # dialog = self.create_color_dialog(parent, standard_colors, current_color)\n # dialog.currentColorChanged.connect(self.set_accent_color)\n #\n # if dialog.exec_():\n # self.set_accent_color(dialog.selectedColor())\n # else:\n # self.set_accent_color(current_color)\n #\n # def browse_background_color(self, parent=None):\n # \"\"\"\n # Shows the color dialog for changing the background color\n # :param parent: QWidget\n # \"\"\"\n #\n # standard_colors = [\n # (0, 0, 0), (20, 20, 20), (40, 40, 40), (60, 60, 60),\n # (80, 80, 80), (100, 100, 100), (20, 20, 30), (40, 40, 50),\n # (60, 60, 70), (80, 80, 90), (100, 100, 110), (120, 120, 130),\n # (0, 30, 60), (20, 50, 80), (40, 70, 100), (60, 90, 120),\n # (80, 110, 140), (100, 130, 160), (0, 60, 60), (20, 80, 80),\n # (40, 100, 100), (60, 120, 120), (80, 140, 140), (100, 160, 160),\n # (0, 60, 30), (20, 80, 50), (40, 100, 70), (60, 120, 90),\n # (80, 140, 110), (100, 160, 130), (60, 0, 10), (80, 20, 30),\n # (100, 40, 50), (120, 60, 70), (140, 80, 90), (160, 100, 110),\n # (60, 0, 40), (80, 20, 60), (100, 40, 80), (120, 60, 100),\n # (140, 80, 120), (160, 100, 140), (40, 15, 5), (60, 35, 25),\n # (80, 55, 45), (100, 75, 65), (120, 95, 85), (140, 115, 105)\n # ]\n #\n # current_color = self.background_color()\n #\n # dialog = self.create_color_dialog(parent, standard_colors, current_color)\n # dialog.currentColorChanged.connect(self.set_background_color)\n #\n # if dialog.exec_():\n # self.set_background_color(dialog.selectedColor())\n # else:\n # self.set_background_color(current_color)\n\n\nThemeCache = cache.CacheResource(Theme)\n","sub_path":"tpDcc/libs/qt/core/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":24033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
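_fade_color in the theme record above only formats an rgba() string for Qt stylesheets. A dependency-free stand-in for '#RRGGBB' input, as a sketch rather than tpDcc's actual API:

```python
# Pure-Python equivalent of _fade_color: '#RRGGBB' in, CSS-style rgba out.
def fade_color(hex_color, alpha):
    r, g, b = (int(hex_color[i:i + 2], 16) for i in (1, 3, 5))
    return 'rgba({}, {}, {}, {})'.format(r, g, b, alpha)

print(fade_color('#323232', '90%'))  # rgba(50, 50, 50, 90%)
```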
+{"seq_id":"111901568","text":"import time\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nfrom firebase_admin import auth\nfrom firebase_admin import storage\nfrom firebase_admin import firestore\nimport paho.mqtt.client as mqtt\nfrom google.cloud import storage\nimport pyrebase\nimport threading\nimport os\nimport json\n\n\nPATH_CRED = '/Users/douglaskorgut/Desktop/TCC/newDevCourse/pythonScripts/cred.json'\nURL_DB = 'https://tccfirstattempt.firebaseio.com'\nURL_STORAGE = 'gs://tccfirstattempt.appspot.com'\nREF_HOME = 'tccfirstattempt'\nREF_VIDEOS = 'video_publicacoes'\nREF_IMAGES = 'imagens'\nREF_PUBLICACOES = 'publicacoes'\nREF_USER = 'user'\nPATH_TO_IMAGE= \"/Users/douglaskorgut/Desktop/TCC/newDevCourse/pythonScripts/FacialRecognitionProject/MqttProject/img3.png\"\n\n\nconfig = {\n \"apiKey\": \"AIzaSyCp0RIPqODI_Zxsrfu48Yt087XD8orxXWg\",\n \"authDomain\": \"tccfirstattempt.firebaseapp.com\",\n \"databaseURL\": \"https://tccfirstattempt.firebaseio.com\",\n \"projectId\": \"tccfirstattempt\",\n \"storageBucket\": \"tccfirstattempt.appspot.com\",\n \"messagingSenderId\": \"994487973781\",\n \"serviceAccount\": \"./cred.json\"\n}\n\nfirebase = pyrebase.initialize_app(config)\nstorage = firebase.storage()\ndatabase = firebase.database()\n\n\nclass FirebaseManager(threading.Thread):\n\n def __init__(self):\n super(FirebaseManager, self).__init__()\n\n\n def run(self):\n while True:\n time.sleep(0.05)\n\n\n @staticmethod\n def doPublishVideo():\n userEmail = \"ZG91Z2xhc2tvcmd1dHRAZ21haWwuY29t\"\n database.child(\"video_publicacoes\").child(userEmail).push({\"titulo\":\"newVideo\"})\n data = database.child(\"video_publicacoes/\" + userEmail).order_by_key().limit_to_last(1).get()\n print(data.val())\n print(data.key())\n\n\n for x in data.each():\n imageName = x.key()\n\n storage.child(\"videos/\"+imageName).put(\"output.mp4\")\n\n if os.path.exists(\"output.mp4\"):\n os.remove(\"output.mp4\")\n else:\n print(\"The file does not exist\")\n\n\n @staticmethod\n def doPublishImage():\n userEmail = \"ZG91Z2xhc2tvcmd1dHRAZ21haWwuY29t\"\n print(\"User email: \"+userEmail)\n database.child(\"publicacoes\").child(userEmail).push({\"titulo\":\"TituloAgora!!!!\"})\n data = database.child(\"publicacoes/\"+userEmail).order_by_key().limit_to_last(1).get()\n print(data.val())\n print(data.key())\n\n for x in data.each():\n imageName = x.key()\n\n storage.child(\"imagens/\"+str(imageName)).put(\"pictureTaken.png\")\n if os.path.exists(\"pictureTaken.png\"):\n os.remove(\"pictureTaken.png\")\n else:\n print(\"The file does not exist\")\n #doPublishImage()\n\n @staticmethod\n def downloadUsersPictures():\n macAddress = \"b8:27:eb:4d:f9:09\"\n users = []\n fbUsers = database.child(\"usuario_detalhe\").get()\n for fbUser in fbUsers.each():\n users.append(fbUser.key())\n #print(len(users))\n\n systemUsers = []\n for user in users:\n data = database.child(\"usuario_detalhe\").child(user).child(\"usuario\").child(\"mac_address\").get()\n if ( data.val() == macAddress ):\n systemUsers.append(user)\n\n storage.child(\"perfil_images/\"+\"-LPSnAc2on_8hzJ_g80J\").download(\"teste/IMAGE_DOWN.jpg\")\n print(\"downloaded\")\n\n # for systemUser in systemUsers:\n # #download all files from user using its reference, download must be made in this same loop\n # recognitionReferences = database.child(\"perfil_publicacoes\").child(systemUser).get()\n # for recognitionReference in recognitionReferences.each():\n # print(recognitionReference.key())\n # 
storage.child(\"perfil_images/\"+recognitionReference.key()).download(\"imageafd\")\n # for recognitionReference in data():\n # regonitionReferences.append(recognitionReference.val())\n # print(recognitionReference.val())\n","sub_path":"FacialRecognitionProject/TccFinal_working_1/firebase.py","file_name":"firebase.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"506532104","text":"#!/usr/bin/env python\n\n\n\"\"\"\nTHIS IS A SKELETON OF WHAT A NORMAL PYTHON SCRIPT SHOULD LOOK LIKE\n\"\"\"\n\nimport time, os, sys\nfrom datetime import datetime\nfrom optparse import OptionParser\nimport time\nimport Tutils\n\ndef main():\n usage_str = \"%prog\"\n parser = OptionParser(usage = usage_str)\n \n (options, args) = parser.parse_args()\n \n if len(args) < 0:\n parser.print_help()\n sys.exit(2)\n\n cur_time = time.time()\n yesterday_time = cur_time - (3600*7) - 86400 - (cur_time%86400)\n dt = Tutils.epoch2tme(yesterday_time, \"%Y%m%d\")\n\n output_file = \"C:\\\\Users\\\\Tom\\\\programming\\\\OP_basketball_games\\%s_nba_scores.csv\" % dt\n # RUN NBA SCRAPING\n cmd = \"python C:\\\\Users\\\\Tom\\\\programming\\\\scripts\\\\python\\\\scrape_nba_game_scores.py %s %s\" % (dt, output_file)\n ret = os.system(cmd)\n\n # Sleep for 1 minute\n time.sleep(60)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/python/run_scrape_nba_game_scores.py","file_name":"run_scrape_nba_game_scores.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"265816842","text":"from functools import reduce # for code_metric; map and filter do not need to be imported\n\ndef is_sorted(s):\n if s == []:\n return True\n if len(s) == 1:\n return True\n else:\n return(s[0] <= s[1] and is_sorted(s[1:]))\n\ndef merge (l1,l2):\n if not l1:\n return l2\n elif not l2:\n return l1\n else:\n if l1[0] <= l2[0]:\n return [l1[0]] + merge(l1[1:],l2)\n if l2[0] < l1[0]:\n return [l2[0]] + merge(l1,l2[1:])\n\ndef sort(l):\n l1 = l[0:len(l)//2]\n l2 = l[len(l)//2:]\n if len(l) > 1:\n x = sort(l1)\n y = sort(l2)\n else:\n if not l1:\n return merge(l1,l2)\n if not l2:\n return merge(l1,l2)\n if len(l1) == 1:\n return merge(l1,l2)\n if len(l2) == 1:\n return merge(l1,l2)\n return(merge(x,y))\n \ndef compare(a,b):\n if not a and not b:\n return '='\n if a and not b:\n return '>'\n if b and not a:\n return '<'\n if a[0] < b[0]:\n return '<'\n if a[0] > b[0]:\n return '>'\n elif a[0] == b[0]:\n return compare(a[1:],b[1:])\n\ndef code_metric(file):\n infile = open(file,'r').read().splitlines()\n filt = filter(lambda x: x != '', infile)\n m = map(lambda x: (1,len(x)), filt)\n r = reduce(lambda x,y: (x[0]+y[0],x[1]+y[1]), m)\n return r\n\n\nif __name__==\"__main__\":\n import predicate,random,driver\n from goody import irange\n \n print('\\nTesting is_sorted')\n print(is_sorted([]))\n print(is_sorted([1,2,3,4,5,6,7]))\n print(is_sorted([1,2,3,7,4,5,6]))\n print(is_sorted([1,2,3,4,5,6,5]))\n print(is_sorted([7,6,5,4,3,2,1]))\n \n print('\\nTesting merge')\n print(merge([],[]))\n print(merge([],[1,2,3]))\n print(merge([1,2,3],[]))\n print(merge([1,2,3,4],[5,6,7,8]))\n print(merge([5,6,7,8],[1,2,3,4]))\n print(merge([1,3,5,7],[2,4,6,8]))\n print(merge([2,4,6,8],[1,3,5,7]))\n print(merge([1,2,5,7,10],[1,2,6,10,12]))\n\n\n print('\\nTesting sort')\n print(sort([1,2,3,4,5,6,7]))\n print(sort([7,6,5,4,3,2,1]))\n print(sort([4,5,3,1,2,7,6]))\n print(sort([1,7,2,6,3,5,4]))\n l = list(range(20)) # List of values 0-19\n for i in range(10): # Sort 10 times\n random.shuffle(l)\n print(sort(l),sep='-->')\n \n \n print('\\nTesting compare')\n print(compare('',''))\n print(compare('','abc'))\n print(compare('abc',''))\n print(compare('abc','abc'))\n print(compare('bc','abc'))\n print(compare('abc','bc'))\n print(compare('aaaxc','aaabc'))\n print(compare('aaabc','aaaxc'))\n \n \n print('\\nTesting code_metric')\n print(code_metric('cmtest.py'))\n print(code_metric('collatz.py'))\n print(code_metric('q5solution.py')) # A function analyzing the file it is in\n","sub_path":"q5helper/q5solution.py","file_name":"q5solution.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"102928831","text":"import os,random,shutil,json,re\nfrom hume_corpus import make_sgm_file,make_txt_file\n\ndef main():\n input_dir = \"/nfs/raid88/u10/users/hqiu_ad/raw_corpus/covid/chinese_news_vol2.extracted\"\n output_folder = \"/nfs/raid88/u10/users/hqiu_ad/raw_corpus/covid/chinese_news_vol2_sgms/\"\n shutil.rmtree(output_folder)\n os.makedirs(output_folder)\n booking_arr = list()\n sgm_path_list = list()\n breaking_point = 10000\n for file in os.listdir(input_dir):\n with open(os.path.join(input_dir,file)) as fp:\n for i in fp:\n i = i.strip()\n crawl_en = json.loads(i)\n # print(crawl_en)\n if \"extracted_text\" not in crawl_en:\n continue\n\n extracted_text = crawl_en[\"extracted_text\"]\n chars = re.findall(\"[\\u4e00-\\u9FFF]\", extracted_text)\n if len(chars) < 40:\n continue\n doc_uuid = crawl_en[\"BBN_docid\"]\n document_creation_time = crawl_en[\"BBN_website_creation_date\"]\n source_uri = crawl_en[\"url\"]\n author = crawl_en[\"BBN_website_name\"]\n make_sgm_file(extracted_text, doc_uuid, output_folder,\n \"NON_CDR\", source_uri,\n \"news_{}\".format(doc_uuid), \"CHS_NW_WM\", document_creation_time, author, booking_arr,sgm_path_list, breaking_point)\n make_txt_file(extracted_text, doc_uuid, output_folder)\n\n\n\n with open(os.path.join(output_folder, \"metadata.txt\"), 'w') as wfp:\n for i in booking_arr:\n wfp.write(\"{}\\n\".format(i))\n random.shuffle(sgm_path_list)\n with open(os.path.join(output_folder, 'sgms.list'), 'w') as wfp:\n for i in sgm_path_list:\n wfp.write(\"{}\\n\".format(i))\n with open(os.path.join(output_folder, 'txts.list'), 'w') as wfp:\n for i in sgm_path_list:\n wfp.write(\"{}\\n\".format(i))\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/python/data_digestion/common_crawl_chinese_vol2.py","file_name":"common_crawl_chinese_vol2.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"436348632","text":"# -*- coding: UTF-8 -*-\n\nimport numpy as np # python 矩阵操作lib\n\n\nclass Simplex():\n def __init__(self):\n self._A = \"\" # 系数矩阵\n self._b = \"\" #\n self._c = '' # 约束\n self._B = '' # 基变量的下标集合\n self.row = 0 # 约束个数\n\n def run(self, filename):\n # 读取文件内容,文件结构前两行分别为 变量数 和 约束条件个数\n # 接下来是系数矩阵\n # 然后是b数组\n # 然后是约束条件c\n\n A = []\n b = []\n c = []\n with open(filename, 'r') as f:\n self.var = int(f.readline())\n self.row = int(f.readline())\n\n for i in range(self.row):\n x = [int(item) for item in f.readline().strip().split(' ')]\n A.append(x)\n b = [int(item) for item in f.readline().split(' ')]\n c = [int(item) for item in f.readline().split(' ')]\n\n self._A = np.array(A, dtype=float)\n self._b = np.array(b, dtype=float)\n self._c = np.array(c, dtype=float)\n\n (x, obj) = self.Simplex(self._A, self._b, self._c)\n self.print_result(x, obj)\n\n @staticmethod\n def print_result(x, obj):\n px = ['x[%d] = %f' % (i + 1, x[i]) for i in range(len(x))]\n print(','.join(px))\n print('objective value is : %f' % obj)\n\n # 添加人工变量得到一个初始解\n def InitializeSimplex(self, A, b):\n\n # 添加松弛变量\n slacks = np.eye(self.row)\n A = np.concatenate((A, slacks), axis=1)\n c = np.concatenate((np.zeros(self.var), np.ones(self.row)), axis=0)\n # c [0. 0. 0. 0. 0. 0. 1. 1. 1.]\n\n b_min, min_pos = (np.min(b), np.argmin(b)) # 得到最小bi\n\n # 将bi全部转化成正数(相当于高斯行变换)\n if b_min < 0:\n for i in range(self.row):\n if i != min_pos:\n A[i] = A[i] - A[min_pos]\n b[i] = b[i] - b[min_pos]\n A[min_pos] *= -1\n b[min_pos] *= -1\n\n # 松弛变量全部加入基,初始解为b\n new_B = [i + self.var for i in range(self.row)]\n # new_B [6, 7, 8]\n\n # 辅助方程的目标函数值\n obj = - np.sum(b)\n\n c = c - c[new_B].reshape(1, -1).dot(A)\n c = c[0]\n # c [ 8. -7. 3. -4. -1. -1. -2. 2. 2.]\n\n # 入基, 要求ce<0 entering basis\n e = np.argmin(c)\n\n while c[e] < 0:\n theta = []\n for i in range(len(b)):\n # b [ 3. 7. 15.]\n if A[i][e] > 0:\n theta.append(b[i] / A[i][e])\n else:\n theta.append(float(\"inf\"))\n\n l = np.argmin(np.array(theta))\n\n if theta[l] == float('inf'):\n print('unbounded')\n return False\n\n (new_B, A, b, c, obj) = self.PIVOT(new_B, A, b, c, obj, l, e)\n\n e = np.argmin(c)\n\n return new_B, A[:, 0:self.var], b\n\n # 算法入口\n def Simplex(self, A, b, c):\n\n (B, A, b) = self.InitializeSimplex(A, b)\n\n # 函数目标值 -cTB-1b\n obj = - np.dot(c[B], b)\n # -26\n\n # reshape(1, -1) 让数组c变成一行\n c = c - c[B].reshape(1, -1).dot(A)\n c = c[0]\n # c [-11. 0. 0. -10. -11. 
0.]\n\n # entering basis\n e = np.argmin(c)\n\n # iterate until no reduced cost is negative\n while c[e] < 0:\n theta = []\n for i in range(len(b)):\n if A[i][e] > 0:\n theta.append(b[i] / A[i][e])\n else:\n theta.append(float(\"inf\"))\n\n l = np.argmin(np.array(theta))\n\n if theta[l] == float('inf'):\n print('unbounded')\n return False\n\n (B, A, b, c, obj) = self.PIVOT(B, A, b, c, obj, l, e)\n\n e = np.argmin(c)\n\n x = self._CalculateX(B, A, b, c)\n return x, - obj # the top-left tableau entry holds the negative of the objective\n\n # recover the full solution vector\n def _CalculateX(self, B, A, b, c):\n x = np.zeros(self.var, dtype=float)\n x[B] = b\n return x\n\n # basis change (pivot step)\n def PIVOT(self, B, A, b, c, z, l, e):\n # main element is a_le\n # l leaves the basis\n # e enters the basis\n\n main_elem = A[l][e]\n # scaling the l-th line\n A[l] = A[l] / main_elem\n b[l] = b[l] / main_elem\n\n # Gaussian elimination on the remaining rows\n for i in range(self.row):\n if i != l:\n b[i] = b[i] - A[i][e] * b[l]\n A[i] = A[i] - A[i][e] * A[l]\n\n # update the objective value\n z -= b[l] * c[e]\n c -= c[e] * A[l]\n\n # change the basis\n B[l] = e\n\n return B, A, b, c, z\n\n\nif __name__ == \"__main__\":\n s = Simplex()\n s.run('dualPro.txt')\n","sub_path":"DualSimplex/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
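The two-phase routine above reads as min c^T x subject to A x == b, x >= 0: phase 1 (InitializeSimplex) drives out the artificial variables, phase 2 pivots while any reduced cost is negative. dualPro.txt is not part of the record, so here is a hypothetical sanity check of that reading against scipy:

```python
# Hypothetical check (dualPro.txt is not shown): minimize 2*x1 + x2
# subject to x1 + x2 == 4, x >= 0; optimum x = (0, 4), objective 4.
from scipy.optimize import linprog

res = linprog(c=[2, 1], A_eq=[[1, 1]], b_eq=[4],
              bounds=[(0, None), (0, None)], method="highs")
print(res.x, res.fun)  # [0. 4.] 4.0
```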
+{"seq_id":"456544475","text":"# Copyright 2016 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\n\nimport logging\nimport sys\n\nfrom journal import transaction\n\nfrom sawtooth.exceptions import InvalidTransactionError\n\nfrom mktplace.transactions import holding_update\nfrom mktplace.transactions import liability_update\nfrom mktplace.transactions import market_place_object_update\nfrom mktplace.transactions import participant_update\n\nlogger = logging.getLogger(__name__)\n\n\nclass SellOfferObject(market_place_object_update.MarketPlaceObject):\n ObjectTypeName = 'SellOffer'\n ExecutionStyle = ['Any', 'ExecuteOnce', 'ExecuteOncePerParticipant']\n\n @classmethod\n def is_valid_object(cls, store, objectid):\n obj = cls.get_valid_object(store, objectid)\n if not obj:\n return False\n\n if not participant_update.ParticipantObject.is_valid_object(\n store, obj.get('creator')):\n return False\n\n if not liability_update.LiabilityObject.is_valid_object(\n store, obj.get('input')):\n return False\n\n if not holding_update.HoldingObject.is_valid_object(store,\n obj.get('output')):\n return False\n\n if float(obj.get('ratio', 0)) <= 0:\n return False\n\n if obj.get('minimum') < 0 or obj.get('maximum') < 0:\n return False\n\n if obj.get('maximum') < obj.get('minimum'):\n return False\n\n if obj.get('execution') not in cls.ExecutionStyle:\n return False\n\n return True\n\n def __init__(self, objectid=None, minfo=None):\n if minfo is None:\n minfo = {}\n super(SellOfferObject, self).__init__(objectid, minfo)\n\n self.CreatorID = minfo.get('creator', '**UNKNOWN**')\n self.InputID = minfo.get('input', '**UNKNOWN**')\n self.OutputID = minfo.get('output', '**UNKNOWN**')\n self.Ratio = float(minfo.get('ratio', 0))\n self.Description = minfo.get('description', '')\n self.Name = minfo.get('name', '')\n self.Minimum = int(minfo.get('minimum', 0))\n self.Maximum = int(minfo.get('maximum', sys.maxint))\n self.Execution = minfo.get('execution', 'Any')\n self.ExecutionState = {'ParticipantList': []}\n\n def dump(self):\n result = super(SellOfferObject, self).dump()\n\n result['creator'] = self.CreatorID\n result['input'] = self.InputID\n result['output'] = self.OutputID\n result['ratio'] = float(self.Ratio)\n result['description'] = self.Description\n result['name'] = self.Name\n result['minimum'] = int(self.Minimum)\n result['maximum'] = int(self.Maximum)\n result['execution'] = self.Execution\n result['execution-state'] = self.ExecutionState\n\n return result\n\n\nclass Register(transaction.Update):\n UpdateType = 'RegisterSellOffer'\n ObjectType = SellOfferObject\n CreatorType = participant_update.ParticipantObject\n\n def __init__(self,\n update_type,\n input_id,\n output_id,\n creator_id=None,\n ratio=1,\n description=None,\n name=None,\n minimum=0,\n maximum=None,\n execution=None):\n super(Register, self).__init__(update_type)\n\n self._creator_id = creator_id or '**UNKNOWN**'\n self._input_id = input_id\n 
self._output_id = output_id\n self._ratio = ratio\n self._description = description or ''\n self._name = name or ''\n self._minimum = minimum\n self._maximum = maximum or sys.maxint\n self._execution = execution or 'Any'\n\n @property\n def References(self):\n return [self._creator_id, self._input_id, self._output_id]\n\n def check_valid(self, store, txn):\n if txn.Identifier in store:\n raise InvalidTransactionError(\"ObjectId already in store\")\n\n if not market_place_object_update.global_is_permitted(\n store, txn, self._creator_id, self.CreatorType):\n raise InvalidTransactionError(\n \"Creator address is different from txn.OriginatorID\")\n\n if not market_place_object_update.global_is_valid_name(\n store, self._name, self.ObjectType, self._creator_id):\n raise InvalidTransactionError(\n \"Name, {}, is not valid\".format(self._name))\n\n if not liability_update.LiabilityObject.is_valid_object(\n store, self._input_id):\n raise InvalidTransactionError(\n \"{} is not a Liability\".format(self._input_id))\n\n obj = liability_update.LiabilityObject.get_valid_object(store,\n self._input_id)\n if not self.CreatorType.is_valid_creator(store, obj.get('creator'),\n txn.OriginatorID):\n logger.info('%s does not have permission to modify liability %s',\n txn.OriginatorID, self._input_id)\n raise InvalidTransactionError(\n \"Txn.OriginatorID not allowed to modify liability\")\n\n if not holding_update.HoldingObject.is_valid_object(store,\n self._output_id):\n raise InvalidTransactionError(\n \"OutputId is not a valid Holding\")\n\n obj = holding_update.HoldingObject.get_valid_object(\n store, self._output_id)\n if not self.CreatorType.is_valid_creator(store, obj.get('creator'),\n txn.OriginatorID):\n logger.info('%s does not have permission to modify liability %s',\n txn.OriginatorID, self._output_id)\n raise InvalidTransactionError(\n \"Txn.OriginatorID does not have permission to modify \"\n \"liability\")\n\n if self._ratio <= 0:\n logger.debug('invalid ratio %s in offer %s', self._ratio,\n txn.Identifier)\n raise InvalidTransactionError(\n \"Ratio < 0\")\n\n if self._minimum < 0 or self._maximum < 0 or \\\n self._maximum < self._minimum:\n logger.debug('inconsistent range %s < %s in offer %s',\n self._minimum, self._maximum, txn.Identifier)\n raise InvalidTransactionError(\n \"Minimum and Maximum are inconsistent\")\n if self._execution not in SellOfferObject.ExecutionStyle:\n logger.debug('invalid execution style %s in offer %s',\n self._execution, txn.Identifier)\n raise InvalidTransactionError(\n \"Execution not a valid ExecutionStyle\")\n\n def apply(self, store, txn):\n pobj = self.ObjectType(txn.Identifier)\n\n pobj.CreatorID = self._creator_id\n pobj.InputID = self._input_id\n pobj.OutputID = self._output_id\n pobj.Ratio = float(self._ratio)\n pobj.Description = self._description\n pobj.Name = self._name\n pobj.Minimum = self._minimum\n pobj.Maximum = self._maximum\n pobj.Execution = self._execution\n\n store[txn.Identifier] = pobj.dump()\n\n\nclass Unregister(transaction.Update):\n UpdateType = 'UnregisterSellOffer'\n ObjectType = SellOfferObject\n CreatorType = participant_update.ParticipantObject\n\n def __init__(self,\n update_type,\n object_id,\n creator_id):\n super(Unregister, self).__init__(update_type)\n self._object_id = object_id\n self._creator_id = creator_id\n\n @property\n def References(self):\n return []\n\n def check_valid(self, store, txn):\n if not market_place_object_update.global_is_permitted(\n store,\n txn,\n self._creator_id,\n self.CreatorType):\n raise 
InvalidTransactionError(\n \"Creator Address not the same as txn.OriginatorID\"\n )\n\n def apply(self, store, txn):\n del store[self._object_id]\n\n\nclass UpdateDescription(market_place_object_update.UpdateDescription):\n UpdateType = 'UpdateSellOfferDescription'\n ObjectType = SellOfferObject\n CreatorType = participant_update.ParticipantObject\n\n\nclass UpdateName(market_place_object_update.UpdateName):\n UpdateType = 'UpdateSellOfferName'\n ObjectType = SellOfferObject\n CreatorType = participant_update.ParticipantObject\n","sub_path":"extensions/mktplace/mktplace/transactions/sell_offer_update.py","file_name":"sell_offer_update.py","file_ext":"py","file_size_in_byte":9150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"489735139","text":"from task import Task\nfrom node import *\nfrom collections import deque\nimport math\nfrom constants import *\n\nclass Env:\n def __init__(self):\n # 系统设备状况\n self.transmitStore=TransmitStore\n self.mobileNum=MOBILE_NUM\n self.mecNum=MEC_NUM\n self.mobiles=list()\n self.mecs=list()\n for i in range(self.mobileNum):\n self.mobiles.append(Mobile(\"mobile\"+str(i),\"1.1.1.\"+str(i),{\"mem\":4}, 2))\n for i in range(self.mecNum):\n self.mecs.append(MEC(\"mec\"+str(i),\"1.1.2.\"+str(i), {\"mem\":16}, 3))\n # 时隙设定\n self.interval=1\n self.timeSlots=100\n self.currentTime=0\n # 信道带宽-MHz,信噪比-db\n self.bandwidth=20\n self.SNR=10\n # 执行功率 传输功率\n self.processPower=50\n self.transmitPower=20\n # 平衡系数\n self.W1=0.5\n self.W2=1-self.W1\n\n def reset(self):\n self.currentTime=0\n for mobile in self.mobiles:\n mobile.processQueue.clear()\n mobile.resourceAvil[\"mem\"]=4\n for mec in self.mecs:\n mec.processQueue.clear()\n mec.total_compdelay=0\n mec.resourceAvil[\"mem\"]=16\n self.transmitStore.storeQueue.clear()\n\n def create_tasks(self):\n tasks=list()\n for i in range(self.mobileNum):\n tasks.append(self.mobiles[i].createTask(self.currentTime))\n return tasks\n\n def createAllTasks(self):\n allTasks=list()\n for i in range(self.timeSlots):\n tasks=list()\n for j in range(self.mobileNum):\n tasks.append(self.mobiles[j].createTask(i))\n allTasks.append(tasks)\n return allTasks\n\n def step(self,tasks):\n for task in tasks:\n if task.offload_action==0:\n self.mobiles[int(task.node_from[-1])].updateByTime(task, self.currentTime, 'c', self.get_transpeed(), self.transmitStore.store_queue)\n else:\n self.mobiles[int(task.node_from[-1])].updateByTime(task, self.currentTime, 't', self.get_transpeed(), self.transmitStore.store_queue)\n self.mecs[task.offload_action-1].updateByTime(self.currentTime, self.transmitStore.store_queue)\n next_states=[]\n for mobile in self.mobiles:\n next_states.append([mobile.total_compdelay,mobile.total_trandelay,self.mecs[0].total_compdelay,self.mecs[1].total_compdelay])\n return next_states\n\n def end(self):\n if self.currentTime>self.timeSlots:\n return True\n return False\n\n def get_reward(self):\n total_delay=0\n total_consume=0\n for mobile in self.mobiles:\n total_delay=max(mobile.total_compdelay,total_delay)\n total_consume+= self.processPower * mobile.total_compdelay + self.transmitPower * mobile.total_trandelay\n for mec in self.mecs:\n total_delay=max(mec.total_compdelay,total_delay)\n total_consume+= self.processPower * mec.total_compdelay\n return -self.W1*total_delay-self.W2*total_consume\n\n def get_transpeed(self):\n return self.bandwidth*math.log(1+self.SNR,2)\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"rl/env1.py","file_name":"env1.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"180494390","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2018 by\n# Marta Grobelna \n# Petre Petrov \n# Rudi Floren \n# Tobias Winkler \n# All rights reserved.\n# BSD license.\n#\n# Authors: Marta Grobelna \n# Petre Petrov \n# Rudi Floren \n# Tobias Winkler \n\nimport pyboltzmann as pybo\n\nfrom planar_graph_sampler.combinatorial_classes.one_connected_graph import OneConnectedPlanarGraph\nfrom planar_graph_sampler.grammar.binary_tree_decomposition import EarlyRejectionControl\nfrom planar_graph_sampler.grammar.grammar_utils import underive\nfrom planar_graph_sampler.grammar.two_connected_decomposition import two_connected_graph_grammar\n\n\nclass Merger(pybo.DefaultBuilder):\n \"\"\"\n Merges a set of l-derived graphs at their marked vertices.\n \"\"\"\n def set(self, graphs):\n # Merge a set of l-derived one-connected planar graphs at their marked vertices.\n # If the set is empty, return a single-node graph.\n if len(graphs) is 0:\n g = OneConnectedPlanarGraph()\n return pybo.LDerivedClass(OneConnectedPlanarGraph(), g.half_edge)\n result = graphs.pop()\n for g in graphs:\n result.marked_atom.insert_all_after(g.marked_atom)\n assert isinstance(result, pybo.LDerivedClass)\n return result\n\n\ndef merge(prod):\n \"\"\"Merges l-derived one-connected graphs at their marked vertices\"\"\"\n # lhs is a bi-derived connected and rhs a derived connected.\n lhs = prod.first\n rhs = prod.second\n if not lhs.marked_atom.is_trivial:\n rhs.marked_atom.insert_all_after(lhs.marked_atom)\n return lhs\n\n\ndef subs_marked_vertex(decomp):\n # decomp is of form (G_1_dx + L * G_1_dx_dx) * G_2_dx_dx.\n\n if isinstance(decomp.first, pybo.LDerivedClass):\n one_connected = decomp.first\n plug_in_he = one_connected.marked_atom\n if not plug_in_he.is_trivial:\n decomp.second.base_class_object.marked_atom.insert_all_after(plug_in_he)\n else:\n one_connected = decomp.first.second\n plug_in_he = one_connected.marked_atom\n if not plug_in_he.is_trivial:\n decomp.second.base_class_object.marked_atom.insert_all_after(plug_in_he)\n decomp.second.base_class_object.marked_atom = one_connected.base_class_object.marked_atom\n return decomp.second\n\n\ndef subs_marked_vertex_2(decomp):\n # decomp is of form ((G_1_dx + L * G_1_dx_dx) * (G_1_dx + L * G_1_dx_dx)) * G_2_dx_dx_dx.\n if isinstance(decomp.first.first, pybo.LDerivedClass):\n plug_in_he1 = decomp.first.first.marked_atom\n else:\n plug_in_he1 = decomp.first.first.second.marked_atom\n if isinstance(decomp.first.second, pybo.LDerivedClass):\n plug_in_he2 = decomp.first.second.marked_atom\n else:\n plug_in_he2 = decomp.first.second.second.marked_atom\n if not plug_in_he1.is_trivial:\n decomp.second.base_class_object.base_class_object.marked_atom.insert_all_after(plug_in_he1)\n if not plug_in_he2.is_trivial:\n decomp.second.base_class_object.marked_atom.insert_all_after(plug_in_he2)\n return decomp.second\n\n\ndef rej_to_G_1(g):\n return pybo.bern(1 / (g.l_size + 1))\n\n\ndef one_connected_graph_grammar():\n \"\"\"Constructs the grammar for connected planar graphs.\n\n Returns\n -------\n DecompositionGrammar\n The grammar for sampling from G_1_dx and G_1_dx_dx.\n \"\"\"\n\n # Some shortcuts to make the grammar more readable.\n L = pybo.LAtomSampler\n Rule = pybo.AliasSampler\n G_2_dx = Rule('G_2_dx')\n G_2_dx_dx = Rule('G_2_dx_dx')\n G_2_dx_dx_dx = Rule('G_2_dx_dx_dx')\n G_1_dx = Rule('G_1_dx')\n G_1_dx_dx = Rule('G_1_dx_dx')\n G_1_dx_dx_dx = Rule('G_1_dx_dx_dx')\n Set = pybo.SetSampler\n LSubs = pybo.LSubsSampler\n Bij = pybo.BijectionSampler\n Rej = 
pybo.RejectionSampler\n\n grammar = pybo.DecompositionGrammar()\n grammar.rules = two_connected_graph_grammar().rules\n EarlyRejectionControl.grammar = grammar\n\n grammar.add_rules({\n\n 'G_1':\n Bij(\n Rej(\n G_1_dx,\n rej_to_G_1 # See lemma 15.\n ),\n underive\n ),\n\n 'G_1_dx':\n Set(\n 0,\n LSubs(\n G_2_dx,\n L() * G_1_dx\n )\n ),\n\n 'G_1_dx_dx':\n Bij(\n Bij(\n (G_1_dx + L() * G_1_dx_dx) * LSubs(G_2_dx_dx, L() * G_1_dx),\n subs_marked_vertex\n ) * G_1_dx,\n merge\n ),\n\n 'G_1_dx_dx_dx':\n Bij(\n Bij(\n (2 * G_1_dx_dx + L() * G_1_dx_dx_dx) * LSubs(G_2_dx_dx, L() * G_1_dx),\n subs_marked_vertex\n ) * G_1_dx,\n merge\n )\n\n + Bij(\n Bij(\n (G_1_dx + L() * G_1_dx_dx) ** 2 * LSubs(G_2_dx_dx_dx, L() * G_1_dx),\n subs_marked_vertex_2\n ) * G_1_dx,\n merge\n )\n\n + Bij(\n Bij(\n (G_1_dx + L() * G_1_dx_dx) * LSubs(G_2_dx_dx, L() * G_1_dx),\n subs_marked_vertex\n ) * G_1_dx_dx,\n merge\n ),\n\n })\n\n grammar.set_builder(['G_1_dx'], Merger())\n\n return grammar\n\n\nif __name__ == '__main__':\n from planar_graph_sampler.evaluations_planar_graph import *\n from timeit import default_timer as timer\n\n oracle = pybo.EvaluationOracle(my_evals_10)\n pybo.BoltzmannSamplerBase.oracle = oracle\n pybo.BoltzmannSamplerBase.debug_mode = False\n\n start = timer()\n grammar = one_connected_graph_grammar()\n symbolic_x = 'x'\n symbolic_y = 'y'\n sampled_class = 'G_1_dx_dx_dx'\n grammar.init(sampled_class, symbolic_x, symbolic_y)\n end = timer()\n print(\"Time init: {}\".format(end - start))\n\n try:\n print(\"expected avg. size: {}\\n\".format(oracle.get_expected_l_size(sampled_class, symbolic_x, symbolic_y)))\n except pybo.PyBoltzmannError:\n pass\n\n # random.seed(0)\n # boltzmann_framework_random_gen.seed(13)\n\n l_sizes = []\n i = 0\n samples = 100\n start = timer()\n while i < samples:\n obj = grammar.sample_iterative(sampled_class)\n l_sizes.append(obj.l_size)\n # print(obj.l_size)\n i += 1\n end = timer()\n print()\n print(\"avg. size: {}\".format(sum(l_sizes) / len(l_sizes)))\n print(\"time: {}\".format(end - start))\n\n # while True:\n # g = grammar.sample_iterative(sampled_class, symbolic_x, symbolic_y)\n # print(g)\n # for key, value in sorted(Stats.rules.items(), key=lambda x: x[1]):\n # print(\"{} : {}\".format(key, value))\n # print()\n # # if g.l_size >= 1000:\n # # g = g.underive_all()\n # # print(g)\n # # print(g.u_size / g.l_size)\n # # # assert g.is_consistent\n # # #g.plot(with_labels=False, use_planar_drawer=False, node_size=13)\n # # #plt.show()\n","sub_path":"planar_graph_sampler/grammar/one_connected_decomposition.py","file_name":"one_connected_decomposition.py","file_ext":"py","file_size_in_byte":7276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"9561078","text":"import discord, aiohttp\nimport config\n\nclass Chatbot:\n\n def __init__(self, bot):\n self.bot = bot\n\n async def message_handler(self, message):\n channel = message.channel\n content = message.content\n if message.author.bot:\n return\n\n if content.startswith(\"<@310039170792030211> \") or content.startswith(\"<@!310039170792030211> \"):\n commands = []\n for command in self.bot.commands:\n commands.append(command.name)\n hascommand = 0\n for command in commands:\n if str(message.content[22:]).startswith(command) or str(message.content[23:]).startswith(command):\n hascommand += 1\n if hascommand >= 1:\n return\n await channel.trigger_typing()\n async with aiohttp.ClientSession(headers={\"Authorization\": config.chatbot}) as cs:\n terms = str(message.content[22:]).replace(\" \", \"%20\")\n async with cs.get(f'https://api.dialogflow.com/v1/query?v=20150910&lang=en&query={terms}&sessionId=0') as r:\n res = await r.json()\n await channel.send(embed=discord.Embed(color=0xDEADBF, description=res['result']['fulfillment']['messages'][0]['speech']))\n\ndef setup(bot):\n n = Chatbot(bot)\n bot.add_listener(n.message_handler, \"on_message\")\n bot.add_cog(n)","sub_path":"old/unused/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"621126255","text":"\"\"\"\nSequence Of GET's\n+++++++++++++++++\n\nSend two SNMP GET requests in a row using the following options:\n\n* with SNMPv3, user 'usr-md5-none', MD5 authentication, no privacy\n* over IPv4/UDP\n* to an Agent at demo.snmplabs.com:161\n* for IF-MIB::ifInOctets.1 and IF-MIB::ifOutOctets.1 MIB objects\n\nUse a queue of MIB objects to query.\n\nThe next() call is used to forward Python iterator to the position where it\ncould consume input\n\nFunctionally similar to:\n\n| $ snmpget -v3 -l authNoPriv -u usr-md5-none -A authkey1 demo.snmplabs.com \\\n| IF-MIB::ifInOctets.1\n\n\"\"\"#\nfrom pysnmp.hlapi import *\n\nqueue = [ [ ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets', 1)) ],\n [ ObjectType(ObjectIdentity('IF-MIB', 'ifOutOctets', 1)) ] ]\n\niter = getCmd(SnmpEngine(),\n UsmUserData('usr-md5-none', 'authkey1'),\n UdpTransportTarget(('demo.snmplabs.com', 161)),\n ContextData())\n\nnext(iter)\n\nwhile queue:\n errorIndication, errorStatus, errorIndex, varBinds = iter.send(queue.pop())\n if errorIndication:\n print(errorIndication)\n elif errorStatus:\n print('%s at %s' % (\n errorStatus.prettyPrint(),\n errorIndex and varBinds[int(errorIndex)-1][0] or '?'\n )\n )\n else:\n for varBind in varBinds:\n print(' = '.join([ x.prettyPrint() for x in varBind ]))\n","sub_path":"examples/hlapi/asyncore/sync/manager/cmdgen/multiple-get-calls.py","file_name":"multiple-get-calls.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"646684442","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom xmls import Xmls\nfrom datetimes import Datetimes\nfrom logs import Logs\n\nREDENVELOPE_CONFIGURATION_FILE_PATH = \"project/configuration.xml\"\n# REDENVELOPE_NAME = \"root\"\nREDENVELOPE_NAME = \"redenvelope\"\nPURCHASE_NAME = \"purchase\"\n\nREDENVELOPE_ATTRIBUTE_FACTOR = \"factor\"\nREDENVELOPE_ATTRIBUTE_THRESHOLD = \"person_threshold\"\nREDENVELOPE_DEVICE_BONUS_PATH = \"device/bonus\"\nREDENVELOPE_EXTRA_MAX_PATH = \"extra/max\"\nREDENVELOPE_EXTRA_MIN_PATH = \"extra/min\"\nREDENVELOPE_EXTRA_POSSIBILITY_PATH = \"extra/possibility\"\nREDENVELOPE_EXTRA_THRESHOLD_PATH = \"extra/threshold\"\nREDENVELOPE_EXTRA_KEEP_PATH = \"extra/keep\"\nREDENVELOPE_RAIN_MAX_PATH = \"rain/max\"\nREDENVELOPE_RAIN_MIN_PATH = \"rain/min\"\nREDENVELOPE_RAIN_POSSIBILITY_PATH = \"rain/possibility\"\nREDENVELOPE_RAIN_THRESHOLD_PATH = \"rain/threshold\"\nREDENVELOPE_RAIN_START_DATE_PATH = \"rain/start_date\"\nREDENVELOPE_RAIN_END_DATE_PATH = \"rain/end_date\"\nREDENVELOPE_RAIN_WEEK_PATH = \"rain/week\"\nREDENVELOPE_RAIN_START_TIME_PATH = \"rain/start_time\"\nREDENVELOPE_RAIN_END_TIME_PATH = \"rain/end_time\"\nREDENVELOPE_RAIN_COUNT_PATH = \"rain/count\"\n\nPURCHASE_URL = \"url\"\n\nTIME_FORMAT = \"%H:%M:%S\"\nDATE_FORMAT = \"%Y:%m:%d\"\n\n\nclass RedEnvelopeConfiguration(object):\n modified_time = None\n text = None\n root = None\n RED_ENVELOPE_FACTOR = None\n RED_ENVELOPE_PERSON_THRESHOLD = None\n RED_ENVELOPE_BY_DEVICE = None\n RED_ENVELOPE_EXTRA_POSSIBILITY = None\n RED_ENVELOPE_EXTRA_THRESHOLD = None\n RED_ENVELOPE_EXTRA_MIN = None\n RED_ENVELOPE_EXTRA_MAX = None\n RED_ENVELOPE_EXTRA_KEEP = None\n RED_ENVELOPE_RAIN_POSSIBILITY = None\n RED_ENVELOPE_RAIN_THRESHOLD = None\n RED_ENVELOPE_RAIN_MIN = None\n RED_ENVELOPE_RAIN_MAX = None\n RED_ENVELOPE_RAIN_START_STR = None\n RED_ENVELOPE_RAIN_START = None\n RED_ENVELOPE_RAIN_END_STR = None\n RED_ENVELOPE_RAIN_END = None\n RED_ENVELOPE_RAIN_COUNT = None\n RED_ENVELOPE_RAIN_WEEK = []\n RED_ENVELOPE_RAIN_WEEK_STR = \"\"\n RED_ENVELOPE_RAIN_START_DATE = \"\"\n RED_ENVELOPE_RAIN_START_DATE_STR = \"\"\n RED_ENVELOPE_RAIN_END_DATE = \"\"\n RED_ENVELOPE_RAIN_END_DATE_STR = \"\"\n PURCHASE_URL = \"\"\n\n @classmethod\n def get_file_text(cls):\n \"\"\"\n 查看文件是否被改变, 如果改变了,重新读取;否则用原来的数据\n \"\"\"\n modified_time = os.path.getmtime(REDENVELOPE_CONFIGURATION_FILE_PATH)\n # Logs.print_log(\"modified_time\", modified_time)\n if cls.modified_time is not None and cls.modified_time == modified_time:\n return cls.text\n else:\n cls.modified_time = modified_time\n cls.text = Xmls.get_text(REDENVELOPE_CONFIGURATION_FILE_PATH)\n\n @classmethod\n def get_file_root(cls):\n cls.get_file_text()\n cls.root = Xmls.get_root(cls.text)\n\n @classmethod\n def set_red_envelope_configuration(cls):\n try:\n cls.get_file_root()\n redenvelope = Xmls.get_all_sub_nodes(cls.root, REDENVELOPE_NAME)[0]\n cls.RED_ENVELOPE_FACTOR = int(Xmls.get_node_attribute_value(redenvelope, REDENVELOPE_ATTRIBUTE_FACTOR))\n cls.RED_ENVELOPE_PERSON_THRESHOLD = float(\n Xmls.get_node_attribute_value(redenvelope, REDENVELOPE_ATTRIBUTE_THRESHOLD))\n cls.RED_ENVELOPE_BY_DEVICE = float(Xmls.get_some_node_text(redenvelope, REDENVELOPE_DEVICE_BONUS_PATH))\n\n cls.RED_ENVELOPE_EXTRA_POSSIBILITY = float(\n Xmls.get_some_node_text(redenvelope, REDENVELOPE_EXTRA_POSSIBILITY_PATH))\n cls.RED_ENVELOPE_EXTRA_THRESHOLD = float(\n Xmls.get_some_node_text(redenvelope, REDENVELOPE_EXTRA_THRESHOLD_PATH))\n cls.RED_ENVELOPE_EXTRA_MIN = 
float(Xmls.get_some_node_text(redenvelope, REDENVELOPE_EXTRA_MIN_PATH))\n cls.RED_ENVELOPE_EXTRA_MAX = float(Xmls.get_some_node_text(redenvelope, REDENVELOPE_EXTRA_MAX_PATH))\n cls.RED_ENVELOPE_EXTRA_KEEP = float(Xmls.get_some_node_text(redenvelope, REDENVELOPE_EXTRA_KEEP_PATH))\n cls.RED_ENVELOPE_RAIN_POSSIBILITY = float(\n Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_POSSIBILITY_PATH))\n cls.RED_ENVELOPE_RAIN_THRESHOLD = float(\n Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_THRESHOLD_PATH))\n cls.RED_ENVELOPE_RAIN_MIN = float(Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_MIN_PATH))\n cls.RED_ENVELOPE_RAIN_MAX = float(Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_MAX_PATH))\n # time_format = \"%H:%M:%S\"\n cls.RED_ENVELOPE_RAIN_START_STR = Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_START_TIME_PATH)\n cls.RED_ENVELOPE_RAIN_START = Datetimes.string_to_time(\n Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_START_TIME_PATH))\n cls.RED_ENVELOPE_RAIN_END_STR = Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_END_TIME_PATH)\n cls.RED_ENVELOPE_RAIN_END = Datetimes.string_to_time(\n Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_END_TIME_PATH))\n cls.RED_ENVELOPE_RAIN_COUNT = int(Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_COUNT_PATH))\n\n week = Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_WEEK_PATH)\n cls.RED_ENVELOPE_RAIN_WEEK_STR = week\n if week:\n cls.RED_ENVELOPE_RAIN_WEEK = week.split(\",\")\n else:\n cls.RED_ENVELOPE_RAIN_WEEK = []\n cls.RED_ENVELOPE_RAIN_WEEK_STR = \"\"\n # date_format = \"%Y:%m:%d\"\n start_date = Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_START_DATE_PATH)\n cls.RED_ENVELOPE_RAIN_START_DATE_STR = start_date\n if start_date:\n cls.RED_ENVELOPE_RAIN_START_DATE = Datetimes.string_to_date(start_date)\n else:\n cls.RED_ENVELOPE_RAIN_START_DATE = \"\"\n cls.RED_ENVELOPE_RAIN_START_DATE_STR = \"\"\n end_date = Xmls.get_some_node_text(redenvelope, REDENVELOPE_RAIN_END_DATE_PATH)\n cls.RED_ENVELOPE_RAIN_END_DATE_STR = end_date\n if end_date:\n cls.RED_ENVELOPE_RAIN_END_DATE = Datetimes.string_to_date(end_date)\n else:\n cls.RED_ENVELOPE_RAIN_END_DATE = \"\"\n cls.RED_ENVELOPE_RAIN_END_DATE_STR = \"\"\n\n purchase = Xmls.get_all_sub_nodes(cls.root, PURCHASE_NAME)[0]\n url = Xmls.get_some_node_text(purchase, PURCHASE_URL)\n if url:\n cls.PURCHASE_URL = url\n else:\n cls.PURCHASE_URL = \"\"\n except Exception as ex:\n Logs.print_current_function_name_and_line_number(ex)\n return False\n return True\n\n @classmethod\n def set_node_attribute(cls, root, path, key, value):\n try:\n node = Xmls.get_the_first_node(root, path)\n Xmls.set_node_attribute(node, key, value)\n return True\n except Exception as ex:\n Logs.print_current_function_name_and_line_number(ex)\n return False\n\n @classmethod\n def set_node_text(cls, root, path, text):\n try:\n node = Xmls.get_the_first_node(root, path)\n Xmls.set_node_text(node, text)\n return True\n except Exception as ex:\n Logs.print_current_function_name_and_line_number(ex)\n return False\n\n @classmethod\n def modify_configuration(cls, new_items):\n root = Xmls.parse_xml(REDENVELOPE_CONFIGURATION_FILE_PATH)\n if root:\n pass\n else:\n return False\n for k, v in new_items.iteritems():\n try:\n if k == \"factor\":\n cls.set_node_attribute(root, REDENVELOPE_NAME, k, v)\n elif k == \"person_threshold\":\n cls.set_node_attribute(root, REDENVELOPE_NAME, k, v)\n\n elif k == \"device_bonus\":\n cls.set_node_text(root, REDENVELOPE_DEVICE_BONUS_PATH, v)\n\n elif k 
== \"extra_possibility\":\n cls.set_node_text(root, REDENVELOPE_EXTRA_POSSIBILITY_PATH, v)\n elif k == \"extra_threshold\":\n cls.set_node_text(root, REDENVELOPE_EXTRA_THRESHOLD_PATH, v)\n elif k == \"extra_max\":\n cls.set_node_text(root, REDENVELOPE_EXTRA_MAX_PATH, v)\n elif k == \"extra_min\":\n cls.set_node_text(root, REDENVELOPE_EXTRA_MIN_PATH, v)\n elif k == \"extra_keep\":\n cls.set_node_text(root, REDENVELOPE_EXTRA_KEEP_PATH, v)\n\n elif k == \"rain_possibility\":\n cls.set_node_text(root, REDENVELOPE_RAIN_POSSIBILITY_PATH, v)\n elif k == \"rain_threshold\":\n cls.set_node_text(root, REDENVELOPE_RAIN_THRESHOLD_PATH, v)\n elif k == \"rain_max\":\n cls.set_node_text(root, REDENVELOPE_RAIN_MAX_PATH, v)\n elif k == \"rain_min\":\n cls.set_node_text(root, REDENVELOPE_RAIN_MIN_PATH, v)\n\n elif k == \"rain_start\":\n cls.set_node_text(root, REDENVELOPE_RAIN_START_TIME_PATH, v)\n elif k == \"rain_end\":\n cls.set_node_text(root, REDENVELOPE_RAIN_END_TIME_PATH, v)\n elif k == \"rain_count\":\n cls.set_node_text(root, REDENVELOPE_RAIN_COUNT_PATH, v)\n\n elif k == \"rain_week\":\n cls.set_node_text(root, REDENVELOPE_RAIN_WEEK_PATH, v)\n elif k == \"rain_start_date\":\n cls.set_node_text(root, REDENVELOPE_RAIN_START_DATE_PATH, v)\n elif k == \"rain_end_date\":\n cls.set_node_text(root, REDENVELOPE_RAIN_END_DATE_PATH, v)\n elif k == \"purchase_url\":\n cls.set_node_text(root, PURCHASE_URL, v)\n except Exception as ex:\n Logs.print_current_function_name_and_line_number(ex)\n\n root.write(REDENVELOPE_CONFIGURATION_FILE_PATH)\n return True\n\n @classmethod\n def get_configuration(cls):\n cls.set_red_envelope_configuration()\n redenvelope = dict()\n redenvelope[\"factor\"] = cls.RED_ENVELOPE_FACTOR\n redenvelope[\"person_threshold\"] = cls.RED_ENVELOPE_PERSON_THRESHOLD\n redenvelope[\"device_bonus\"] = cls.RED_ENVELOPE_BY_DEVICE\n redenvelope[\"extra_possibility\"] = cls.RED_ENVELOPE_EXTRA_POSSIBILITY\n redenvelope[\"extra_threshold\"] = cls.RED_ENVELOPE_EXTRA_THRESHOLD\n redenvelope[\"extra_min\"] = cls.RED_ENVELOPE_EXTRA_MIN\n redenvelope[\"extra_max\"] = cls.RED_ENVELOPE_EXTRA_MAX\n redenvelope[\"extra_keep\"] = cls.RED_ENVELOPE_EXTRA_KEEP\n redenvelope[\"rain_possibility\"] = cls.RED_ENVELOPE_RAIN_POSSIBILITY\n redenvelope[\"rain_threshold\"] = cls.RED_ENVELOPE_RAIN_THRESHOLD\n redenvelope[\"rain_min\"] = cls.RED_ENVELOPE_RAIN_MIN\n redenvelope[\"rain_max\"] = cls.RED_ENVELOPE_RAIN_MAX\n redenvelope[\"rain_start\"] = cls.RED_ENVELOPE_RAIN_START_STR\n redenvelope[\"rain_end\"] = cls.RED_ENVELOPE_RAIN_END_STR\n redenvelope[\"rain_count\"] = cls.RED_ENVELOPE_RAIN_COUNT\n redenvelope[\"rain_week\"] = cls.RED_ENVELOPE_RAIN_WEEK_STR\n redenvelope[\"rain_start_date\"] = cls.RED_ENVELOPE_RAIN_START_DATE_STR\n redenvelope[\"rain_end_date\"] = cls.RED_ENVELOPE_RAIN_END_DATE_STR\n redenvelope[\"purchase_url\"] = cls.PURCHASE_URL\n return redenvelope\n\n @classmethod\n def print_configuration(cls):\n pass\n # print cls.RED_ENVELOPE_FACTOR\n # print cls.RED_ENVELOPE_PERSON_THRESHOLD\n # print cls.RED_ENVELOPE_BY_DEVICE\n # print cls.RED_ENVELOPE_EXTRA_POSSIBILITY\n # print cls.RED_ENVELOPE_EXTRA_THRESHOLD\n # print cls.RED_ENVELOPE_EXTRA_MIN\n # print cls.RED_ENVELOPE_EXTRA_MAX\n # print cls.RED_ENVELOPE_EXTRA_KEEP\n # print cls.RED_ENVELOPE_RAIN_POSSIBILITY\n # print cls.RED_ENVELOPE_RAIN_THRESHOLD\n # print cls.RED_ENVELOPE_RAIN_MIN\n # print cls.RED_ENVELOPE_RAIN_MAX\n # print cls.RED_ENVELOPE_RAIN_START\n # print cls.RED_ENVELOPE_RAIN_END\n # print cls.RED_ENVELOPE_RAIN_COUNT\n # print 
cls.RED_ENVELOPE_RAIN_WEEK\n # print cls.RED_ENVELOPE_RAIN_START_DATE\n # print cls.RED_ENVELOPE_RAIN_END_DATE\n # print cls.PURCHASE_URL\n","sub_path":"xiuxiu_api/server/project/eye/common/red_envelope_configuration.py","file_name":"red_envelope_configuration.py","file_ext":"py","file_size_in_byte":12457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"227045723","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django_extensions.db.fields.json\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n ('data_importer', '0002_auto_20151125_1354'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CrossValidationResult',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('created', models.DateTimeField(verbose_name='date created', auto_now_add=True)),\n ('modified', models.DateTimeField(verbose_name='last modified', auto_now=True)),\n ('object_pk', models.IntegerField(db_index=True)),\n ('diff', django_extensions.db.fields.json.JSONField()),\n ('errors', django_extensions.db.fields.json.JSONField()),\n ('content_type', models.ForeignKey(to='contenttypes.ContentType')),\n ('old', models.ForeignKey(to='data_importer.ImportedObjects', null=True)),\n ],\n options={\n 'ordering': ('-modified', '-created'),\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='CrossValidationRun',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),\n ('created', models.DateTimeField(verbose_name='date created', auto_now_add=True)),\n ('modified', models.DateTimeField(verbose_name='last modified', auto_now=True)),\n ('checked_count', models.PositiveIntegerField(default=0)),\n ('invalid_count', models.PositiveIntegerField(default=0)),\n ('valid_count', models.PositiveIntegerField(default=0)),\n ],\n options={\n 'ordering': ('-modified', '-created'),\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='crossvalidationresult',\n name='run',\n field=models.ForeignKey(to='cross_validator.CrossValidationRun'),\n ),\n ]\n","sub_path":"src/ralph/cross_validator/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"151420415","text":"import pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport pickle\r\n\r\nbankdata = pd.read_csv('D:\\\\data_science _data _set\\\\Loan_prediction\\\\final_source_data\\\\train.csv')\r\n\r\n#Train data\r\n#bankdata6.shape #o/p:(104999, 27)\r\ntrndt1 = bankdata.copy()\r\n#trndt1.shape #(104999, 27)\r\ntrndt1.head(10)\r\n\r\n#EDA for Train DATA\r\ntrndt1.shape #(104999, 27)\r\n#Found no duplicates in the dataframe\r\nany(trndt1.duplicated()) #o/p:False\r\n#Found duplicate records in all the columns of the dataframe\r\n#any(trndt1['Name'].duplicated()) #o/p:True\r\n#any(trndt1['ChgOffPrinGr'].duplicated()) #o/p:True\r\n#Number of missing/null values in the data\r\ntrndt1.isnull().sum()\r\n#Observed Null values in the following columns with their count:\r\n#Bank=111,BankState=112,RevLineCr=14,ChgOffDate=76722,DisbursementDate=156,MIS_Status=615\r\n\r\n#Removing null values from Train data\r\ntrndt1 = trndt1.dropna(axis=0,subset = ['Name','City','State','Bank','BankState','RevLineCr','DisbursementDate','MIS_Status'])\r\n#trndt1.isnull().sum() #o/p:only in ChgOffDate column:76035 na values are present now\r\n#After removing na values\r\n#trndt1.shape #o/p:(104145, 27)\r\ntrndt1.head(1)\r\n\r\n#Dropping unnecessary columns from Train Dataset\r\n#Dropping 'Unnamed: 0' column(as it is unnecessary for our analysis),and 'ChgOffDate' column as it has more than 75% missing data,from the Dataframe\r\ntrndt1.drop(columns = ['Unnamed: 0','ChgOffDate'], axis=1, inplace=True)\r\ntrndt1.shape #o/p:(104145, 25)\r\n\r\n#Train Data\r\n#converting MIS_Status column to numeric\r\n#trndt1.MIS_Status.value_counts()\r\n#o/p:P I F 76962\r\n#CHGOFF 27183\r\n#Name: MIS_Status, dtype: int64\r\ntrndt1['MIS_Status'] = trndt1['MIS_Status'].apply(lambda x: 1 if x == 'CHGOFF' else 0)\r\n#trndt1.MIS_Status.value_counts()\r\n#o/p:1 76962\r\n#0 27183\r\n#Name: MIS_Status, dtype: int64\r\ntrndt1.head(1)\r\n\r\n#Train Data\r\ntrndt1['MIS_Status'].value_counts(normalize=True)\r\ntrndt1['MIS_Status'].value_counts()\r\n#1 0.261011\r\n#Name: MIS_Status, dtype: float64\r\n\r\n#From Train Data removing the dollar sign and converting the following columns to numeric \r\ntrndt1[['DisbursementGross','BalanceGross','ChgOffPrinGr','GrAppv','SBA_Appv']] = trndt1[['DisbursementGross','BalanceGross','ChgOffPrinGr','GrAppv','SBA_Appv']].replace('[\\$,]','',regex=True).astype(float)\r\n#trndt1.head(1)\r\ntrndt1[['DisbursementGross','BalanceGross','ChgOffPrinGr','GrAppv','SBA_Appv']] = trndt1[['DisbursementGross','BalanceGross','ChgOffPrinGr','GrAppv','SBA_Appv']].astype(int)\r\ntrndt1.head(1)\r\n\r\n\r\n#In Train Data,converting the ApprovalDate column to numeric\r\ntrndt1['Approval_Date'],trndt1['Approval_Month'],trndt1['Approval_Year']=trndt1['ApprovalDate'].str.split('-',2).str\r\n#trndt1[['ApprovalDate','Approval_Date','Approval_Month','Approval_Year']].head(1)\r\n#converting the month names to month numbers in Approval Date column\r\nmonth_numbers = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07','Aug': '08','Sep': '09','Oct': '10','Nov': '11','Dec': '12'}\r\nfor k, v in month_numbers.items(): \r\n trndt1['Approval_Month'] = trndt1['Approval_Month'].replace(k, v)\r\n#trndt1['Approval_Month'].head(1)\r\n#concatenating all the 3 columns which are in numeric form as a single New column and checking it's datatype\r\ntrndt1['Approval_NewDate'] = trndt1[['Approval_Date','Approval_Month','Approval_Year']].apply(lambda x: 
''.join(x),axis=1)\r\n#trndt1[['ApprovalDate','Approval_NewDate']].head(1)\r\n#trndt1['Approval_NewDate'].dtypes #o/p:dtype('O')\r\n#converting the newly created column from object(string) to int and checking it's datatype\r\ntrndt1['Approval_Date'] = trndt1['Approval_Date'].astype(int)\r\n#trndt1['Approval_Date'].dtypes\r\ntrndt1['Approval_Month'] = trndt1['Approval_Month'].astype(int)\r\n#trndt1['Approval_Month'].dtypes\r\ntrndt1['Approval_Year'] = trndt1['Approval_Year'].astype(int)\r\n#trndt1['Approval_Year'].dtypes\r\ntrndt1['Approval_NewDate'] = trndt1['Approval_NewDate'].astype(int)\r\ntrndt1['Approval_NewDate'].dtypes #o/p:dtype('int64')\r\n\r\n#In Train Data,converting the DisbursementDate column to numeric\r\ntrndt1['DisbursementDate'] = trndt1['DisbursementDate'].replace(0, np.NaN)\r\ntrndt1['Disbursement_Date'],trndt1['Disbursement_Month'],trndt1['Disbursement_Year']=trndt1['DisbursementDate'].str.split('-',2).str\r\n#trndt1[['DisbursementDate','Disbursement_Date','Disbursement_Month','Disbursement_Year']].head(1)\r\n#converting the month names to month numbers in Disbursement Date column\r\nmonth_numbers = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07','Aug': '08','Sep': '09','Oct': '10','Nov': '11','Dec': '12'}\r\nfor k, v in month_numbers.items(): \r\n trndt1['Disbursement_Month'] = trndt1['Disbursement_Month'].replace(k, v)\r\n#trndt1['Disbursement_Month'].head(1)\r\n#concatenating all the 3 columns which are in numeric form as a single New column and checking it's datatype\r\ntrndt1['Disbursement_NewDate'] = trndt1[['Disbursement_Date','Disbursement_Month','Disbursement_Year']].apply(lambda x: ''.join(x),axis=1)\r\n#trndt1[['DisbursementDate','Disbursement_NewDate']].head(1)\r\n#o/p:DisbursementDate\tDisbursement_Date1\r\n#0\t 31-Jul-98\t 310798\r\ntrndt1['Disbursement_NewDate'].dtypes #o/p:dtype('O')\r\n#converting the newly created column from object(string) to int and checking it's datatype\r\ntrndt1['Disbursement_Date'] = trndt1['Disbursement_Date'].astype(int)\r\n#trndt1['Disbursement_Date'].dtypes \r\ntrndt1['Disbursement_Month'] = trndt1['Disbursement_Month'].astype(int)\r\n#trndt1['Disbursement_Month'].dtypes\r\ntrndt1['Disbursement_Year'] = trndt1['Disbursement_Year'].astype(int)\r\n#trnd1['Disbursement_Year'].dtypes \r\ntrndt1['Disbursement_NewDate'] = trndt1['Disbursement_NewDate'].astype(int)\r\ntrndt1['Disbursement_NewDate'].dtypes #o/p:dtype('int64')\r\n\r\n\r\n\r\n#Train Data\r\ntrndt1.shape #o/p:(104145, 33)\r\n#trndt1.dtypes\r\n#trndt1.info() #o/p:dtypes: int64(23), object(9)\r\n\r\n#listendata woe and iv article coding\r\n#After removing null values and removing a column('chgoffdate') as it has more than 75% missing data\r\n#After converting $ columns,ApprovalDate and DisbursementDate columns to numeric \r\n##checking for woe and iv \r\ndef iv18_woe18(data, target, bins=10, show_woe=False):\r\n \r\n #Empty Dataframe\r\n newDF64,newDF65,newDF66,newDF67,woeivDF18 = pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()\r\n \r\n #Extract Column Names\r\n cols = data.columns\r\n \r\n #Run WOE and IV on all the independent variables\r\n for ivars in cols[~cols.isin([target])]:\r\n if (data[ivars].dtype.kind in 'bifc') and (len(np.unique(data[ivars]))>10):\r\n binned_x = pd.qcut(data[ivars], bins, duplicates='drop')\r\n d0 = pd.DataFrame({'x': binned_x, 'y': data[target]})\r\n else:\r\n d0 = pd.DataFrame({'x': data[ivars], 'y': data[target]})\r\n d = d0.groupby(\"x\", 
as_index=False).agg({\"y\": [\"count\", \"sum\"]})\r\n d.columns = ['Cutoff', 'N', 'Events']\r\n d['% of Events'] = np.maximum(d['Events'], 0.5) / d['Events'].sum()\r\n d['Non-Events'] = d['N'] - d['Events']\r\n d['% of Non-Events'] = np.maximum(d['Non-Events'], 0.5) / d['Non-Events'].sum()\r\n d['WoE_18'] = np.log(d['% of Events']/d['% of Non-Events'])\r\n d['IV_18'] = d['WoE_18'] * (d['% of Events'] - d['% of Non-Events'])\r\n d.insert(loc=0, column='Variable', value=ivars)\r\n print(\"Information value of \" + ivars + \" is \" + str(round(d['IV_18'].sum(),6)))\r\n print(\"Weight Of Evidence of \" + ivars + \" is \" + str(round(d['WoE_18'].sum(),6)))\r\n temp_9 =pd.DataFrame({\"Variable\" : [ivars], \"IV_18\" : [d['IV_18'].sum()]}, columns = [\"Variable\", \"IV_18\"])\r\n tempWOE_18 =pd.DataFrame({\"variable\" : [ivars], \"WoE_18\" : [d['WoE_18'].sum()]}, columns = [\"variable\", \"WoE_18\"])\r\n tempIV_18 =pd.DataFrame({\"variable\" : [ivars], \"IV_18\" : [d['IV_18'].sum()]}, columns = [\"variable\", \"IV_18\"]) \r\n tempIVWOE_18 =pd.DataFrame({\"variable\" : [ivars], \"IV_18\" : [d['IV_18'].sum()], \"WoE_18\" : [d['WoE_18'].sum()]}, columns = [\"variable\", \"IV_18\", \"WoE_18\"])\r\n\r\n newDF64=pd.concat([newDF64,temp_9], axis=0)\r\n newDF65=pd.concat([newDF65,tempWOE_18], axis=0)\r\n newDF66=pd.concat([newDF66,tempIV_18], axis=0)\r\n newDF67=pd.concat([newDF67,tempIVWOE_18], axis=0)\r\n woeivDF18=pd.concat([woeivDF18,d], axis=0)\r\n ##newDF3=pd.concat([newDF3,tempWOE], axis=0)\r\n #woeivDF1=pd.concat([woeivDF1,d], axis=0)\r\n\r\n #Show WOE Table\r\n if show_woe == True:\r\n print(d)\r\n return newDF64,newDF65,newDF66,newDF67,woeivDF18\r\n\r\nfinal_18,woe_18,iv_18,IvWoe_18,woeiv_18 = iv18_woe18(data = trndt1, target = 'MIS_Status', bins=10, show_woe = True)\r\nfinal_18\r\n#print(woe_18)\r\n#print(iv_18)\r\n#print(IvWoe_18)\r\n#print(woeiv_18)\r\n\r\ntype(final_18) #o/p:pandas.core.frame.DataFrame\r\n\r\n#Weight of Evidence(WOE):\r\n#11-columns with +ve WOE:Name,City,DisbursementDate,ApprovalDate,ChgOffPrinGr,RetainedJob,RevLineCr,CCSC,BalanceGross,CreateJob,Disbursement_Month.\r\n#21-columns with _ve WOE:Approval_NewDate,Approval_Date,Approval_Month,Zip,Disbursement_NewDate,Disbursement_Date,DisbursementGross,NoEmp,FranschiseCode,\r\n#UrbanRural,ApprovalFY,Term,GrAppv,SBA_Appv,Approval_Year,NewExist,LowDoc,Disbursement_Year,State,BankState,Bank.\r\nwoe_18.sort_values(by='WoE_18',ascending=False)\r\n\r\n#INFORMATION VALUE(IV):\r\n##Rules related to Information Value\r\n#Information Value\t Variable Predictiveness columns\r\n#Less than 0.02\t Not useful for prediction BalanceGross,Approval_NewDate,Approval_Date,Disbursement_Month,NewExist,FranchiseCode,Approval_Month,Disbursement_Date(8)\r\n#0.02 to 0.1\t Weak predictive Power CreateJob,Disbursement_NewDate,Zip,NoEmp,State,DisbursementGross,CCSC(7)\r\n#0.1 to 0.3\t Medium predictive Power LowDoc,GrAppv,RetainedJob,SBA_Appv,RevLineCr(5)\r\n#0.3 to 0.5\t Strong predictive Power BankState,UrbanRural,Approval_Year,ApprovalFY,Disbursement_Year(5)\r\n#>0.5\t Suspicious Predictive Power City,Bank,ApprovalDate,DisbursementDate,Name,Term,ChgOffPrinGr(7)\r\n\r\n##predictors\r\n#not considering:notuseful(8)+weak(7) = Total = 15\r\n#considering:medium(5)+strong(5) = Total = 10\r\n#if we consider:suspicious(7) = Total = 10+7 = 17\r\n#or else(medium+strong=5+5=10)\r\niv_18.sort_values(by='IV_18',ascending=False)\r\n\r\nIvWoe_18.sort_values(by='WoE_18',ascending=False)\r\n\r\nIvWoe_18.sort_values(by='IV_18',ascending=False)\r\n\r\nwoeiv_18 
#o/p:120074 rows × 9 columns\r\n\r\n#Train Data\r\ntrndt2 = trndt1.copy()\r\ntrndt2.shape #o/p:(104145, 33)\r\n\r\n##Based on Information Value Interpretation choosen the following columns for model building\r\n#Information Value Variable Predictiveness Columns\r\n#0.1 to 0.3\t Medium predictive Power LowDoc,GrAppv,RetainedJob,SBA_Appv,RevLineCr(5)\r\n#0.3 to 0.5\t Strong predictive Power BankState,UrbanRural,Approval_Year,ApprovalFY,Disbursement_Year(5)\r\n\r\n#In Train Data,converting the categorical columns 'BankState','RevLineCr','LowDoc' into numeric by using label encoder\r\ntrndt1.BankState.value_counts()\r\ntrndt1.BankState.value_counts().count() #o/p:52\r\n\r\n#Train Data\r\nfrom sklearn.preprocessing import LabelEncoder\r\nle_0 = LabelEncoder()\r\n# Encode labels in column 'BankState'\r\n## instantiate an encoder - here we use labelencoder()\r\ntrndt1[\"BankState\"] = trndt1[\"BankState\"].astype(str)\r\ntrndt1['BankState_code']= le_0.fit_transform(trndt1['BankState'])\r\ntrndt1[['BankState_code','BankState']]\r\ntrndt1.head(1)\r\n\r\n#Train Data\r\ntrndt1[\"RevLineCr\"].value_counts()\r\n#trndt1[\"RevLineCr\"].value_counts().count() #o/p:7\r\n\r\n#Train Data\r\n#from sklearn.preprocessing import LabelEncoder\r\nle_1 = LabelEncoder()\r\n# Encode labels in column 'RevLineCr'\r\n## instantiate an encoder - here we use labelencoder()\r\ntrndt1[\"RevLineCr\"] = trndt1[\"RevLineCr\"].astype(str)\r\ntrndt1['RevLineCr_code']= le_1.fit_transform(trndt1['RevLineCr'])\r\ntrndt1.head(1)\r\n\r\n#Train Data\r\ntrndt1[\"LowDoc\"].value_counts()\r\n#trndt1[\"LowDoc\"].value_counts().count() #o/p:4\r\n\r\n#Train Data\r\n#from sklearn.preprocessing import LabelEncoder\r\nle_2 = LabelEncoder()\r\n# Encode labels in column 'LowDoc'\r\n## instantiate an encoder - here we use labelencoder()\r\ntrndt1[\"LowDoc\"] = trndt1[\"LowDoc\"].astype(str)\r\ntrndt1['LowDoc_code']= le_2.fit_transform(trndt1['LowDoc'])\r\ntrndt1.head(1)\r\n\r\n\r\n#Train Data\r\n#Scaling the numerical columns before building the model\r\nfrom sklearn import preprocessing\r\nscaler = preprocessing.MinMaxScaler()\r\ntrndt1[['GrAppv', 'SBA_Appv']] = scaler.fit_transform(trndt1[['GrAppv', 'SBA_Appv']])\r\ntrndt1.head(2)\r\n\r\n#Train Data\r\n#After scaling the numerical variable columns save as copy\r\ntrndt4 = trndt1.copy()\r\ntrndt4.shape #o/p:(104145, 36)\r\n#trndt4.info() \r\n#The scaled 2-numerical variables have a data type:float \r\n#o/p:dtypes: float64(2), int64(25), object(9)\r\n\r\n#Train Data\r\n#selecting the columns based on Information Value\r\n#medium predictors-LowDoc,GrAppv,RetainedJob,SBA_Appv,RevLineCr\r\n#strong predictors-BankState,UrbanRural,Approval_Year,ApprovalFY,Disbursement_Year\r\ntrndt1.head(1)\r\n\r\n#Train Data\r\n#Remove the columns as they are not required for analysis \r\ntrndt1.drop(trndt1.columns[[0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 14, 16, 17, 18, 19, 20, 22, 25, 26, 28, 29, 30, 32]], axis = 1, inplace = True)\r\n\r\n#Train Data\r\n#After retaining the medium and strong predictive columns based on information value\r\ntrndt1.head(1)\r\n\r\n#Train Data\r\n#trndt1.shape #o/p:(104145, 11)\r\n#trndt1.info() #o/p:dtypes: float64(2), int64(9)\r\ntrndt5 = trndt1.copy()\r\ntrndt5.shape #o/p:(104145, 11)\r\n#trndt5.info() #o/p:dtypes: float64(2), int64(9)\r\n\r\n#trndt4.shape #(104145, 36)\r\n#testdt4.shape # (44900, 35)\r\ntrndt4.info() #o/p:dtypes: float64(2), int64(25), object(9)\r\n\r\n\r\n# Separate input features (X) and target variable (y)\r\nX_train_Features = trndt1.drop('MIS_Status', 
axis=1)\r\ny_train_label = trndt1.MIS_Status\r\n\r\n# import SMOTE module from imblearn library\r\nfrom imblearn.over_sampling import SMOTE \r\nsm = SMOTE(random_state = 2) \r\nX_train_Features_sm_1, y_train_label_sm_1 = sm.fit_sample(X_train_Features, y_train_label.ravel()) \r\n\r\nX_train_Features_sm_1.columns\r\n\r\n#Decision Tree Model\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score,classification_report,confusion_matrix,roc_auc_score,roc_curve,auc\r\n# Create DecisionTree model\r\nclf_DT = DecisionTreeClassifier()\r\nclf_DT.fit(X_train_Features_sm_1, y_train_label_sm_1)\r\n# Train model and make predictions\r\npred_y_1 = clf_DT.predict(X_train_Features_sm_1)\r\nconfusion_matrix(y_train_label_sm_1,pred_y_1)\r\n\r\n\r\nprint(\"classification_report_DT:\\n\", classification_report(y_train_label_sm_1,pred_y_1))\r\n\r\n\r\nprint(\"AUC&ROC_DT:\", roc_auc_score(y_train_label_sm_1,pred_y_1 ))\r\n#o/p:AUC&ROC_DT: 0.8110519204972385\r\n\r\n# Saving model to disk\r\n\r\npickle.dump(clf_DT, open('model.pkl','wb'))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":15137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"476348972","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport xlrd\nimport csv\nimport os\n\nfrom datetime import date, datetime\n\n# In[23]:\n\ndef open_excel(file):\n\tprint(file.title())\n\tif file.title().startswith('Excel_Data/~$'):\n\t\t# print('Wrong')\n\t\treturn ['Wrong']\n\twb = xlrd.open_workbook(filename=file)\n\t# print(wb.sheet_names())\n\tsheets = wb.sheets()\n\t# sheet = wb.sheet_by_index(idx)\n\t# print(sheet)\n\n\t# print(sheet.row_values(0))\n\treturn sheets\n\n# In[88]:\ndef create_csv(sheet, prefix, prev_path):\n\tappend_csv_flag = False\n\tfor i in range(0, sheet.nrows):\n\t\ttitle = sheet.row_values(i)[0]\n\t\t# print(title)\n\t\t# print(sheet.row_values(i+2))\n\t\tpath = ''\n\t\tif title is not None and title.startswith('University'):\n\t\t\tpath = title\n\t\t\tif path == 'University of California, San Diego Survey of Parking Space Occupancy Levels':\n\t\t\t\tpath = sheet.row_values(i+1)[0].replace(':','_').replace(' ', '_')\n\t\t\t\t# print(path)\n\t\t\telse:\n\t\t\t\tpath = path.split(\"University of California, San Diego Survey of Parking Space Occupancy Levels \")[1].replace(':','_').replace(' ', '_')\n\t\t\t# print(path)\n\t\t\tbreak\n\tif path != '':\n\t\tpath = prefix + path + \".csv\"\n\t\tprev_path = path\n\t\t# print(path + \"--------------------------------\")\n\telse:\n\t\tpath = prev_path\n\t\tappend_csv_flag = True\n\t\t# print(path + \"--------------------------------\")\n\twith open(path, 'a', newline='') as f:\n\t\tcsv_write = csv.writer(f)\n\t\tcsv_head = [\"year\", \"quarter\", \"parking_spaces\", \"8am\", \"9am\", \"10am\", \"11am\", \"12pm\", \"1pm\", \"2pm\", \"3pm\",\n\t\t\t\t\t\"4pm\", \"5pm\", \"peak_empty_spaces\", \"peak_occupied_spaces\", \"%_occupied\"]\n\t\tif not append_csv_flag:\n\t\t\tcsv_write.writerow(csv_head)\n\t\tfor i in range(sheet.nrows):\n\t\t\ts = sheet.row_values(i)\n\t\t\tstart_word = s[2] if isinstance(s[2], str) and s[1] == '' else s[1]\n\t\t\t# print(start_word)\n\t\t\tif start_word.startswith('Summer') and len(start_word) > 15:\n\t\t\t\thandleCornerCase(csv_write, s)\n\t\t\t\tcontinue\n\n\t\t\tfall_list = list()\n\t\t\twinter_list = list()\n\n\t\t\tif start_word.startswith('Summer'):\n\t\t\t\tnext_row = sheet.row_values(i + 1)\n\t\t\telif start_word.startswith('Spring'):\n\t\t\t\tnext_row = sheet.row_values(i - 1)\n\t\t\telif start_word.startswith('Fall'):\n\t\t\t\tnext_row = sheet.row_values(i)\n\t\t\t\tfall_list.append(next_row[0])\n\t\t\t\twinter_list.append(next_row[0])\n\n\t\t\t\tfor idx, i in enumerate(s):\n\t\t\t\t\tif i != '':\n\t\t\t\t\t\tpattern = list()\n\t\t\t\t\t\tif isinstance(i, str):\n\t\t\t\t\t\t\tpattern = i.split('\\n')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpattern = [i for x in range(4)]\n\t\t\t\t\t\tif len(pattern) == 2:\n\t\t\t\t\t\t\tif idx == len(s) - 1:\n\t\t\t\t\t\t\t\tpattern = [float(i.strip('%')) / 100.0 for i in pattern]\n\t\t\t\t\t\t\tfall_list.append(pattern[0])\n\t\t\t\t\t\t\twinter_list.append(pattern[1])\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\tif len(fall_list) > 1:\n\t\t\t\tcsv_write.writerow(fall_list)\n\t\t\t\tcsv_write.writerow(winter_list)\n\t\t\t\tcontinue\n\t\t\tret_list = list()\n\t\t\tret_list.append(next_row[0])\n\t\t\tfor i in s:\n\t\t\t\tif i != '':\n\t\t\t\t\tret_list.append(i)\n\t\t\tcsv_write.writerow(ret_list)\n\treturn prev_path\n\n# In[86]:\ndef handleCornerCase(csv_write, s):\n\t# print('------------------')\n\tsummer_list = list()\n\tfall_list = list()\n\twinter_list = list()\n\tspring_list = list()\n\n\tif s[3] == 
'':\n\t\treturn\n\n\tfor i in range(len(s)):\n\t\tif i == 0:\n\t\t\tsummer_list.append(s[0])\n\t\t\tfall_list.append(s[0])\n\t\t\twinter_list.append(s[0])\n\t\t\tspring_list.append(s[0])\n\n\t\tif i > 1:\n\t\t\tpattern = list()\n\t\t\tif isinstance(s[i], str):\n\t\t\t\ttemp = s[i].replace('\\n', ' ')\n\t\t\t\tpattern = temp.split(' ')\n\t\t\t\tif len(pattern) == 1 and pattern[0] == '':\n\t\t\t\t\tcontinue\n\n\t\t\t\twhile len(pattern) != 4 and len(pattern) != 0:\n\t\t\t\t\ttemp_data = pattern[-1]\n\t\t\t\t\tpattern.append(temp_data)\n\t\t\telse:\n\t\t\t\tpattern = [s[i] for x in range(4)]\n\n\t\t\tif i == len(s) - 1 and isinstance(s[i], str):\n\t\t\t\tpattern = [float(i.strip('%')) / 100.0 if not (i == '#DIV/0!' or i == '') else 0.0 for i in pattern]\n\n\t\t\tif len(pattern) == 4:\n\n\t\t\t\tsummer_list.append(pattern[0])\n\t\t\t\tfall_list.append(pattern[1])\n\t\t\t\twinter_list.append(pattern[2])\n\t\t\t\tspring_list.append(pattern[3])\n\tcsv_write.writerow(summer_list)\n\tcsv_write.writerow(fall_list)\n\tcsv_write.writerow(winter_list)\n\tcsv_write.writerow(spring_list)\n\n\n# In[16]:\nif __name__ == '__main__':\n\tprefix = 'csv_data/'\n\texcel_location = 'excel_data/'\n\tfiles = os.listdir(excel_location)\n\n\t# for s in open_excel(excel_location + '3.1 Occupancy Scripps Institution Of Oceanography-Converted.Xlsx'):\n\t# \tcreate_csv(s, '', '')\n\n\n\tfor file in files:\n\t\tprev = ''\n\t\tfor sheet in open_excel(excel_location + file):\n\t\t\tif isinstance(sheet,str):\n\t\t\t\tbreak\n\t\t\tfolder = ''\n\t\t\tif file.title().startswith('1'):\n\t\t\t\tfolder = prefix + 'University-wide/'\n\t\t\telif file.title().startswith('2'):\n\t\t\t\tfolder = prefix + 'By-Location/'\n\t\t\telif file.title().startswith('3'):\n\t\t\t\tfolder = prefix + 'By-Area/'\n\t\t\telif file.title().startswith('4'):\n\t\t\t\tfolder = prefix + 'By-Neighborhood/'\n\t\t\tif not os.path.exists(folder):\n\t\t\t\tos.makedirs(folder)\n\t\t\tprev = create_csv(sheet, folder, prev)\n\n\n\n\t# print(open_excel('test1.xlsx')[-1].row_values(5)[1])\n\t# create_csv(open_excel('test1.xlsx')[-1])\n\n","sub_path":"DataExtraction.py","file_name":"DataExtraction.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"653575789","text":"import argparse\nimport json\nimport os\nimport tempfile\n\n\nstorage_path = os.path.join(tempfile.gettempdir(), 'storage.data')\n\n\ndef clear():\n os.remove(storage_path)\n\n\ndef get_data():\n if not os.path.exists(storage_path):\n return {}\n\n with open(storage_path, 'r') as f:\n raw_data = f.read()\n if raw_data:\n return json.loads(raw_data)\n\n return {}\n\n\ndef put(key, value):\n data = get_data()\n if key in data:\n data[key].append(value)\n else:\n data[key] = [value]\n\n with open(storage_path, 'w') as f:\n f.write(json.dumps(data))\n\n\ndef get(key):\n data = get_data()\n return data.get(key)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--key', '-k', help='Key')\n parser.add_argument('--val', '-v', help='Value')\n parser.add_argument('--clear', action='store_true', help='Clear')\n\n args = parser.parse_args()\n\n if args.clear:\n clear()\n elif args.key and args.val:\n put(args.key, args.val)\n elif args.key:\n print(', '.join(get(args.key)))\n else:\n print('Wrong command')\n\n\n\"\"\"\nTAK suka pisat ne nado\ngde functons? gde structura coda?\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-k', '--key', help='a key')\nparser.add_argument('-v', '--value', help='a value to add to the key')\nargs = parser.parse_args()\n\nkey, value = args.key, args.value\n\nif value is None:\n if not os.path.isfile(storage_path):\n print(None)\n else:\n with open(storage_path, 'r') as f:\n kv = json.loads(f.read())\n\n if kv.get(key, None) is not None:\n print(', '.join(kv[key]))\n\nelse:\n kv = dict()\n if os.path.isfile(storage_path):\n with open(storage_path, 'r') as f:\n kv = json.loads(f.read())\n if kv.get(key, None) is None:\n kv[key] = [value]\n with open(storage_path, 'w') as f:\n json.dump(kv, f)\n else:\n kv[key].append(value)\n with open(storage_path, 'w') as f:\n json.dump(kv, f)\n\n else:\n with open(storage_path, 'w') as f:\n kv[key] = [value]\n json.dump(kv, f)\n \"\"\"\n","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"281511462","text":"from flask import Flask, jsonify, request, Response\nfrom models import db\nimport api\n\napp = Flask(__name__)\napp.config.from_object('config.DevelopmentConfig')\ndb.init_app(app)\n\n\n@app.route('/users', methods=['GET', 'POST'])\ndef index():\n if request.method == 'GET':\n users = api.users()\n return jsonify(users=[row._asdict() for row in users])\n else:\n try:\n user = api.create_user_from_params(request.get_json())\n return jsonify(user=user._asdict())\n except:\n return Response(response='Unable to create user.',\n status=400,\n mimetype=\"application/json\")\n\n\n@app.route('/users/', methods=['GET'])\ndef get_user(userid):\n user = api.find_user(userid)\n\n if user is None:\n return Response(response='User not found.',\n status=404,\n mimetype=\"application/json\")\n\n if request.method == 'GET':\n return jsonify(user=user._asdict())\n\n\n@app.route('/users/', methods=['PUT'])\ndef update_user(userid):\n user = api.find_user(userid)\n\n try:\n user = api.update_user_from_params(user, request.get_json())\n\n return jsonify(user=user._asdict())\n except:\n return Response(response='Unable to update user.',\n status=400,\n mimetype=\"application/json\")\n\n\n@app.route('/users/', methods=['DELETE'])\ndef delete_user(userid):\n user = api.find_user(userid)\n\n try:\n api.destroy_user(user)\n return Response(response='Successfully deleted user.',\n status=200,\n mimetype=\"application/json\")\n except:\n return Response(response='Unable to delete user.',\n status=400,\n mimetype=\"application/json\")\n\n\n@app.route('/groups/', methods=['GET'])\ndef get_group(name):\n group = api.find_group(name)\n\n if group is None:\n return Response(response='Group not found.',\n status=404,\n mimetype=\"application/json\")\n\n if not group.users.all():\n return Response(response='Group has no associated users.',\n status=404,\n mimetype=\"application/json\")\n\n return jsonify(group=group._asdict(),\n users=[row._asdict() for row in group.users])\n\n\n@app.route('/groups/', methods=['POST'])\ndef create_group(name):\n group = api.find_group(name)\n\n if group is not None:\n return Response(response='Group already exists.',\n status=404,\n mimetype=\"application/json\")\n else:\n group = api.create_group(name)\n return jsonify(group=group._asdict())\n\n\n@app.route('/groups/', methods=['PUT'])\ndef update_group(name):\n group = api.find_group(name)\n\n if group is None:\n return Response(response='Group not found.',\n status=404,\n mimetype=\"application/json\")\n\n params = request.get_json()\n users = params.get('users')\n if users:\n group = api.add_users_to_group(group, users)\n return jsonify(group=group._asdict(),\n users=[row._asdict() for row in group.users])\n else:\n return Response(response='No users provided.',\n status=404,\n mimetype=\"application/json\")\n\n\n@app.route('/groups/', methods=['DELETE'])\ndef delete_group(name):\n group = api.find_group(name)\n\n if group is None:\n return Response(response='Group not found.',\n status=404,\n mimetype=\"application/json\")\n\n if not group.users.all():\n return Response(response='Group has no associated users.',\n status=404,\n mimetype=\"application/json\")\n\n group = api.empty_users_in_group(group)\n return Response(response='All users removed from \"%s\".' 
% group.name,\n status=200,\n mimetype=\"application/json\")\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"rest_user_groups/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"243918238","text":"\"\"\"projectash URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom ash import views\nadmin.site.site_header = 'System Administrator'\nadmin.site.site_title = 'site admin'\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', views.user_login, name='login'),\n url(r'^home', views.home, name='home'),\n url(r'^register', views.createAccount, name='registration'),\n url(r'^logout', views.user_logout, name='logout'),\n url(r'^expense', views.expense, name='expense'),\n url(r'^income/', views.income, name='income'),\n url(r'^incomes/', views.totalIncome, name='incomes'),\n url(r'^contact', views.contact, name='contact'),\n url(r'^creditors', views.creditors, name='creditors'),\n url(r'^debtors', views.debtors, name='debtors'),\n url(r'^calendar', views.calendar, name='calendar'),\n url(r'^tasks', views.addTask, name='task'),\n url(r'^debit/(?P\\d+)/$', views.debit, name='debit'),\n url(r'^clear/(?P\\d+)/$', views.clear_debit, name='debit'),\n url(r'^viewDebt/(?P\\d+)/$', views.view_debit, name='debit'),\n url(r'^payDebt/(?P\\d+)/$', views.pay_debt, name='payment'),\n]","sub_path":"projectash/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"12914111","text":"\"\"\" plot episode evaluation log and save \"\"\"\n\nfrom pathlib import Path\nimport argparse\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp-id', help='Experiment ID', type=int)\n args = parser.parse_args()\n\n LOG_DIR = \"logs/exp_\"+str(args.exp_id)\n\n FILE_PATH = str(list(Path(LOG_DIR).rglob('res_episode_1.csv'))[0])\n SAVE_PATH = FILE_PATH.replace(\"res_episode_1.csv\", \"plot_episode_eval_log.png\")\n\n # # read data\n log_df = pd.read_csv(FILE_PATH)\n\n # plot\n fig, axs = plt.subplots(4, 4, figsize=(20, 10), dpi=300, sharex=True)\n\n log_df.plot(x='timestep', y='joint_pos1', ax=axs[0, 0])\n log_df.plot(x='timestep', y='joint1_min', ax=axs[0, 0], style=\"r--\")\n log_df.plot(x='timestep', y='joint1_max', ax=axs[0, 0], style=\"r--\")\n\n log_df.plot(x='timestep', y='joint_pos2', ax=axs[1, 0])\n log_df.plot(x='timestep', y='joint2_min', ax=axs[1, 0], style=\"r--\")\n log_df.plot(x='timestep', y='joint2_max', ax=axs[1, 0], style=\"r--\")\n\n log_df.plot(x='timestep', y='joint_pos3', ax=axs[2, 0])\n log_df.plot(x='timestep', y='joint3_min', ax=axs[2, 0], style=\"r--\")\n log_df.plot(x='timestep', y='joint3_max', ax=axs[2, 0], style=\"r--\")\n\n log_df.plot(x='timestep', y='joint_pos4', ax=axs[3, 0])\n log_df.plot(x='timestep', y='joint4_min', ax=axs[3, 0], style=\"r--\")\n log_df.plot(x='timestep', y='joint4_max', ax=axs[3, 0], style=\"r--\")\n\n log_df.plot(x='timestep', y='joint_pos5', ax=axs[0, 2])\n log_df.plot(x='timestep', y='joint5_min', ax=axs[0, 2], style=\"r--\")\n log_df.plot(x='timestep', y='joint5_max', ax=axs[0, 2], style=\"r--\")\n\n log_df.plot(x='timestep', y='joint_pos6', ax=axs[1, 2])\n log_df.plot(x='timestep', y='joint6_min', ax=axs[1, 2], style=\"r--\")\n log_df.plot(x='timestep', y='joint6_max', ax=axs[1, 2], style=\"r--\")\n\n log_df.plot(x='timestep', y='action_1', ax=axs[0, 1])\n log_df.plot(x='timestep', y='action_low1', ax=axs[0, 1], style=\"r--\")\n log_df.plot(x='timestep', y='action_high1', ax=axs[0, 1], style=\"r--\")\n\n log_df.plot(x='timestep', y='action_2', ax=axs[1, 1])\n log_df.plot(x='timestep', y='action_low2', ax=axs[1, 1], style=\"r--\")\n log_df.plot(x='timestep', y='action_high2', ax=axs[1, 1], style=\"r--\")\n\n log_df.plot(x='timestep', y='action_3', ax=axs[2, 1])\n log_df.plot(x='timestep', y='action_low3', ax=axs[2, 1], style=\"r--\")\n log_df.plot(x='timestep', y='action_high3', ax=axs[2, 1], style=\"r--\")\n\n log_df.plot(x='timestep', y='action_4', ax=axs[3, 1])\n log_df.plot(x='timestep', y='action_low4', ax=axs[3, 1], style=\"r--\")\n log_df.plot(x='timestep', y='action_high4', ax=axs[3, 1], style=\"r--\")\n\n log_df.plot(x='timestep', y='action_5', ax=axs[0, 3])\n log_df.plot(x='timestep', y='action_low5', ax=axs[0, 3], style=\"r--\")\n log_df.plot(x='timestep', y='action_high5', ax=axs[0, 3], style=\"r--\")\n\n log_df.plot(x='timestep', y='action_6', ax=axs[1, 3])\n log_df.plot(x='timestep', y='action_low6', ax=axs[1, 3], style=\"r--\")\n log_df.plot(x='timestep', y='action_high6', ax=axs[1, 3], style=\"r--\")\n\n log_df.plot(x='timestep', y='reward', ax=axs[2, 2], color=\"b\")\n log_df.plot(x='timestep', y='term1', ax=axs[2, 2], color=\"r\")\n log_df.plot(x='timestep', y='term2', ax=axs[2, 2], color=\"g\")\n # ax_1 = axs[2, 2].twinx()\n # log_df.plot(x='timestep', y='return', ax=ax_1, color=\"r\")\n\n log_df.plot(x='timestep', y='distance', ax=axs[2, 3], color=\"b\", marker=\"x\")\n\n 
log_df.plot(x='timestep', y='est_acc', ax=axs[3, 2], color=\"g\", marker=\"*\")\n ax_3 = axs[3, 2].twinx()\n log_df.plot(x='timestep', y='est_vel', ax=ax_3, color=\"r\", marker=\"+\")\n\n log_df.plot(x='timestep', y='goal_x', ax=axs[3, 3], style='or')\n log_df.plot(x='timestep', y='goal_y', ax=axs[3, 3], style='ob')\n log_df.plot(x='timestep', y='goal_z', ax=axs[3, 3], style='og')\n log_df.plot(x='timestep', y='tip_x', ax=axs[3, 3], style='xr')\n log_df.plot(x='timestep', y='tip_y', ax=axs[3, 3], style='xb')\n log_df.plot(x='timestep', y='tip_z', ax=axs[3, 3], style='xg')\n\n axs[0, 0].set_ylabel(\"joint1 pos (rad)\")\n axs[1, 0].set_ylabel(\"joint2 pos (rad)\")\n axs[2, 0].set_ylabel(\"joint3 pos (rad)\")\n axs[3, 0].set_ylabel(\"joint4 pos (rad)\")\n axs[0, 2].set_ylabel(\"joint5 pos (rad)\")\n axs[1, 2].set_ylabel(\"joint6 pos (rad)\")\n\n axs[0, 1].set_ylabel(\"action1 (rad)\")\n axs[1, 1].set_ylabel(\"action2 (rad)\")\n axs[2, 1].set_ylabel(\"action3 (rad)\")\n axs[3, 1].set_ylabel(\"action4 (rad)\")\n axs[0, 3].set_ylabel(\"action5 (rad)\")\n axs[1, 3].set_ylabel(\"action6 (rad)\")\n\n axs[2, 2].set_ylabel(\"reward\")\n # axs[2, 2].set_ylabel(\"reward (m^2)\", color=\"b\")\n # ax_1.set_ylabel(\"return (m^2)\", color=\"r\")\n # axs[2, 2].tick_params(axis='y', labelcolor=\"b\")\n # ax_1.tick_params(axis='y', labelcolor=\"r\")\n\n axs[2, 3].set_ylabel(\"distance (m)\")\n # axs[2, 3].tick_params(axis='y', labelcolor=\"b\")\n\n axs[3, 2].set_ylabel(\"acc (m/s^2)\", color=\"g\")\n ax_3.set_ylabel(\"vel (m/s)\", color=\"r\")\n axs[3, 2].tick_params(axis='y', labelcolor=\"g\")\n ax_3.tick_params(axis='y', labelcolor=\"r\")\n\n axs[3, 3].set_ylabel(\"coordinates (m)\")\n\n axs[3, 3].legend(loc=\"upper right\")\n # ax3.legend(bbox_to_anchor=(1, 1.05))\n # ax4.legend(bbox_to_anchor=(1.2, 1.05))\n\n plt.tight_layout()\n # plt.show()\n plt.savefig(SAVE_PATH, bbox_inches='tight')\n","sub_path":"code/scripts/plot_episode_eval_log.py","file_name":"plot_episode_eval_log.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"390894944","text":"# Copyright (c) 2019 Red Hat, Inc.\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom __future__ import absolute_import\n\nimport shlex\n\nfrom oslo_log import log\nimport paramiko\nfrom paramiko import channel\n\nimport tobiko\nfrom tobiko.shell.sh import _exception\nfrom tobiko.shell.sh import _execute\nfrom tobiko.shell.sh import _io\nfrom tobiko.shell.sh import _local\nfrom tobiko.shell.sh import _process\nfrom tobiko.shell import ssh\nimport typing # noqa\n\nLOG = log.getLogger(__name__)\n\n\ndef ssh_execute(ssh_client, command, environment=None,\n timeout: tobiko.Seconds = None, stdin=None, stdout=None,\n stderr=None, shell=None, expect_exit_status=0, **kwargs):\n \"\"\"Execute command on remote host using SSH client\"\"\"\n process = ssh_process(command=command,\n environment=environment,\n timeout=timeout,\n shell=shell,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n ssh_client=ssh_client,\n **kwargs)\n return _execute.execute_process(process=process,\n stdin=stdin,\n expect_exit_status=expect_exit_status)\n\n\ndef ssh_process(command, environment=None, current_dir=None,\n timeout: tobiko.Seconds = None, shell=None, stdin=None,\n stdout=None, stderr=None, ssh_client=None, sudo=None,\n network_namespace=None):\n if ssh_client is None:\n ssh_client = ssh.ssh_proxy_client()\n if ssh_client:\n return SSHShellProcessFixture(\n command=command, environment=environment, current_dir=current_dir,\n timeout=timeout, shell=shell, stdin=stdin, stdout=stdout,\n stderr=stderr, ssh_client=ssh_client, sudo=sudo,\n network_namespace=network_namespace)\n else:\n return _local.local_process(\n command=command, environment=environment, current_dir=current_dir,\n timeout=timeout, shell=shell, stdin=stdin, stdout=stdout,\n stderr=stderr, sudo=sudo, network_namespace=network_namespace)\n\n\nclass SSHShellProcessParameters(_process.ShellProcessParameters):\n\n ssh_client = None\n\n\nclass SSHShellProcessFixture(_process.ShellProcessFixture):\n\n def init_parameters(self, **kwargs) -> SSHShellProcessParameters:\n return SSHShellProcessParameters(**kwargs)\n\n def create_process(self):\n \"\"\"Execute command on a remote host using SSH client\"\"\"\n command = str(self.command)\n ssh_client = self.ssh_client\n parameters = self.parameters\n\n tobiko.check_valid_type(ssh_client, ssh.SSHClientFixture)\n tobiko.check_valid_type(parameters, SSHShellProcessParameters)\n environment = parameters.environment\n current_dir = parameters.current_dir\n\n for attempt in tobiko.retry(\n timeout=self.parameters.timeout,\n default_count=self.parameters.retry_count,\n default_interval=self.parameters.retry_interval,\n default_timeout=self.parameters.retry_timeout):\n\n timeout = attempt.time_left\n details = (f\"command='{command}', \"\n f\"current_dir='{current_dir}', \"\n f\"login={ssh_client.login}, \"\n f\"timeout={timeout}, \"\n f\"attempt={attempt}, \"\n f\"environment={environment}\")\n LOG.debug(f\"Create remote process... 
({details})\")\n try:\n client = ssh_client.connect()\n process = client.get_transport().open_session()\n if environment:\n variables = \" \".join(\n f\"{name}={shlex.quote(value)}\"\n for name, value in self.environment.items())\n command = variables + \" \" + command\n if current_dir is not None:\n command = f\"cd {current_dir} && {command}\"\n process.exec_command(command)\n LOG.debug(f\"Remote process created. ({details})\")\n return process\n except Exception:\n # Before doing anything else cleanup SSH connection\n ssh_client.close()\n LOG.debug(f\"Error creating remote process. ({details})\",\n exc_info=1)\n try:\n attempt.check_limits()\n except tobiko.RetryTimeLimitError as ex:\n LOG.debug(f\"Timed out creating remote process. ({details})\")\n raise _exception.ShellTimeoutExpired(command=command,\n stdin=None,\n stdout=None,\n stderr=None,\n timeout=timeout) from ex\n\n def setup_stdin(self):\n self.stdin = _io.ShellStdin(\n delegate=StdinSSHChannelFile(self.process, 'wb'),\n buffer_size=self.parameters.buffer_size)\n\n def setup_stdout(self):\n self.stdout = _io.ShellStdout(\n delegate=StdoutSSHChannelFile(self.process, 'rb'),\n buffer_size=self.parameters.buffer_size)\n\n def setup_stderr(self):\n self.stderr = _io.ShellStderr(\n delegate=StderrSSHChannelFile(self.process, 'rb'),\n buffer_size=self.parameters.buffer_size)\n\n def poll_exit_status(self):\n exit_status = getattr(self.process, 'exit_status', None)\n if exit_status and exit_status < 0:\n exit_status = None\n return exit_status\n\n def _get_exit_status(self, timeout: tobiko.Seconds = None):\n process = self.process\n if not process.exit_status_ready():\n # Workaround for Paramiko timeout problem\n # CirrOS instances could close SSH channel without sending process\n # exit status\n if timeout is None:\n timeout = 120.\n else:\n timeout = min(timeout, 120.0)\n LOG.debug(f\"Waiting for command '{self.command}' exit status \"\n f\"(timeout={timeout})\")\n # recv_exit_status method doesn't accept timeout parameter\n # therefore here we wait for next channel event expecting it is\n # actually the exit status\n # TODO (fressi): we could use an itimer to set a timeout for\n # recv_exit_status\n if not process.status_event.wait(timeout=timeout):\n LOG.error(\"Timed out before status event being set \"\n f\"(timeout={timeout})\")\n if process.exit_status >= 0:\n return process.exit_status\n else:\n return None\n\n def kill(self, sudo=False):\n process = self.process\n LOG.debug('Killing remote process: %r', self.command)\n try:\n process.close()\n except Exception:\n LOG.exception(\"Failed killing remote process: %r\",\n self.command)\n\n\nclass SSHChannelFile(channel.ChannelFile):\n\n def fileno(self):\n return self.channel.fileno()\n\n\nclass StdinSSHChannelFile(SSHChannelFile):\n\n def close(self):\n super(StdinSSHChannelFile, self).close()\n self.channel.shutdown_write()\n\n @property\n def write_ready(self):\n return self.channel.send_ready()\n\n\nclass StdoutSSHChannelFile(SSHChannelFile):\n\n def fileno(self):\n return self.channel.fileno()\n\n def close(self):\n super(StdoutSSHChannelFile, self).close()\n self.channel.shutdown_read()\n\n @property\n def read_ready(self):\n return self.channel.recv_ready()\n\n\nclass StderrSSHChannelFile(SSHChannelFile, paramiko.channel.ChannelStderrFile):\n\n def fileno(self):\n return self.channel.fileno()\n\n @property\n def read_ready(self):\n return 
self.channel.recv_stderr_ready()\n","sub_path":"tobiko/shell/sh/_ssh.py","file_name":"_ssh.py","file_ext":"py","file_size_in_byte":8603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"505889716","text":"# Caesar Cipher\r\n# https://www.nostarch.com/crackingcodes (BSD Licensed)\r\nimport sys\r\n\r\n\r\n# The string to be encrypted/decrypted:\r\nmessage = \"esi n ehohe hoe hi ldvhonsdlAtcadosr rctf ias au scb dc b'p,o,ntepcetrcdtrypnr fa'uilaalmn ga r pnn prpaee Caror tEtiaeginoev iemsy iura e.\"\r\n# sys.argv[3:]\r\nmessage = ' '.join(message)\r\n#message = eval(message)\r\n\r\nkeylist = [1,2,3,4,5,6,7,8,9]\r\n# Whether the program encrypts or decrypts:\r\nmode = 'decrypt'\r\n# sys.argv[2] # Set to either 'encrypt' or 'decrypt'.\r\n\r\n# Every possible symbol that can be encrypted:\r\nSYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\r\n\r\n# Stores the encrypted/decrypted form of the message:\r\ntranslated = ''\r\n# The encryption/decryption key:\r\nfor k in keylist:\r\n key = k\r\n\r\n# if sys.argv[2] != 'encrypt' and sys.argv[2] != 'decrypt':\r\n# print('Please enter encrypt or decrypt!')\r\n# sys.exit(0)\r\n\r\n\r\n\r\n\r\n for symbol in message:\r\n # Note: Only symbols in the `SYMBOLS` string can be encrypted/decrypted.\r\n if symbol in SYMBOLS:\r\n symbolIndex = SYMBOLS.find(symbol)\r\n # Perform encryption/decryption:\r\n if mode == 'encrypt':\r\n translatedIndex = symbolIndex + key\r\n elif mode == 'decrypt':\r\n translatedIndex = symbolIndex - key\r\n\r\n # Handle wrap-around, if needed:\r\n if translatedIndex >= len(SYMBOLS):\r\n translatedIndex = translatedIndex - len(SYMBOLS)\r\n elif translatedIndex < 0:\r\n translatedIndex = translatedIndex + len(SYMBOLS)\r\n\r\n if symbol.isupper():\r\n translated = translated + SYMBOLS[translatedIndex].upper()\r\n else:\r\n translated = translated + SYMBOLS[translatedIndex].lower()\r\n else:\r\n # Append the symbol without encrypting/decrypting:\r\n translated = translated + symbol\r\n\r\n # Output the translated string:\r\n print(translated)\r\n print(\"\")\r\n","sub_path":"as2/a2p1.py","file_name":"a2p1.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"83681724","text":"# write your code here\n\n\nclass TicTacToe:\n VALUE_X = \"X\"\n VALUE_O = \"O\"\n COORDS = [1, 2, 3]\n IMPOSSIBLE_STATE = \"Impossible\"\n X_WINS_STATE = f\"{VALUE_X} wins\"\n O_WINS_STATE = f\"{VALUE_O} wins\"\n DRAW_STATE = \"Draw\"\n GAME_NOT_FINISHED_STATE = \"Game not finished\"\n\n def __init__(self, state=None):\n self._state = state or \" \"\n self._cells = [\n list(self._state[i:i + 3]) for i in range(0, len(self._state), 3)\n ]\n self._value = self.VALUE_X\n\n def _count(self, value):\n return self._state.count(value)\n\n def _win(self, value):\n fill = [value, value, value]\n combinations = self._cells[:]\n combinations.extend([\n [self._cells[0][0], self._cells[1][1], self._cells[2][2]],\n [self._cells[0][2], self._cells[1][1], self._cells[2][0]],\n ])\n combinations.extend([\n [self._cells[0][i], self._cells[1][i], self._cells[2][i]]\n for i in range(len(self._cells))\n ])\n return fill in combinations\n\n @property\n def state(self):\n return self._state\n\n @property\n def completed(self):\n return (\n \" \" not in self._state or\n self.status != self.GAME_NOT_FINISHED_STATE\n )\n\n @property\n def status(self):\n x_wins = self._win(self.VALUE_X)\n x_count = self._count(self.VALUE_X)\n o_wins = self._win(self.VALUE_O)\n o_count = self._count(self.VALUE_O)\n\n if (x_wins and o_wins) or (abs(x_count - o_count) > 1):\n return self.IMPOSSIBLE_STATE\n elif x_wins:\n return self.X_WINS_STATE\n elif o_wins:\n return self.O_WINS_STATE\n elif \" \" not in self._state:\n return self.DRAW_STATE\n return self.GAME_NOT_FINISHED_STATE\n\n def _change_value(self):\n self._value = (\n self.VALUE_O if self._value == self.VALUE_X else self.VALUE_X\n )\n\n def _update_state(self):\n self._state = \"\".join(cell for row in self._cells for cell in row)\n\n def set(self, x, y):\n x, y = 3 - x, y - 1\n if self._cells[x][y] in [self.VALUE_X, self.VALUE_O]:\n return False\n self._cells[x][y] = self._value\n self._change_value()\n self._update_state()\n return True\n\n def __str__(self):\n return (\n f\"---------\\n\"\n f\"| {' '.join(self._state[:3])} |\\n\"\n f\"| {' '.join(self._state[3:6])} |\\n\"\n f\"| {' '.join(self._state[6:9])} |\\n\"\n f\"---------\"\n )\n\n\ndef parse_input_position(data):\n data = data.split()\n if not data or not data[0].isdigit() or not data[1].isdigit():\n return \"You should enter numbers!\", None, None\n y, x = int(data[0]), int(data[1])\n if x not in TicTacToe.COORDS or y not in TicTacToe.COORDS:\n return \"Coordinates should be from 1 to 3!\", None, None\n return None, x, y\n\n\ntic_tac_toe = TicTacToe()\nprint(tic_tac_toe)\n\nwhile not tic_tac_toe.completed:\n message, pos_x, pos_y = parse_input_position(input(\n \"Enter the coordinates: > \"\n ))\n if message is not None:\n print(message)\n else:\n if not tic_tac_toe.set(pos_x, pos_y):\n print(\"This cell is occupied! Choose another one!\")\n else:\n print(tic_tac_toe)\n\nprint(tic_tac_toe.status)\n","sub_path":"Easy/Tic-Tac-Toe/Tic-Tac-Toe/task/tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"166161580","text":"import os\n\ndef fileExists(filename):\n\treturn os.path.isfile(filename)\n\ndef getCurveForImage(image):\n\ttmp = image.copy()\n\tif (tmp.dtype == cv2.CV_8UC3):\n\t\tgray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)\n\telif (tmp.dtype == cv2.CV_8UC1):\n\t\tgray = tmp\n\telse:\n\t\traise Exception('Unsupported image format')\n\n\tgray = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)\n\n\t(_, contours, _) = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n\tif len(contours) <= 0:\n\t\treturn\n\n\tupperCurve = contours[0]\n\tif (len(upperCurve) <= 50):\n\t\treturn\t\n","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"232158767","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nimport numpy as np\n\nfrom scilpy.samplingscheme.save_scheme import (save_scheme_bvecs_bvals,\n save_scheme_mrtrix)\nfrom scilpy.utils.filenames import split_name_with_nii\n\nDEFAULT_B0_THRESHOLD = 20\n\n\ndef is_normalized_bvecs(bvecs):\n \"\"\"\n Check if b-vectors are normalized.\n\n Parameters\n ----------\n bvecs : (N, 3) array\n input b-vectors (N, 3) array\n\n Returns\n -------\n True/False\n \"\"\"\n\n bvecs_norm = np.linalg.norm(bvecs, axis=1)\n return np.all(np.logical_or(np.abs(bvecs_norm - 1) < 1e-3,\n bvecs_norm == 0))\n\n\ndef normalize_bvecs(bvecs, filename=None):\n \"\"\"\n Normalize b-vectors\n\n Parameters\n ----------\n bvecs : (N, 3) array\n input b-vectors (N, 3) array\n filename : string\n output filename where to save the normalized bvecs\n\n Returns\n -------\n bvecs : (N, 3)\n normalized b-vectors\n \"\"\"\n\n bvecs_norm = np.linalg.norm(bvecs, axis=1)\n idx = bvecs_norm != 0\n bvecs[idx] /= bvecs_norm[idx, None]\n\n if filename is not None:\n logging.info('Saving new bvecs: {}'.format(filename))\n np.savetxt(filename, np.transpose(bvecs), \"%.8f\")\n\n return bvecs\n\n\ndef check_b0_threshold(force_b0_threshold, bvals_min):\n \"\"\"Check if the minimal bvalue is under zero or over the default threshold.\n If `force_b0_threshold` is true, don't raise an error even if the minimum\n bvalue is suspiciously high.\n\n Parameters\n ----------\n force_b0_threshold : bool\n If True, don't raise an error.\n bvals_min : float\n Minimum bvalue.\n\n Raises\n ------\n ValueError\n If the minimal bvalue is under zero or over the default threshold, and\n `force_b0_threshold` is False.\n \"\"\"\n if bvals_min != 0:\n if bvals_min < 0 or bvals_min > DEFAULT_B0_THRESHOLD:\n if force_b0_threshold:\n logging.warning(\n 'Warning: Your minimal bval is {}. This is highly '\n 'suspicious. The script will nonetheless proceed since '\n '--force_b0_threshold was specified.'.format(bvals_min))\n else:\n raise ValueError('The minimal bval is lesser than 0 or '\n 'greater than {}. This is highly ' +\n 'suspicious.\\n'\n 'Please check your data to ensure everything '\n 'is correct.\\n'\n 'Value found: {}\\n'\n 'Use --force_b0_threshold to execute '\n 'regardless.'\n .format(DEFAULT_B0_THRESHOLD, bvals_min))\n else:\n logging.warning('Warning: No b=0 image. Setting b0_threshold to '\n 'the minimum bval: {}'.format(bvals_min))\n\n\ndef get_shell_indices(bvals, shell, tol=10):\n \"\"\"\n Get shell indices\n\n Parameters\n ----------\n bvals: array (N,)\n array of bvals\n shell: list\n list of bvals\n tol: int\n tolerance to accept a bval\n\n Returns\n -------\n numpy.ndarray where shells are found\n \"\"\"\n\n return np.where(\n np.logical_and(bvals < shell + tol, bvals > shell - tol))[0]\n\n\ndef fsl2mrtrix(fsl_bval_filename, fsl_bvec_filename, mrtrix_filename):\n \"\"\"\n Convert a fsl dir_grad.bvec/.bval files to mrtrix encoding.b file.\n\n Parameters\n ----------\n fsl_bval_filename: str\n path to input fsl bval file.\n fsl_bvec_filename: str\n path to input fsl bvec file.\n mrtrix_filename : str, optional\n path to output mrtrix encoding.b file. Default is\n fsl_bvec_filename.b.\n\n Returns\n -------\n \"\"\"\n\n shells = np.loadtxt(fsl_bval_filename)\n points = np.loadtxt(fsl_bvec_filename)\n bvals = np.unique(shells).tolist()\n\n if not points.shape[0] == 3:\n points = points.transpose()\n logging.warning('WARNING: Your bvecs seem transposed. 
' +\n 'Transposing them.')\n\n shell_idx = [int(np.where(bval == bvals)[0]) for bval in shells]\n\n basefilename, ext = split_name_with_nii(mrtrix_filename)\n\n save_scheme_mrtrix(points,\n shell_idx,\n bvals,\n basefilename,\n verbose=1)\n\n\ndef mrtrix2fsl(mrtrix_filename, fsl_bval_filename=None,\n fsl_bvec_filename=None):\n \"\"\"\n Convert a mrtrix encoding.b file to fsl dir_grad.bvec/.bval files.\n\n Parameters\n ----------\n mrtrix_filename : str\n path to mrtrix encoding.b file.\n fsl_bval_filename: str, optional\n path to the output fsl bval file. Default is\n mrtrix_filename.bval.\n fsl_bvec_filename: str, optional\n path to the output fsl bvec file. Default is\n mrtrix_filename.bvec.\n Returns\n -------\n \"\"\"\n\n mrtrix_b = np.loadtxt(mrtrix_filename)\n if not len(mrtrix_b.shape) == 2 or not mrtrix_b.shape[1] == 4:\n raise ValueError('mrtrix file must have 4 columns')\n\n points = np.array([mrtrix_b[:, 0], mrtrix_b[:, 1], mrtrix_b[:, 2]])\n shells = np.array(mrtrix_b[:, 3])\n\n bvals = np.unique(shells).tolist()\n shell_idx = [int(np.where(bval == bvals)[0]) for bval in shells]\n\n if fsl_bval_filename is None:\n fsl_bval_filename = mrtrix_filename + str(\".bval\")\n\n if fsl_bvec_filename is None:\n fsl_bvec_filename = mrtrix_filename + str(\".bvec\")\n\n save_scheme_bvecs_bvals(points,\n shell_idx,\n bvals,\n filename_bval=fsl_bval_filename,\n filename_bvec=fsl_bvec_filename,\n verbose=1)\n","sub_path":"scilpy/utils/bvec_bval_tools.py","file_name":"bvec_bval_tools.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"179314746","text":"import torch\nfrom transformers import BertTokenizer, BertModel, BertForQuestionAnswering\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ntokenizer = BertTokenizer.from_pretrained('./final_model_whole')\nmodel = BertForQuestionAnswering.from_pretrained('./final_model_whole')\n\ncount = 0\n\nsens = []\n\nfo = open(\"./data/evaluate/test_predict_whole.answer\", \"w+\", encoding=\"utf8\")\nwith open(\"./data/train data/processed_test.doc_query\", \"r\", encoding=\"utf8\") as f:\n results = []\n for line in f:\n sens.append(line.strip())\n with open(\"./data/train data/train.answer\", \"r\", encoding=\"utf8\") as fl:\n '''for line in fl:\n labs.append(line.split(\"|||\")[1].strip())'''\n for sen in sens:\n count += 1\n data, num = sen.split(\"|||\")\n #label = labs[int(num.strip())]\n tokenized_data = tokenizer.tokenize(data)\n if len(tokenized_data) > 512:\n tokenized_data = tokenized_data[:512]\n token_type_id = [0]*len(tokenized_data)\n x = tokenized_data.index(\"[SEP]\")\n for i in range(x+1, len(token_type_id)):\n token_type_id[i] = 1\n token_type_id = torch.tensor(token_type_id)\n position_ids = torch.tensor([i for i in range(len(tokenized_data))])\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_data)\n tokens_tensor = torch.tensor([indexed_tokens])\n #input_ids = torch.tensor(tokenizer.encode(tokenized_data)).unsqueeze(0)\n model.cuda()\n tokens_tensor = tokens_tensor.cuda()\n token_type_id = token_type_id.cuda()\n position_ids = position_ids.cuda()\n outputs = model(tokens_tensor, token_type_ids=token_type_id, position_ids=position_ids)\n\n start_score = outputs[0].squeeze(0)\n end_score = outputs[1].squeeze(0)\n\n max_score = -100000\n count1 = 0\n best_start = 0\n best_end = 0\n for ty, start in zip(token_type_id, start_score):\n if ty == 1:\n if count1 < len(end_score) - 2:\n for i in range(1, 3):\n m = count1+i\n k = start + end_score[m]\n if k > max_score:\n max_score = start + end_score[count1+i]\n best_start = count1\n best_end = count1 + i\n count1 += 1\n pre = tokenized_data[best_start:best_end+1]\n result = ''.join(pre) + \"|||\"\n if len(results) < int(num) + 1:\n results.append([''.join(pre) + \"|||\" + str(max_score)])\n else:\n results[int(num)].append(''.join(pre) + \"|||\" + str(max_score))\n if count % 200 == 0:\n print(\"{} lines processed.\".format(count))\n\n for i, answers in enumerate(results):\n if len(answers) == 1:\n fo.write(\" ||| \"+answers[0].split(\"|||\")[0]+\"\\n\")\n else:\n k = 0\n word = []\n score = []\n for item in answers:\n word.append(item.split(\"|||\")[0])\n score.append(item.split(\"|||\")[1])\n if score[0] > score[1]:\n k = 0\n else:\n k = 1\n fo.write(\" ||| \" + answers[k].split(\"|||\")[0] + \"\\n\")\nfo.close()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"639251265","text":"import logging\nimport pyaudio\nimport numpy as np\nfrom struct import unpack\nfrom matplotlib import pyplot as plt\nimport argparse\n\n# Define stream constants\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\nFREQUENCY = 1/RATE\nINPUT_BUFFER_TIME = 1\nINPUT_FRAMES_PER_BUFFER = int(RATE*INPUT_BUFFER_TIME)\nDECODE_FMT = '{}h'.format(RATE*2)\n\nlogger = logging.getLogger(name='Tuner')\n\n\nclass Sess:\n \"\"\" Audio stream context manager\"\"\"\n def __enter__(self):\n self.audio = pyaudio.PyAudio()\n self.stream = self.audio.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=INPUT_FRAMES_PER_BUFFER)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stream.close()\n self.audio.terminate()\n\n\ndef decode_sample(raw_sample):\n \"\"\"\n We get one short out for each two chars in the bytes array.\n Args:\n raw_sample: bytes\n Sample buffer from the stream.\n\n Returns:\n signal: Audio signal as a Numpy array\n \"\"\"\n # Parse the buffer and return a tuple of signed integers.\n shorts = unpack(DECODE_FMT, raw_sample)\n\n # Type conversion\n signal = np.array(shorts)\n\n return signal\n\n\ndef sig_to_freq(signal):\n \"\"\"\n Calculate the frequencies of a signal. Also return the dominant frequency.\n \"\"\"\n # Fast fourier transform\n coeffs = np.fft.fft(signal)\n frequencies = np.fft.fftfreq(n=len(signal), d=FREQUENCY)\n\n # Filter out unrealistic frequencies.\n filter_id = ((frequencies > 20) * (frequencies < 1500)) + (\n (frequencies > -1500) * (frequencies < -20))\n frequencies, coeffs = frequencies[filter_id], coeffs[filter_id]\n\n # Find the largest fourier coefficient.\n idx = np.argmax(coeffs.imag)\n\n # Find the corresponding frequency.\n peak = frequencies[idx]\n peak = peak * np.sign(peak)\n\n return coeffs, frequencies, peak\n\n\ndef freq_to_note(frequency):\n \"\"\"\n Maps frequencies to the equivalent musical notes\n \"\"\"\n # Store the output strings.\n notes = ('A', 'Bb', 'B', 'C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'G#')\n directions = ('Flat', 'Sharp', 'Perfect!')\n\n # Map the frequency to a number between 0 and 12.\n c = np.log2(440)\n val = 12*(np.log2(frequency) - c)\n\n # Round to an integer to give the nearest note.\n rounded = int(np.round(val))\n note_idx = rounded % 12\n\n # Use the remainder to indicate tuning direction.\n remainder = val - rounded\n if remainder < - 0.1:\n idx = 0\n elif remainder > 0.1:\n idx = 1\n else:\n idx = 2\n return notes[note_idx], directions[idx]\n\n\ndef tuner(stream, sample_no):\n \"\"\"\n Print the loudest input frequency at regular intervals.\n \"\"\"\n slist = []\n flist = []\n for i in range(0, sample_no):\n # Read from the stream.\n sample = stream.read(INPUT_FRAMES_PER_BUFFER)\n\n # Decode the stream.\n signal = decode_sample(sample)\n\n # Take a fourier transform of the signal and find the loudest frequency.\n coeffs, frequencies, peak = sig_to_freq(signal=signal)\n\n # Store the recordings\n slist.append(signal)\n flist.append((coeffs, frequencies))\n\n # Map the frequency to the correct note.\n note, direction = freq_to_note(frequency=peak)\n\n # Display the frequency, note, and tuning guidance.\n print('Frequency: {}, Note: {}, Error: {}'.format(peak, note, direction))\n\n # Return some data for plotting and debugging.\n total_signal = np.concatenate(slist, 0)\n return total_signal, flist\n\n\ndef signalplot(s):\n \"\"\" Plot the full recorded audio\"\"\"\n plt.figure()\n plt.ylabel('Amplitude')\n plt.xlabel('Time')\n 
plt.plot(s)\n plt.savefig('signal.png')\n\n\ndef fftplot(coeffs, freqs):\n # Truncate the frequency range a bit.\n idx = (freqs < 2000) * (freqs > -2000)\n freqs, coeffs = freqs[idx], coeffs[idx]\n\n plt.subplot(2, 1, 1)\n plt.title('Real')\n plt.ylabel('Coeff')\n plt.plot(freqs, coeffs.real, color='b')\n\n plt.subplot(2, 1, 2)\n plt.title('Imaginary')\n plt.xlabel('Frequency')\n plt.ylabel('Coeff')\n plt.plot(freqs, coeffs.imag, color='g')\n plt.savefig('fft.png')\n\n\nif __name__ == '__main__':\n with Sess() as session:\n stream = session.stream\n parser = argparse.ArgumentParser(\n description='Tune your guitar!')\n parser.add_argument(\n '--time',\n type=int,\n default=10,\n help='Number of seconds to run the tuner for.')\n args = parser.parse_args()\n sample_no = args.time\n\n # Run the tuner.\n total_signal, freqlist = tuner(stream, sample_no)\n\n signalplot(total_signal)\n fftplot(*freqlist[min(5, sample_no-1)])\n","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"287209402","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndf=pd.read_csv('movie_dataset.csv')\n\nfeatures=['genres','keywords','title','director','cast']\n\ndef get_title_from_index(index):\n return df[df.index == index][\"title\"].values[0]\n\ndef get_index_from_title(title):\n return df[df.title == title][\"index\"].values[0]\n\nfor feature in features:\n df[feature] = df[feature].fillna('')\n\n\ndef combine_features(row):\n return row['genres']+' '+row['keywords']+' '+row['title']+' '+row['director']+' '+row['cast']\n\ndf['combined_features']=df.apply(combine_features,axis=1)\n\ncv=CountVectorizer()\ncount_matrix=cv.fit_transform(df['combined_features'])\n\ncos_simi=cosine_similarity(count_matrix)\n\nmovie=input('ENTER YOUR MOVIE >>')\n\nind=get_index_from_title(movie)\n\nsimilar_movies=list(enumerate(cos_simi[ind]))\n\nsorted_similar_movies=sorted(similar_movies,key=lambda x:x[1],reverse=True)\n\ni=0\nfor m in sorted_similar_movies:\n print(get_title_from_index(m[0]))\n i=i+1\n if i>50:\n break\n\ninput()\n\n","sub_path":"movie recommendation system/mrs2.py","file_name":"mrs2.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"449000245","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom sunbeam.items import SunbeamItem\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\n\nclass SunwzSpider(scrapy.Spider):\n name = 'sunwz' # 爬虫名字是唯一的\n allowed_domains = ['wz.sun0769.com']\n\n url = 'http://wz.sun0769.com/index.php/question/questionType?type=4&page={}' \n offset = 0\n start_urls = [url.format(offset)]\n #start_urls = ['http://http://wz.sun0769.com/index.php/question/questionType?type=4&page={}'.format(i) for i in range(0,94110, 30 )]\n#\n# def start_request(self):\n# for i in range(0, 90, 30):\n# yield scrapy.Request(self.url.format(i))\n\n def parse(self, response):\n #取页面帖子的链接\n links = response.xpath('//div[@class=\"greyframe\"]/table//td/a[@class=\"news14\"]/@href').extract()\n for link in links:\n yield scrapy.Request(link, callback=self.parse_item)\n # 设置翻页\n if self.offset <= 94410:\n self.offset += 30\n yield scrapy.Request(self.url.format(self.offset), callback=self.parse)\n\n\n def parse_item(self, response):\n items = SunbeamItem()\n # 内容\n items['content'] = response.xpath('//div[@class=\"c1 text14_2\"]/text()')[0].extract()\n # 标题\n items['title'] = response.xpath('//div[contains(@class, \"pagecenter p3\")]//strong/text()').extract()[0].split(':')[0].replace('编号', '')\n print(items['title'])\n # 编号\n items['number'] = response.xpath('//div[contains(@class, \"pagecenter p3\")]//strong/text()').extract()[0].split(':')[-1].strip()\n print(items['number'])\n \n print(items['title'][0])\n # url\n items['url'] = response.url\n yield items\n\n\n","sub_path":"sunbeam/sunbeam/spiders/sunwz.py","file_name":"sunwz.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"570602895","text":"# init_method.py\n\n# 此示例示意初始化方法的定义方法及用法\n\nclass Car:\n def __init__(self, c, b, m):\n self.color = c # 颜色\n self.brand = b # 品牌\n self.model = m # 型号\n print(\"初始化方法被调用\")\n \n def run(self, speed): # 行驶\n print(self.color, \"的\",\n self.brand, self.model, '正在以',\n speed, '公里/小时的速度行驶')\n\na4 = Car('红色', \"奥迪\", 'A4')\na4.run(230)\n\nt = Car('蓝色', \"TESLA\", 'Model S')\nt.run(199)\n\n","sub_path":"02-PythonBase/day17/code/init_method.py","file_name":"init_method.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"463116346","text":"from human import Human\r\nfrom ai import Artificial\r\n\r\nclass Game:\r\n def __init__(self):\r\n self.contestants = []\r\n pass\r\n\r\n def welcome_rules(self):\r\n print(\"Welcome to Rock, Paper, Scissors, Lizard, Spock (RPSLS)!\\n\"\r\n \"\\n\"\r\n \"The rules of RPSLS is:\\n\"\r\n \"\\n\"\r\n \"Rock crushes Scissors\\n\"\r\n \"Scissors cuts Paper\\n\"\r\n \"Paper covers Rock\\n\"\r\n \"Rock crushes Lizard\\n\"\r\n \"Lizard poisons Spock\\n\"\r\n \"Spock smashes Scissors\\n\"\r\n \"Scissors decapitates Lizard\\n\"\r\n \"Lizard eats Paper\\n\"\r\n \"Paper disproves Spock\\n\"\r\n \"Spock vaporizes Rock\\n\"\r\n \"\\n\"\r\n \"Let's play!\")\r\n \r\n def game_mode(self):\r\n game_choice = (input(\"How will you be playing?\\n\"\r\n \"Type (1) for single player\\n\"\r\n \"Type (2) for multiplayer \"))\r\n if game_choice == \"1\":\r\n ai_player = Artificial()\r\n self.contestants.append(ai_player)\r\n human_player1 = Human()\r\n self.contestants.append(human_player1)\r\n print(\"\\n\")\r\n self.ai_game()\r\n elif game_choice == \"2\":\r\n human_player1 = Human()\r\n self.contestants.append(human_player1)\r\n human_player2 = Human()\r\n self.contestants.append(human_player2)\r\n self.human_game()\r\n else:\r\n print(\"\\n\")\r\n self.game_mode()\r\n pass\r\n\r\n def ai_game(self):\r\n human_winner_counter = 0\r\n ai_winner_counter = 0\r\n\r\n human_round1 = self.human_turn(1)\r\n ai_round1 = self.ai_turn()\r\n round_one_winner = self.find_round_winner(human_round1, ai_round1)\r\n print(\"\\n\")\r\n print(f'{self.contestants[1].name} chose {human_round1} and\\n{self.contestants[0].name} chose {ai_round1}')\r\n print(f'{round_one_winner} wins round 1')\r\n print(\"\\n\")\r\n if round_one_winner == self.contestants[1].name:\r\n human_winner_counter += 1\r\n elif round_one_winner == self.contestants[0].name:\r\n ai_winner_counter += 1\r\n \r\n human_round2 = self.human_turn(1)\r\n ai_round2 = self.ai_turn()\r\n round_two_winner = self.find_round_winner(human_round2, ai_round2)\r\n print(\"\\n\")\r\n print(f'{self.contestants[1].name} chose {human_round2} and\\n{self.contestants[0].name} chose {ai_round2}')\r\n print(f'{round_two_winner} wins round 2')\r\n print(\"\\n\")\r\n if round_two_winner == self.contestants[1].name:\r\n human_winner_counter += 1\r\n elif round_two_winner == self.contestants[0].name:\r\n ai_winner_counter += 1\r\n if human_winner_counter == 2:\r\n print(f'{self.contestants[1].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif ai_winner_counter == 2:\r\n print(f'{self.contestants[0].name} wins the game!!!\\n''Better luck next time! ¯\\_(ツ)_/¯\\n')\r\n self.ask_new_game()\r\n elif human_winner_counter < 2 and ai_winner_counter < 2:\r\n human_round3 = self.human_turn(1)\r\n ai_round3 = self.ai_turn()\r\n round_three_winner = self.find_round_winner(human_round3, ai_round3)\r\n print(\"\\n\")\r\n print(f'{self.contestants[1].name} chose {human_round3} and\\n{self.contestants[0].name} chose {ai_round3}')\r\n print(f'{round_three_winner} wins round 3')\r\n print(\"\\n\")\r\n if round_three_winner == self.contestants[1].name:\r\n human_winner_counter += 1\r\n elif round_three_winner == self.contestants[0].name:\r\n ai_winner_counter += 1\r\n \r\n if human_winner_counter >= 2:\r\n print(f'{self.contestants[1].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif ai_winner_counter >= 2:\r\n print(f'{self.contestants[0].name} wins the game!!!\\n''Better luck next time! 
¯\\_(ツ)_/¯\\n')\r\n self.ask_new_game()\r\n elif human_winner_counter == 1 and ai_winner_counter == 0:\r\n print(f'{self.contestants[1].name} wins the game!!!\\n')\r\n self.ask_new_game() \r\n elif ai_winner_counter == 1 and human_winner_counter == 0:\r\n print(f'{self.contestants[0].name} wins the game!!!\\n''Better luck next time! ¯\\_(ツ)_/¯\\n')\r\n self.ask_new_game()\r\n elif human_winner_counter == ai_winner_counter:\r\n print(\"How does it feel to tie and win nothing?! Sucks right?\\n\"\"Play again because if you ain't first, you're last!\\n\")\r\n self.ask_new_game()\r\n\r\n def ai_turn(self):\r\n ai_player_choice = self.contestants[0].choose_gesture()\r\n return ai_player_choice\r\n\r\n def human_turn(self, index_number):\r\n self.contestants[index_number].choose_gesture()\r\n human1_choice = self.valid_answer()\r\n return human1_choice\r\n\r\n def human_game(self):\r\n human1_winner_counter = 0\r\n human2_winner_counter = 0\r\n\r\n human1_round1 = self.human_turn(1)\r\n human2_round1 = self.human_turn(0)\r\n round_one_winner = self.find_round_winner(human1_round1, human2_round1)\r\n print(\"\\n\")\r\n print(f'{self.contestants[1].name} chose {human1_round1} and\\n{self.contestants[0].name} chose {human2_round1}')\r\n print(f'{round_one_winner} wins round 1!')\r\n print(\"\\n\")\r\n if round_one_winner == self.contestants[1].name:\r\n human1_winner_counter += 1\r\n elif round_one_winner == self.contestants[0].name:\r\n human2_winner_counter += 1\r\n \r\n human1_round2 = self.human_turn(1)\r\n human2_round2 = self.human_turn(0)\r\n round_two_winner = self.find_round_winner(human1_round2, human2_round2)\r\n print(\"\\n\")\r\n print(f'{self.contestants[1].name} chose {human1_round2} and\\n{self.contestants[0].name} chose {human2_round2}')\r\n print(f'{round_two_winner} wins round 2!')\r\n print(\"\\n\")\r\n if round_two_winner == self.contestants[1].name:\r\n human1_winner_counter += 1\r\n elif round_two_winner == self.contestants[0].name:\r\n human2_winner_counter += 1\r\n if human1_winner_counter == 2:\r\n print(f'{self.contestants[1].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif human2_winner_counter == 2:\r\n print(f'{self.contestants[0].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif human1_winner_counter < 2 and human2_winner_counter < 2:\r\n human1_round3 = self.human_turn(1)\r\n human2_round3 = self.human_turn(0)\r\n round_three_winner = self.find_round_winner(human1_round3, human2_round3)\r\n print(\"\\n\")\r\n print(f'{self.contestants[1].name} chose {human1_round3} and\\n{self.contestants[0].name} chose {human2_round3}')\r\n print(f'{round_three_winner} wins round 3!')\r\n print(\"\\n\")\r\n if round_three_winner == self.contestants[1].name:\r\n human1_winner_counter += 1\r\n elif round_three_winner == self.contestants[0].name:\r\n human2_winner_counter += 1\r\n \r\n if human1_winner_counter >= 2:\r\n print(f'{self.contestants[1].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif human2_winner_counter >= 2:\r\n print(f'{self.contestants[0].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif human1_winner_counter == 1 and human2_winner_counter == 0:\r\n print(f'{self.contestants[1].name} wins the game!!!\\n')\r\n self.ask_new_game() \r\n elif human2_winner_counter == 1 and human1_winner_counter == 0:\r\n print(f'{self.contestants[0].name} wins the game!!!\\n')\r\n self.ask_new_game()\r\n elif human1_winner_counter == human2_winner_counter:\r\n print(\"How does it feel to tie and win nothing?! 
Sucks right?\\n\"\"Play again because if you ain't first, you're last!\\n\")\r\n self.ask_new_game()\r\n pass \r\n\r\n def run_game(self):\r\n self.contestants = []\r\n self.welcome_rules()\r\n self.game_mode()\r\n\r\n def valid_answer(self):\r\n while True:\r\n choice = input(\"Please type your RPSLS gesture choice here: \").casefold()\r\n if choice.lower() not in (\"rock\", \"paper\", \"scissors\", \"lizard\", \"spock\"):\r\n print(\"Please re-type your gesture choice.\"\"\\n\")\r\n else:\r\n break\r\n return choice\r\n\r\n def find_round_winner(self, human1_choice, ai_player_choice):\r\n if human1_choice == ai_player_choice:\r\n return\"Nobody\"\r\n\r\n elif human1_choice == \"rock\": \r\n if ai_player_choice == \"scissors\" or ai_player_choice == \"lizard\":\r\n return self.contestants[1].name\r\n elif ai_player_choice == \"spock\" or ai_player_choice == \"paper\":\r\n return self.contestants[0].name\r\n\r\n elif human1_choice == \"paper\":\r\n if ai_player_choice == \"spock\" or ai_player_choice == \"rock\": \r\n return self.contestants[1].name\r\n elif ai_player_choice == \"lizard\" or ai_player_choice == \"scissors\":\r\n return self.contestants[0].name\r\n\r\n elif human1_choice == \"scissors\":\r\n if ai_player_choice == \"paper\" or ai_player_choice == \"lizard\":\r\n return self.contestants[1].name\r\n elif ai_player_choice == \"rock\" or ai_player_choice == \"spock\":\r\n return self.contestants[0].name\r\n\r\n elif human1_choice == \"lizard\":\r\n if ai_player_choice == \"spock\" or ai_player_choice == \"paper\":\r\n return self.contestants[1].name\r\n elif ai_player_choice == \"rock\" or ai_player_choice == \"scissors\":\r\n return self.contestants[0].name\r\n\r\n elif human1_choice == \"spock\":\r\n if ai_player_choice == \"rock\" or ai_player_choice == \"scissors\":\r\n return self.contestants[1].name\r\n elif ai_player_choice == \"paper\" or ai_player_choice == \"lizard\":\r\n return self.contestants[0].name\r\n\r\n def ask_new_game(self):\r\n new_game = input(\"Do you want to play again?\\n\"\r\n \"Type (y)\\n\"\r\n \"Type (n) \")\r\n if new_game == \"y\":\r\n self.run_game()\r\n elif new_game == \"n\":\r\n print(\"See you next time.\")\r\n else:\r\n self.ask_new_game()\r\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"415899508","text":"import numpy as np\n\ndef sor_solver(A, b, omega, initial_guess, tol):\n \"\"\"\n Implementacion del SOR\n Entradas:\n A: Matriz\n b: Vector \n omega: factor de relajacion\n tol: tolerancia (1e-5)\n initial_guess: solucion inicial\n Salidas:\n phi: vector solución\n \"\"\"\n phi = initial_guess[:]\n residual = np.linalg.norm(np.matmul(A, phi) - b) #residual\n itera = 1\n while residual > tol:\n print(\"iteracion: \", itera)\n for i in range(A.shape[0]):\n sigma = 0\n for j in range(A.shape[1]):\n if j != i:\n sigma += A[i][j] * phi[j]\n phi[i] = (1 - omega) * phi[i] + (omega / A[i][i]) * (b[i] - sigma)\n residual = np.linalg.norm(np.matmul(A, phi) - b)\n itera = itera + 1\n # print('Residual: {0:10.6g}'.format(residual))\n \n print(phi)\n return phi\n\n\n#Adaptación del pesudo código de SOR\ntol = 1e-5\nformato = \"{:.\"+str(tol).split('-')[1]+\"f}\"\nomega = 1.3 # factor de relajación w\n\nA = np.array([[4. , 3., 0.],\n [3., 4., -1.],\n [0., -1., 4.]])\n\n\nb = np.array([0.254,-1.425,2.978])\n\ninitial_guess = np.array([0.,0.,0.])\n\nphi = sor_solver(A, b, omega, initial_guess, tol)\nprint(\"Resultado: [ \", formato.format(phi[0]),\", \",formato.format(phi[1]),\", \",formato.format(phi[2]),\"]\")","sub_path":"Talleres/Taller 2/SOR.py","file_name":"SOR.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"216974896","text":"print(\"AGENDA do ADS\")\r\n\r\nmenu = \"1\"\r\n\r\nwhile menu == \"1\" or menu == \"2\" or menu == \"3\":\r\n menu = input(\"Digite a opção desejada: \\n 1 - Cadastrar \\n 2 - Consultar \\n 3 - Sair \\n\")\r\n\r\n\r\n\r\n while menu != \"1\" and menu != \"2\" and menu != \"3\":\r\n print(\"Opção invalida!!! DIGITE NOVAMENTE!!\")\r\n menu = input(\"Digite a opção desejada: \\n 1 - Cadastrar \\n 2 - Consultar \\n 3 - Sair \\n\")\r\n\r\n\r\n \r\n if menu ==\"1\":\r\n lista_nome = []\r\n nome = input(\"Digite seu Nome: \")\r\n \r\n\r\n rua = input(\"Digite da sua Rua: \")\r\n cep = input(\"Digite seu CEP:\")\r\n bairro = input(\"Digite seu Bairro: \")\r\n estado = input(\"Digite o estado onde mora\")\r\n telefone = int(input(\"Digite seu ddd e telefone \"))\r\n\r\n \r\n dados = (\"Nome:{0} Rua:{1} CEP:{2} Bairro:{3} Estado:{4} Telefone:{5}' \".format(nome,rua,cep,bairro,estado,telefone))\r\n \r\n\r\n print(\"{0} CADASTRADO COM SUCESSO\".format(nome))\r\n\r\n arquivo = open('cadastro.txt','a')\r\n arquivo.write(dados + \"\\n\")\r\n print(\"Dados Adicionados no arquivo\")\r\n arquivo.close()\r\n \r\n\r\n\r\n \r\n\r\n elif menu == \"2\":\r\n palavra = input('Digite o nome da pessoa que deseja consultar: ')\r\n for line in open('cadastro.txt'):\r\n if palavra in line:\r\n print(line)\r\n\r\n \r\n elif menu == \"3\":\r\n print(\"Obrigado por utilizar!!!\")\r\n break\r\n","sub_path":"cadastro.py","file_name":"cadastro.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"448824283","text":"from datetime import datetime, timedelta\n\nfrom scheme import UTC, current_timestamp\nfrom spire.schema import *\nfrom spire.support.logs import LogHelper\nfrom sqlalchemy.sql import bindparam, text\n\nfrom platoon.models.subscribedtask import SubscribedTask\n\n__all__ = ('Event',)\n\nlog = LogHelper('platoon')\nschema = Schema('platoon')\n\nclass Event(Model):\n \"\"\"An event.\"\"\"\n\n class meta:\n schema = schema\n tablename = 'event'\n\n id = Identifier()\n topic = Token(nullable=False)\n aspects = Hstore()\n status = Enumeration('pending completed', nullable=False, default='pending')\n occurrence = DateTime(timezone=True)\n\n HSTORE_FILTER = text(':aspects @> subscribed_task.aspects',\n bindparams=[bindparam('aspects', type_=aspects.type)])\n\n @classmethod\n def create(cls, session, topic, aspects=None):\n event = Event(topic=topic, aspects=aspects, occurrence=datetime.now(UTC))\n session.add(event)\n return event\n\n def collate_tasks(self, session):\n model = SubscribedTask\n return (session.query(model).with_lockmode('update')\n .filter(model.topic==self.topic)\n .filter((model.activation_limit == None) | (model.activations < model.activation_limit))\n .filter(self.HSTORE_FILTER | (model.aspects == None))\n .params(aspects=(self.aspects or {})))\n\n def describe(self):\n aspects = {'topic': self.topic}\n if self.aspects:\n aspects.update(self.aspects)\n return aspects\n\n @classmethod\n def process_events(cls, session):\n for event in session.query(cls).with_lockmode('update').filter_by(status='pending'):\n event.schedule_tasks(session)\n else:\n session.commit()\n\n @classmethod\n def purge(cls, session, lifetime):\n delta = datetime.now(UTC) - timedelta(days=lifetime)\n session.query(cls).filter(cls.status == 'completed', cls.occurrence < delta).delete()\n\n def schedule_tasks(self, session):\n description = self.describe()\n for task in self.collate_tasks(session):\n task.activate(session, description)\n\n self.status = 'completed'\n","sub_path":"platoon/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"157884909","text":"import sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTabWidget\n\nfrom xps.menubar import MenuBar\nfrom xps.navbar import TreeDockWidget\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n QMainWindow.__init__(self, parent=None, flags=Qt.Window)\n self.initUI()\n\n def initUI(self):\n self.setMenuBar(MenuBar(self))\n self.setDockNestingEnabled(True)\n self.addDockWidget(Qt.LeftDockWidgetArea, TreeDockWidget(self))\n self.setCentralWidget(QTabWidget(self))\n self.resize(1200, 800)\n\n\nif __name__ == '__main__':\n application = QApplication(sys.argv)\n application.setApplicationName('dcsTMS')\n application.setApplicationDisplayName('dcsTMS')\n application.setOrganizationName('shuker')\n application.setOrganizationDomain('shuker.io')\n window = MainWindow()\n window.show()\n sys.exit(application.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"311180123","text":"#python3\n\nimport json\n\ndef retrieve_username():\n \"Retrieves username from memory\"\n try:\n with open(\"username.json\") as f_obj:\n username = json.load(f_obj)\n except FileNotFoundError:\n return none\n else:\n return username\n\ndef store_username():\n \"Stores username\"\n with \"username.json\" as f_obj:\n username = input(\"What is your username? \")\n json.dump(username, f_obj)\n print(\"I'll remember that.\")\n\ndef greet_user():\n \"Verifies identity and greets user\"\n username = retrieve_username()\n if username:\n verify_id = input(\"Is this you? \" + username)\n if verify_id.lowercase() == \"yes\" or verify_id.lowercase() == \"y\":\n print(\"Welcome back, \" + username)\n elif verify_id.lowercase() == \"no\" or verify_id.lowercase() == \"n\":\n store_username()\n else:\n print(\"Please answer yes or no.\")\n else:\n store_username()\n\nif __name__ == \"__main__\":\n greet_user()\n","sub_path":"remember_me.py","file_name":"remember_me.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"116118751","text":"import sqlite3\r\nimport random\r\n\r\nconnect = sqlite3.connect('database.db')\r\nconnect.execute(\"PRAGMA foreign_keys = 1\")\r\ncursor = connect.cursor()\r\n\r\ncursor.execute('''CREATE TABLE IF NOT EXISTS Clients (\r\n id INTEGER PRIMARY KEY,\r\n surname TEXT,\r\n name TEXT,\r\n patronymic TEXT)''')\r\n\r\ncursor.execute('''CREATE TABLE IF NOT EXISTS House (\r\n client_id INTEGER,\r\n adress TEXT,\r\n type TEXT CHECK (type IN\r\n (\"Flat\", \"House\", \"Penthouse\", \"Room\")),\r\n FOREIGN KEY (client_id) REFERENCES clientss(id)\r\n ON DELETE RESTRICT)''')\r\n\r\ncursor.execute('''CREATE TABLE IF NOT EXISTS Car (\r\n house_id INTEGER,\r\n mark TEXT,\r\n year INTEGER CHECK (year>1899 AND year<2015),\r\n FOREIGN KEY (house_id) REFERENCES house(id)\r\n ON DELETE CASCADE)''')\r\n\r\nsurname = ['Smith', 'Jobs', 'Gates']\r\nname = ['Steve', 'Bill', 'Mark']\r\npatronymic = ['John', 'Leonard', 'Mario']\r\nfor id in range(1, 6):\r\n sql = 'INSERT INTO Clientss VALUES(?,?,?,?)'\r\n surname = random.choice(surname)\r\n name = random.choice(name)\r\n patronymic = random.choice(patronymic)\r\n cursor.execute(sql, (id, surname, name, patronymic))\r\n \r\nstatuses = [\"Flat\", \"House\", \"Penthouse\", \"Room\"]\r\nadresses = [\"Avenue\", \"Street\", \"Shosse\", \"Bulwaure\"]\r\nfor i in range(1, 11):\r\n sql = 'INSERT INTO House VALUES(?,?,?)'\r\n house_id = random.randint(1, 6)\r\n adress = random.choice(adresses)\r\n status = random.choice(statuses)\r\n cursor.execute(sql, (house_id, adress, status))\r\n \r\nmarkes = ['Mersedes', 'BMW', 'Opel', 'Nissan', 'Toyota', 'Audi']\r\nfor id in range(1, 11):\r\n sql = 'INSERT INTO Car VALUES(?,?,?)'\r\n car_id = random.randint(1, 11)\r\n mark = random.choice(markes)\r\n year = random.randint(1900, 2014)\r\n cursor.execute(sql, (id, car_id, mark, year))\r\n\r\n\r\n\r\nconnect.commit()\r\nconnect.close()","sub_path":"Lab 3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"70650280","text":"#__author__ = 'alenush'\n\nimport pandas\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = pandas.read_csv('trainers.csv')\n\ngold_row = []\ngold_standard = {'BNC':[], 'BAWE':[], \"Original\":[]}\n\nfor row in data.iloc[0][2:38]:\n gold_row.append(row)\n\ngold_standard[\"BNC\"] = gold_row[:4] + gold_row[12:16] + gold_row[24:28]\ngold_standard[\"BAWE\"] = gold_row[4:8] + gold_row[16:20] + gold_row[28:32]\ngold_standard[\"Original\"] = gold_row[8:12] + gold_row[20:24] + gold_row[32:36]\n\nprint(gold_standard)\n\nstudents_answers = {}\n\nfor i in range(1,23):\n answers = []\n for st_rows in data.iloc[i][2:38]:\n answers.append(st_rows.lower())\n students_answers[data.iloc[i][1]] = answers\n\nfor_mean = {'BNC':[], \"BAWE\":[], 'Origin':[]}\nprint(len(students_answers))\nfor key, value in students_answers.items():\n print(\"Student\",key)\n bnc, bawe, origin = 0,0,0\n bnc_answ = value[:4] + value[12:16] + value[24:28]\n bawe_answers = value[4:8] + value[16:20] + value[28:32]\n origin_answers = value[8:12] + value[20:24] + value[32:36]\n for gold, answer in zip(gold_standard['BNC'], bnc_answ):\n if answer in gold:\n bnc +=1\n for gold, answer in zip(gold_standard['BAWE'], bawe_answers):\n if answer in gold:\n bawe +=1\n for gold, answer in zip(gold_standard['Original'], origin_answers):\n if answer in gold:\n origin +=1\n print(bnc/len(bnc_answ)*100, bawe/len(bnc_answ)*100, origin/len(bnc_answ)*100)\n for_mean['BNC'].append(bnc/len(bnc_answ)*100)\n for_mean['BAWE'].append(bawe/len(bnc_answ)*100)\n for_mean['Origin'].append(origin/len(bnc_answ)*100)\n\nprint(for_mean)\nlabels = []\nfor key, value in for_mean.items():\n print(key, np.mean(value), max(value), min(value))\n\n\n","sub_path":"nug_needs/experiment_data.py","file_name":"experiment_data.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"154478251","text":"__all__ = ['BioPaxObject', 'Entity', 'Pathway', 'Gene', 'Unresolved']\n\nfrom ..xml_util import *\n\n\nclass Unresolved:\n def __init__(self, obj_id):\n self.obj_id = obj_id\n\n\nclass BioPaxObject:\n \"\"\"Generic BioPAX Object. It is the parent class of all more specific\n BioPAX classes.\"\"\"\n list_types = ['xref', 'comment']\n xml_types = {}\n\n def __init__(self, uid, name=None, comment=None, xref=None):\n self.uid = uid\n # TODO: is name in the right place here?\n self.name = name\n self.comment = comment\n # TODO: is xref in the right place here?\n self.xref = xref\n\n @classmethod\n def from_xml(cls, element):\n uid = get_id_or_about(element)\n kwargs = {'uid': uid}\n for key in cls.list_types:\n kwargs[key] = []\n for child in element.getchildren():\n key = get_attr_tag(child)\n # In some OWL formats, the child is directly defined\n # under this tag, in that case we directly deserialize it.\n if child.getchildren():\n gchild = child.getchildren()[0]\n obj_cls = globals()[get_tag(gchild)]\n val_to_add = obj_cls.from_xml(gchild)\n # Otherwise, we check if the element is a simple type that we\n # can just get as a text value\n elif (get_datatype(child.attrib) is None\n and not get_resource(child.attrib)) \\\n or is_datatype(child.attrib, nssuffix('xsd', 'string')) \\\n or is_datatype(child.attrib, nssuffix('xsd', 'int')) \\\n or is_datatype(child.attrib, nssuffix('xsd', 'float')):\n val_to_add = child.text\n # If neither of the above is the case, then we assume that the\n # element is a reference that is defined in another block\n # somewhere so we treat is as Unresolved until later.\n else:\n res = get_resource(child.attrib)\n val_to_add = Unresolved(res)\n\n if key in cls.list_types:\n kwargs[key].append(val_to_add)\n else:\n kwargs[key] = val_to_add\n return cls(**kwargs)\n\n def to_xml(self):\n id_type = 'about' if is_url(self.uid) else 'ID'\n element = makers['bp'](self.__class__.__name__,\n **{nselem('rdf', id_type): self.uid})\n for attr in [a for a in dir(self)\n if not a.startswith('_')\n and a not in {'list_types', 'xml_types',\n 'to_xml', 'from_xml', 'uid'}]:\n val = getattr(self, attr)\n if val is None:\n continue\n if isinstance(val, list):\n for v in val:\n child_elem = self._simple_to_xml(attr, v)\n if child_elem is not None:\n element.append(child_elem)\n else:\n child_elem = self._simple_to_xml(attr, val)\n if child_elem is not None:\n element.append(child_elem)\n return element\n\n def _simple_to_xml(self, attr, val):\n if isinstance(val, BioPaxObject):\n child_elem = makers['bp'](\n snake_to_camel(attr),\n **{nselem('rdf', 'resource'):\n ('#%s' % val.uid) if not is_url(val.uid) else val.uid}\n )\n return child_elem\n elif isinstance(val, str):\n xml_type = self.xml_types.get(attr, 'string')\n child_elem = makers['bp'](\n snake_to_camel(attr),\n val,\n **{nselem('rdf', 'datatype'): nssuffix('xsd', xml_type)}\n )\n return child_elem\n return None\n\n\nclass Entity(BioPaxObject):\n \"\"\"BioPAX Entity.\"\"\"\n list_types = BioPaxObject.list_types + \\\n ['evidence', 'data_source']\n\n def __init__(self,\n standard_name=None,\n display_name=None,\n all_names=None,\n participant_of=None,\n availability=None,\n data_source=None,\n evidence=None,\n **kwargs):\n super().__init__(**kwargs)\n self.standard_name = standard_name\n self.display_name = display_name\n self.all_names = all_names\n self.participant_of = participant_of\n self.availability = availability\n self.data_source = data_source\n self.evidence = evidence\n\n\nclass 
Gene(Entity):\n \"\"\"BioPAX Gene\"\"\"\n def __init__(self, organism, **kwargs):\n super().__init__(**kwargs)\n self.organism = organism\n\n\nclass Pathway(Entity):\n \"\"\"BioPAX Pathway.\"\"\"\n list_types = Entity.list_types + ['pathway_component']\n\n def __init__(self,\n pathway_component=None,\n pathway_order=None,\n organism=None,\n **kwargs):\n super().__init__(**kwargs)\n self.pathway_component = pathway_component\n self.pathway_order = pathway_order\n self.organism = organism\n\n\n# These are necessary to have the objects in the global\n# scope, required for some modes of deserialization\nfrom .interaction import *\nfrom .physical_entity import *\nfrom .util import *\n","sub_path":"pybiopax/biopax/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"281918073","text":"'''\nAuthor: Puffrora\nDate: 2020-10-13 10:45:43\nLastModifiedBy: Puffrora\nLastEditTime: 2020-10-13 12:00:36\n'''\n\nimport bisect\n\n\nclass RangeModule:\n\n def __init__(self):\n self.range = []\n\n # find the start and end indices of the intervals touching the target range\n def get_bounds(self, left, right):\n i, j = 0, len(self.range) - 1\n for k in (100, 10, 1):\n while i + k - 1 < len(self.range) and self.range[i+k-1][1] < left:\n i += k\n while j - k + 1 >= 0 and self.range[j-k+1][0] > right:\n j -= k\n return i, j\n\n def addRange(self, left, right):\n i, j = self.get_bounds(left, right)\n if i <= j:\n left = min(self.range[i][0], left)\n right = max(self.range[j][1], right)\n \n self.range[i:j+1] = [(left, right)]\n\n def queryRange(self, left, right):\n i = bisect.bisect_left(self.range, (left, float('inf')))\n if i: i -= 1\n return bool(self.range) and self.range[i][0] <= left and self.range[i][1] >= right\n \n def removeRange(self, left, right):\n i, j = self.get_bounds(left, right)\n merge = []\n for k in range(i, j+1):\n if self.range[k][0] < left:\n merge.append((self.range[k][0], left))\n if right < self.range[k][1]:\n merge.append((right, self.range[k][1]))\n self.range[i:j+1] = merge\n\n # Your RangeModule object will be instantiated and called as such:\n # obj = RangeModule()\n # obj.addRange(left,right)\n # param_2 = obj.queryRange(left,right)\n # obj.removeRange(left,right)\n","sub_path":"Leetcode/leetcode715 Range 模块.py","file_name":"leetcode715 Range 模块.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
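A brief usage sketch for the class above, mirroring the LeetCode 715 example (half-open intervals):

obj = RangeModule()
obj.addRange(10, 20)
obj.removeRange(14, 16)
print(obj.queryRange(10, 14))  # True: [10, 14) is fully tracked
print(obj.queryRange(13, 15))  # False: [14, 15) was removed
print(obj.queryRange(16, 17))  # True: [16, 17) is still tracked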
+{"seq_id":"119453466","text":"# LRU Page Replacement Policy\n# Least Recently Used\n# Ayush Jain - 2017UCP1168\nimport sys\nfhand = open(\"test.dat\",\"r\")\nnumPages = fhand.readline()\nnumPages = int(numPages)\ntmp = list(map(int,fhand.readline().split()))\nif(tmp[-1] != -1):\n print(\"Incorrect input format..Try Again..Exiting\")\n sys.exit(1)\ntmp = tmp[:-1]\nref_string = tmp\n############# Input from file done ##############\nbool_arr = [0 for i in range(100)]\nqueue = [] ; count = 0\nnum_Hits = 0 ; ttt = 0\nfor i in range(len(ref_string)):\n count += 1 ; ttt += 1\n tmp = ref_string[i]\n if(bool_arr[tmp] == 1):\n print(\"Page Requested : \",tmp,\" - Hit\",sep='')\n queue.remove(tmp)\n queue.append(tmp)\n num_Hits += 1\n else:\n print(\"Page Requested : \",tmp,\" - Miss\",sep='')\n bool_arr[tmp] = 1\n if(len(queue) < numPages):\n queue.append(tmp)\n else:\n x = queue[0]\n bool_arr[x] = 0\n queue = queue[1:]\n queue.append(tmp)\n if(ttt == 25):\n print(\"Hit Ratio = \",(num_Hits/count)*100,\"%\")\n ttt = 0\nprint(\"Hit Ratio = \",(num_Hits/count)*100,\"%\")\n","sub_path":"Assign_12_Memory_Management/Page_Replacement/LRU.py","file_name":"LRU.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
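The LRU script above reads test.dat, whose first line is the number of page frames and whose second line is a space-separated reference string terminated by -1; a hypothetical writer for such a file (the values are arbitrary):

with open("test.dat", "w") as fhand:
    fhand.write("3\n")                          # number of page frames
    fhand.write("7 0 1 2 0 3 0 4 2 3 0 -1\n")  # reference string; -1 marks the end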
+{"seq_id":"314701726","text":"#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\nfrom telegram import PhotoSize, UserProfilePhotos\n\n\nclass TestUserProfilePhotos(object):\n total_count = 2\n photos = [\n [\n PhotoSize('file_id1', 'file_un_id1', 512, 512),\n PhotoSize('file_id2', 'file_un_id1', 512, 512)\n ],\n [\n PhotoSize('file_id3', 'file_un_id3', 512, 512),\n PhotoSize('file_id4', 'file_un_id4', 512, 512)\n ]\n ]\n\n def test_de_json(self, bot):\n json_dict = {\n 'total_count': 2,\n 'photos': [[y.to_dict() for y in x] for x in self.photos]\n }\n user_profile_photos = UserProfilePhotos.de_json(json_dict, bot)\n assert user_profile_photos.total_count == self.total_count\n assert user_profile_photos.photos == self.photos\n\n def test_to_dict(self):\n user_profile_photos = UserProfilePhotos(self.total_count, self.photos)\n user_profile_photos_dict = user_profile_photos.to_dict()\n assert user_profile_photos_dict['total_count'] == user_profile_photos.total_count\n for ix, x in enumerate(user_profile_photos_dict['photos']):\n for iy, y in enumerate(x):\n assert y == user_profile_photos.photos[ix][iy].to_dict()\n","sub_path":"tests/test_userprofilephotos.py","file_name":"test_userprofilephotos.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"148635036","text":"import ElementTree2 as ET\nfrom ROOT import TLorentzVector, TCanvas, TH1F,TLegend,gStyle, TLatex\n\ntree1 = ET.parse('pp_h3_h1h2_h1aa_h2bb_lhapdf.lhe')\nroot1=tree1.getroot()\n\ntree2 = ET.parse('pp_h3_h1h2_h1bb_h2aa_lhapdf.lhe')\nroot2=tree2.getroot()\n\n\neta_b_l1=[]\neta_bbar_l1=[]\nphi_b_l1=[]\nphi_bbar_l1=[]\nphi_a_l1=[]\nphi_a2_l1=[]\neta_a_l1=[]\neta_a2_l1=[]\npt_b_l1=[]\npt_a2_l1=[]\npt_bbar_l1=[]\npt_a_l1=[]\nm_1_H1_l1=[]\nm_1_H2_l1=[]\nm_1_H3_l1=[]\n\n\neta_b_l2=[]\neta_bbar_l2=[]\nphi_b_l2=[]\nphi_bbar_l2=[]\nphi_a_l2=[]\nphi_a2_l2=[]\neta_a_l2=[]\neta_a2_l2=[]\npt_b_l2=[]\npt_a2_l2=[]\npt_bbar_l2=[]\npt_a_l2=[]\nm_1_H1_l2=[]\nm_1_H2_l2=[]\nm_1_H3_l2=[]\n\n\n\ngStyle.SetFrameLineWidth(3)\ngStyle.SetOptTitle(1)\ngStyle.SetOptStat(111)\ngStyle.SetFillColor(2)\ngStyle.SetLineWidth(1)\ngStyle.SetPadColor(1)\n\n#legend=TLegend(.63,.69,.87,.89,\"\",\"#gamma #gamma\")\n#legend=TLegend(0.57, 0.5, 0.94,0.65,\"\",\"b b~\")\n\nc=TCanvas(\"c\",\"First canvas\",2000,1900)\n\ncmsname=TLatex(0.15,1.85,\"Trials\")\n#cmsname=TLatex(0.15,1.85,\"CMS #it{#bf{Preliminary}}\")\ncmsname.SetTextSize(0.036)\ncmsname.SetTextAlign(12)\ncmsname.SetNDC(1)\ncmsname.SetTextFont(61)\n#lhefdata=LHEFData(float(root.attrib['version']))\n#lhefdata=LHEFData(float(root.attrib['version']))\nfor child in root1:\n if(child.tag=='event'):\n lines=child.text.strip().split('\\n')\n event_header=lines[0].strip()\n num_part=int(event_header.split()[0].strip())\n\n a1_1=[s for s in lines if s.split()[0]=='22']\n a1=a1_1[1::2]\n a2=a1_1[0::2]\n b1=[s for s in lines if s.split()[0]=='5']\n bbar1=[s for s in lines if s.split()[0]=='-5']\n\n px3_l1= float (a1[0].split()[6])\n py3_l1= float (a1[0].split()[7])\n pz3_l1= float (a1[0].split()[8])\n e3_l1= float (a1[0].split()[9])\n\n px4_l1= float (a2[0].split()[6])\n py4_l1= float (a2[0].split()[7])\n pz4_l1= float (a2[0].split()[8])\n e4_l1= float (a2[0].split()[9])\n\n p3=TLorentzVector(px3_l1,py3_l1,pz3_l1,e3_l1)\n p4=TLorentzVector(px4_l1,py4_l1,pz4_l1,e4_l1)\n #h1 constructed from aa\n pb1=p3+p4\n m_1_H1_l1.append(pb1.M())\n eta_a_l1.append(p3.Eta())\n pt_a_l1.append(p3.Pt())\n eta_a2_l1.append(p4.Eta())\n pt_a2_l1.append(p4.Pt())\n phi_a_l1.append(p3.Phi())\n phi_a2_l1.append(p4.Phi())\n\n px5_l1= float (b1[0].split()[6])\n py5_l1= float (b1[0].split()[7])\n pz5_l1= float (b1[0].split()[8])\n e5_l1= float (b1[0].split()[9])\n\n px6_l1= float (bbar1[0].split()[6])\n py6_l1= float (bbar1[0].split()[7])\n pz6_l1= float (bbar1[0].split()[8])\n e6_l1= float (bbar1[0].split()[9])\n\n p5=TLorentzVector(px5_l1,py5_l1,pz5_l1,e5_l1)\n p6=TLorentzVector(px6_l1,py6_l1,pz6_l1,e6_l1)\n #h2 constructed from bb~\n pb2=p5+p6\n m_1_H2_l1.append(pb2.M())\n eta_b_l1.append(p5.Eta())\n eta_bbar_l1.append(p6.Eta())\n pt_b_l1.append(p5.Pt())\n pt_bbar_l1.append(p6.Pt())\n phi_b_l1.append(p5.Phi())\n phi_bbar_l1.append(p6.Phi())\n\nfor child in root2:\n if(child.tag=='event'):\n lines=child.text.strip().split('\\n')\n event_header=lines[0].strip()\n num_part=int(event_header.split()[0].strip())\n\n a1_1=[s for s in lines if s.split()[0]=='22']\n a1=a1_1[1::2]\n a2=a1_1[0::2]\n b1=[s for s in lines if s.split()[0]=='5']\n bbar1=[s for s in lines if s.split()[0]=='-5']\n\n px3_l2= float (a1[0].split()[6])\n py3_l2= float (a1[0].split()[7])\n pz3_l2= float (a1[0].split()[8])\n e3_l2= float (a1[0].split()[9])\n\n px4_l2= float (a2[0].split()[6])\n py4_l2= float (a2[0].split()[7])\n pz4_l2= float (a2[0].split()[8])\n e4_l2= float (a2[0].split()[9])\n\n 
p3=TLorentzVector(px3_l2,py3_l2,pz3_l2,e3_l2)\n p4=TLorentzVector(px4_l2,py4_l2,pz4_l2,e4_l2)\n #h2 constructed from aa\n pb1=p3+p4\n m_1_H2_l2.append(pb1.M())\n eta_a_l2.append(p3.Eta())\n pt_a_l2.append(p3.Pt())\n eta_a2_l2.append(p4.Eta())\n pt_a2_l2.append(p4.Pt())\n phi_a_l2.append(p3.Phi())\n phi_a2_l2.append(p4.Phi())\n\n px5_l2= float (b1[0].split()[6])\n py5_l2= float (b1[0].split()[7])\n pz5_l2= float (b1[0].split()[8])\n e5_l2= float (b1[0].split()[9])\n\n px6_l2= float (bbar1[0].split()[6])\n py6_l2= float (bbar1[0].split()[7])\n pz6_l2= float (bbar1[0].split()[8])\n e6_l2= float (bbar1[0].split()[9])\n\n p5=TLorentzVector(px5_l2,py5_l2,pz5_l2,e5_l2)\n p6=TLorentzVector(px6_l2,py6_l2,pz6_l2,e6_l2)\n #h1 constructed from bb~\n pb2=p5+p6\n m_1_H1_l2.append(pb2.M())\n eta_b_l2.append(p5.Eta())\n eta_bbar_l2.append(p6.Eta())\n pt_b_l2.append(p5.Pt())\n pt_bbar_l2.append(p6.Pt())\n phi_b_l2.append(p5.Phi())\n phi_bbar_l2.append(p6.Phi())\n\n\n\nc.SetLogy()\n\nh1_l1=TH1F('Invariant Mass of H1 (#gamma #gamma)',\"\",1000,0,1200)\nfor i in m_1_H1_l1:\n h1_l1.Fill(i)\n\nh14_l1=TH1F('Invariant Mass of H2 (bb~)',\"\",1000,0,1200)\nfor i in m_1_H2_l1:\n h14_l1.Fill(i)\n\n\nh2_l1=TH1F('#eta_{#gamma1}',\"\",100,-5,5)\nfor i in eta_a_l1:\n h2_l1.Fill(i)\n\nh9_l1=TH1F('#eta_{#gamma2}',\"\",100,-5,5)\nfor i in eta_a2_l1:\n h9_l1.Fill(i)\n\nh3_l1=TH1F('#eta_{b}',\"\",100,-5,5)\nfor i in eta_b_l1:\n h3_l1.Fill(i)\n\nh7_l1=TH1F('#eta_{b~}',\"\",100,-5,5)\nfor i in eta_bbar_l1:\n h7_l1.Fill(i)\n\nh4_l1=TH1F('P_{T#gamma1}',\"\",100,0,1000)\nfor i in pt_a_l1:\n h4_l1.Fill(i)\n\nh8_l1=TH1F('P_{T#gamma2}',\"\",100,0,1000)\nfor i in pt_a2_l1:\n h8_l1.Fill(i)\n\nh5_l1=TH1F('Pt of b',\"\",100,0,1000)\nfor i in pt_b_l1:\n h5_l1.Fill(i)\n\nh6_l1=TH1F('Pt of bbar',\"\",100,0,1000)\nfor i in pt_bbar_l1:\n h6_l1.Fill(i)\n\nh10_l1=TH1F('Phi of a',\"\",10,-4,4)\nfor i in phi_a_l1:\n h10_l1.Fill(i)\n\nh11_l1=TH1F('Phi of a2',\"\",10,-4,5)\nfor i in phi_a2_l1:\n h11_l1.Fill(i)\n\nh12_l1=TH1F('Phi of b',\"\",10,-4,4)\nfor i in phi_b_l1:\n h12_l1.Fill(i)\nh13_l1=TH1F('Phi of bbar',\"\",10,-4,4)\nfor i in phi_bbar_l1:\n h13_l1.Fill(i)\n\n\n\n\nh1_l2=TH1F('Invariant Mass of H1 (bb~)',\"\",1000,0,1200)\nfor i in m_1_H1_l2:\n h1_l2.Fill(i)\n\nh14_l2=TH1F('Invariant Mass of H2 (#gamma #gamma)',\"\",1000,0,1200)\nfor i in m_1_H2_l2:\n h14_l2.Fill(i)\n\n\nh2_l2=TH1F('#eta_{#gamma1}',\"\",100,-5,5)\nfor i in eta_a_l2:\n h2_l2.Fill(i)\n\nh9_l2=TH1F('#eta_{#gamma2}',\"\",100,-5,5)\nfor i in eta_a2_l2:\n h9_l2.Fill(i)\n\nh3_l2=TH1F('#eta_{b}',\"\",100,-5,5)\nfor i in eta_b_l2:\n h3_l2.Fill(i)\n\nh7_l2=TH1F('#eta_{b~}',\"\",100,-5,5)\nfor i in eta_bbar_l2:\n h7_l2.Fill(i)\n\nh4_l2=TH1F('P_{T#gamma1}',\"\",100,0,1000)\nfor i in pt_a_l2:\n h4_l2.Fill(i)\n\nh8_l2=TH1F('P_{T#gamma2}',\"\",100,0,1000)\nfor i in pt_a2_l2:\n h8_l2.Fill(i)\n\nh5_l2=TH1F('Pt of b',\"\",100,0,1000)\nfor i in pt_b_l2:\n h5_l2.Fill(i)\n\nh6_l2=TH1F('Pt of bbar',\"\",100,0,1000)\nfor i in pt_bbar_l2:\n h6_l2.Fill(i)\n\nh10_l2=TH1F('Phi of a',\"\",10,-4,4)\nfor i in phi_a_l2:\n h10_l2.Fill(i)\n\n#h11_l2=TH1F('Phi of a2',\"\",10,-4,5)\n#for i in phi_a2_l2:\n# h11_l2.Fill(i)\n\nh12_l2=TH1F('Phi of b',\"\",10,-4,4)\nfor i in phi_b_l2:\n h12_l2.Fill(i)\nh13_l2=TH1F('Phi of bbar',\"\",10,-4,4)\nfor i in phi_bbar_l2:\n h13_l2.Fill(i)\n\n\n\n'''\ntps1=TPaveStats()\nh1.FindObject(\"stats\")\ntps1.SetName(\"Hist1 
Stats\")\nX1=tps1.GetX1NDC()\nY1=tps1.GetY1NDC()\nX2=tps1.GetX2NDC()\nY2=tps1.GetY2NDC()\n\ntps2=TPaveStats()\nh14.FindObject(\"stats\")\ntps2.SetTextColor(kRed)\ntps2.SetLineColor(kRed)\ntps2.SetX1NDC(X1)\ntps2.SetX2NDC(X2)\ntps2.SetY1NDC(Y1-(Y2-Y1))\ntps2.SetY2NDC(Y1)\n'''\n'''\nlegend=TLegend(0.1,0.1,0.3,0.3)\nlegend.SetHeader(\"Legend\")\nlegend.AddEntry(h2,\"#eta_{#gamma1}\",\"l\")\nlegend.AddEntry(h9,\"#eta_{#gamma2}\",\"l\")\nlegend.AddEntry(h3,\"#eta_{b}\",\"l\")\nlegend.AddEntry(h7,\"#eta_{b~}\",\"l\")\nlegend.AddEntry(h4,\"pt_{#gamma1}\",\"l\")\nlegend.AddEntry(h8,\"pt_{#gamma2}\",\"l\")\nlegend.AddEntry(h4,\"pt_{#gamma1}\",\"l\")\nlegend.AddEntry(h8,\"pt_{#gamma2}\",\"l\")\n'''\n\nh1_l1.SetXTitle(\"M_h1_aa [GeV]\")\nh1_l1.SetYTitle(\"Events\")\nh1_l1.SetLineColor(6)\nh1_l2.SetXTitle(\"M_h1_bb [GeV]\")\nh1_l2.SetYTitle(\"events\")\nh1_l2.SetLineColor(4)\nh1_l1.DrawNormalized(\"hist\")\nh1_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"Mass_combine_h1.png\")\nc.SaveAs(\"Mass_combine_h1.root\")\n\nh14_l1.SetXTitle(\"M_h2_bb [GeV]\")\nh14_l1.SetYTitle(\"Events\")\nh14_l1.SetLineColor(6)\nh14_l2.SetXTitle(\"M_h1_aa [GeV]\")\nh14_l2.SetYTitle(\"events\")\nh14_l2.SetLineColor(4)\nh14_l1.DrawNormalized(\"hist\")\nh14_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"Mass_combine_h2.png\")\nc.SaveAs(\"Mass_combine_h2.root\")\n\nh2_l1.SetXTitle(\"#eta_{#gamma}\")\nh2_l1.SetYTitle(\"Events\")\nh2_l1.SetLineColor(6)\nh2_l2.SetXTitle(\"#eta_{#gamma}\")\nh2_l2.SetYTitle(\"events\")\nh2_l2.SetLineColor(4)\nh2_l1.DrawNormalized(\"hist\")\nh2_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"eta_combine_gamma.png\")\nc.SaveAs(\"eta_combine_gamma.root\")\n\nh3_l1.SetXTitle(\"#eta_{b}\")\nh3_l1.SetYTitle(\"Events\")\nh3_l1.SetLineColor(6)\nh3_l2.SetXTitle(\"#eta_{b}\")\nh3_l2.SetYTitle(\"events\")\nh3_l2.SetLineColor(4)\nh3_l1.DrawNormalized(\"hist\")\nh3_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"eta_combine_b.png\")\nc.SaveAs(\"eta_combine_b.root\")\n\nh4_l1.SetXTitle(\"p_{T#gamma} 
\")\nh4_l1.SetYTitle(\"Events\")\nh4_l1.SetLineColor(6)\nh4_l2.SetXTitle(\"p_{T#gamma}\")\nh4_l2.SetYTitle(\"events\")\nh4_l2.SetLineColor(4)\nh4_l1.DrawNormalized(\"hist\")\nh4_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"pT_combine_gamma.png\")\nc.SaveAs(\"pT_combine_gamma.root\")\n\nh5_l1.SetXTitle(\"#p_{T}_b\")\nh5_l1.SetYTitle(\"Events\")\nh5_l1.SetLineColor(6)\nh5_l2.SetXTitle(\"#p_{T}_b\")\nh5_l2.SetYTitle(\"events\")\nh5_l2.SetLineColor(4)\nh5_l1.DrawNormalized(\"hist\")\nh5_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"pT_combine_b.png\")\nc.SaveAs(\"pT_combine_b.root\")\n\nh6_l1.SetXTitle(\"#p_{T}_b~\")\nh6_l1.SetYTitle(\"Events\")\nh6_l1.SetLineColor(6)\nh6_l2.SetXTitle(\"#p_{T}_b~\")\nh6_l2.SetYTitle(\"events\")\nh6_l2.SetLineColor(4)\nh6_l1.DrawNormalized(\"hist\")\nh6_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"pT_combine_b~.png\")\nc.SaveAs(\"pT_combine_b~.root\")\n\nh7_l1.SetXTitle(\"#eta_{b~}\")\nh7_l1.SetYTitle(\"Events\")\nh7_l1.SetLineColor(6)\nh7_l2.SetXTitle(\"#eta_{b~}\")\nh7_l2.SetYTitle(\"events\")\nh7_l2.SetLineColor(4)\nh7_l1.DrawNormalized(\"hist\")\nh7_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"eta_combine_b~.png\")\nc.SaveAs(\"eta_combine_b~.root\")\n\nh10_l1.SetXTitle(\"#phi_{#gamma2}\")\nh10_l1.SetYTitle(\"Events\")\nh10_l1.SetLineColor(6)\nh10_l2.SetXTitle(\"#phi_{#gamma2}\")\nh10_l2.SetYTitle(\"events\")\nh10_l2.SetLineColor(4)\nh10_l1.DrawNormalized(\"hist\")\nh10_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"phi_combine_gamma.png\")\nc.SaveAs(\"phi_combine_gamma.root\")\n\nh12_l1.SetXTitle(\"#phi_{b}\")\nh12_l1.SetYTitle(\"Events\")\nh12_l1.SetLineColor(6)\nh12_l2.SetXTitle(\"#phi_{b}\")\nh12_l2.SetYTitle(\"events\")\nh12_l2.SetLineColor(4)\nh12_l1.DrawNormalized(\"hist\")\nh12_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"phi_combine_b.png\")\nc.SaveAs(\"phi_combine_b.root\")\n\nh13_l1.SetXTitle(\"#phi_{b~}\")\nh13_l1.SetYTitle(\"Events\")\nh13_l1.SetLineColor(6)\nh13_l2.SetXTitle(\"#phi_{b~}\")\nh13_l2.SetYTitle(\"events\")\nh13_l2.SetLineColor(4)\nh13_l1.DrawNormalized(\"hist\")\nh13_l2.DrawNormalized(\"hist&SAMES\")\nc.SaveAs(\"phi_combine_b~.png\")\nc.SaveAs(\"phi_combine_b~.root\")\n","sub_path":"pp_h3_h1h2_h1aa_h2bb_lhapdf/combined.py","file_name":"combined.py","file_ext":"py","file_size_in_byte":11081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"289425729","text":"import collections\nimport itertools\nimport heapq\n\nclass Maze:\n \"\"\" Class finding its way through the maze. \"\"\"\n\n def __init__(self,fileName, multi_robots = False):\n self.grid = self.read_map(fileName)\n self.y, self.x, self.total_keys = self.find_start_and_keys()\n self.pos = self.initialize_position(multi_robots)\n\n def compute_shortest_path(self):\n \"\"\" Traverse the grid by first looking for the reachable keys and select\n nodes with shortest distance while traversing.\"\"\"\n\n # Initial node - depth, position, collection of keys\n q = [(0, self.pos, frozenset())]\n\n # Visited nodes\n visited = set()\n\n while q:\n\n # Obtain shortest the node\n depth, current_pos, keys = heapq.heappop(q)\n\n # We are done if all keys are found\n if keys == self.total_keys:\n return depth\n\n # Skip node if visited before\n if (current_pos, keys) in visited:\n continue\n\n # Update visited node\n visited.add( (current_pos, keys) )\n\n # Go through positions\n for i, (current_y, current_x) in enumerate(current_pos):\n # Go through the reachable keys for each position\n for d, y, x, key in self.reachable_keys(current_y,current_x,keys):\n # Update positions list\n new_pos = current_pos[0:i] + ((y,x,),) + current_pos[i+1:]\n # Push to heap new depth, new position, and collected keys\n heapq.heappush(q, (depth+d, new_pos, keys | frozenset([key])))\n\n def reachable_keys(self, y_, x_, keys):\n \"\"\" Given start position y_,x_, and keys find other reachable keys \"\"\"\n\n # Initialize node to go through\n q = collections.deque( [(y_, x_, 0)] )\n\n # Initialize visited nodes\n visited = set()\n\n # Search directions\n directions = ( (1,0), (-1, 0), (0,1), (0, -1))\n\n while q:\n\n # Get the node\n y, x, depth = q.popleft()\n\n # If we reach a key which we do not have, yield it\n if self.grid[y][x].islower() and self.grid[y][x] not in keys:\n yield depth, y, x, self.grid[y][x]\n continue\n\n # Search neighbors\n for dy, dx in directions:\n y_new, x_new = y + dy, x + dx\n\n # Skip if we have visited the node\n if ((y_new,x_new)) in visited:\n continue\n\n # Add to visited node\n visited.add((y_new,x_new))\n\n value = self.grid[y_new][x_new]\n\n if value != '#' and (not value.isupper() or value.lower() in keys):\n q.append( (y_new, x_new, depth + 1) )\n\n def initialize_position(self, multi_robots):\n \"\"\" Set the initial position depending on part one or part two \"\"\"\n if multi_robots:\n y, x = self.y, self.x\n self.grid[y-1] = self.grid[y-1][:x] + '#' + self.grid[y-1][x+1:]\n self.grid[y ] = self.grid[y][:x-1] + '###' + self.grid[y][x+2:]\n self.grid[y+1] = self.grid[y+1][:x] + '#' + self.grid[y+1][x+1:]\n return ( (self.y-1, self.x-1), (self.y-1, self.x+1), \\\n (self.y+1, self.x-1), (self.y+1, self.x+1), )\n else:\n return ( (self.y,self.x),)\n\n def find_start_and_keys(self):\n \"\"\" Figure out middle of the map and how many keys are present \"\"\"\n # Flatten grid\n linear_grid = list(itertools.chain.from_iterable(self.grid))\n\n # Width and height of grid\n height, width = len(self.grid), len(self.grid[0])\n start_index = linear_grid.index('@')\n\n # Divide index with length and width\n y, x = start_index // width, start_index % width\n \n # If the key is a lower case letter then include\n total_keys = set(key for key in linear_grid if key.islower())\n\n return y, x, total_keys\n\n def read_map(self,fileName):\n \"\"\" Read the map \"\"\"\n with open(fileName) as f: return f.read().splitlines()\n def print_map(self):\n \"\"\" Print the map \"\"\"\n 
for row in self.grid: print(\"\".join(row))","sub_path":"2019/day18/python/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
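A small usage sketch for the Maze class above; the map is the first example from Advent of Code 2019 day 18, whose published answer is 8 (the file name is illustrative):

with open("example.txt", "w") as f:
    f.write("#########\n#b.A.@.a#\n#########\n")

maze = Maze("example.txt")
print(maze.compute_shortest_path())  # expected: 8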
+{"seq_id":"337912116","text":"def send_request(self, data, **message_kwargs):\n data = to_list(data)\n become = self._become\n if become:\n self.connection.queue_message('vvvv', 'firing event: on_become')\n data.insert(0, {\n 'cmd': 'enable',\n 'input': self._become_pass,\n })\n output = message_kwargs.get('output', 'text')\n request = request_builder(data, output)\n headers = {\n 'Content-Type': 'application/json-rpc',\n }\n (response, response_data) = self.connection.send('/command-api', request, headers=headers, method='POST')\n try:\n response_data = json.loads(to_text(response_data.getvalue()))\n except ValueError:\n raise ConnectionError('Response was not valid JSON, got {0}'.format(to_text(response_data.getvalue())))\n results = handle_response(response_data)\n if become:\n results = results[1:]\n if (len(results) == 1):\n results = results[0]\n return results","sub_path":"Data Set/bug-fixing-5/84d9b3e58986d70ca64539e816e342a9d7d0b888--fix.py","file_name":"84d9b3e58986d70ca64539e816e342a9d7d0b888--fix.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"464049159","text":"# -*- coding: utf-8 -*-\nimport time\nimport numpy as np\nimport geatpy as ea # import the geatpy library\nfrom sys import path as paths\nfrom os import path as currentPath\npaths.append(currentPath.split(currentPath.realpath(__file__))[0])\nfrom updateNDSet import updateNDSet # import the external function required by this algorithm template\n\nclass moea_awGA_templet(ea.MoeaAlgorithm):\n \n \"\"\"\nmoea_awGA_templet : class - awGA algorithm template for multi-objective evolutionary optimization\n \nAlgorithm description:\n Uses awGA to perform multi-objective optimization.\n\nNotes on using this template:\n The objective function called by this template has the form aimFunc(pop),\n where pop is an object of the Population class representing a population.\n The Phen attribute of pop (the phenotype of the population's chromosomes) is\n equivalent to the matrix formed by the decision variables of all individuals.\n Based on Phen, the function computes the matrix of objective values of all\n individuals and assigns it to the ObjV attribute of pop.\n If there are constraints, the constraint-violation matrix CV should be\n computed and assigned to the CV attribute of pop (see the Geatpy data\n structures for details).\n The function returns nothing; the computed objective values are stored in\n the population object's ObjV attribute, and the constraint-violation matrix\n in its CV attribute.\n For example, if population is a population object, calling\n aimFunc(population) completes the computation of the objective values,\n after which population.ObjV holds the objective values and population.CV\n the constraint-violation matrix.\n If your code does not follow this convention, please modify this algorithm\n template or define a new one.\n \nReferences:\n [1] Gen M, Cheng R. Genetic Algorithms and Engineering Optimization[M].\n New York: John Wiley & Sons, 2000\n \n \"\"\"\n \n def __init__(self, problem, population):\n ea.MoeaAlgorithm.__init__(self, problem, population) # call the parent constructor first\n if not isinstance(population, ea.Population): # robust form of the original type-string check\n raise RuntimeError('The population object passed in must be of type Population')\n self.name = 'awGA'\n self.selFunc = 'tour' # selection operator: tournament selection\n if population.Encoding == 'P':\n self.recFunc = 'xovpmx' # partially matched crossover\n self.mutFunc = 'mutinv' # inversion mutation\n elif population.Encoding == 'BG':\n self.recFunc = 'xovud' # uniform crossover\n self.mutFunc = 'mutbin' # binary mutation\n elif population.Encoding == 'RI':\n self.recFunc = 'xovud' # uniform crossover\n self.mutFunc = 'mutuni' # uniform mutation\n else:\n raise RuntimeError('Encoding must be BG, RI or P.')\n self.pc = 1 # recombination probability\n self.pm = 1 # mutation probability for the whole chromosome\n self.MAXSIZE = population.sizes # size limit of the non-dominated solution set\n\n def run(self):\n #========================== initialization ===========================\n problem = self.problem\n population = self.population\n NIND = population.sizes\n MAXSIZE = self.MAXSIZE\n if MAXSIZE is None: # check MAXSIZE; by default use twice the population size\n MAXSIZE = 2 * NIND\n self.initialization() # initialize some dynamic parameters of the algorithm template\n #=========================== prepare evolution ============================\n if population.Chrom is None:\n population.initChrom(NIND) # initialize the chromosome matrix (includes decoding; see the Population class source)\n else:\n population.Phen = population.decoding() # decode chromosomes\n self.problem.aimFunc(population) # compute the population's objective values\n NDSet = updateNDSet(population, problem.maxormins, MAXSIZE) # compute fitness and obtain the global non-dominated population\n self.evalsNum = population.sizes # record the number of evaluations\n #=========================== start evolution ============================\n while self.terminated(population) == False:\n uniChrom = np.unique(NDSet.Chrom, axis = 0)\n repRate = 1 - uniChrom.shape[0] / NDSet.sizes # duplication rate within NDSet\n # select individuals to evolve into offspring\n offspring = population[ea.selecting(self.selFunc, population.FitnV, NIND)]\n offspring.Chrom = ea.recombin(self.recFunc, offspring.Chrom, self.pc) # recombination\n offspring.Chrom = ea.mutate(self.mutFunc, offspring.Encoding, offspring.Chrom, offspring.Field, self.pm) # mutation\n if population.Encoding != 'BG' and repRate > 0.1:\n offspring.Chrom = ea.mutate('mutgau', offspring.Encoding, offspring.Chrom, offspring.Field, self.pm, False, 3) # Gaussian mutation with the standard deviation scaled up 3x\n offspring.Phen = offspring.decoding() # decode chromosomes\n self.problem.aimFunc(offspring) # compute objective values of the evolved individuals\n self.evalsNum += offspring.sizes # update the evaluation count\n # merge the parent population with the offspring\n population = population + offspring\n NDSet = updateNDSet(population, problem.maxormins, MAXSIZE, NDSet) # compute fitness of the merged population and update NDSet\n # keep individuals for the next generation\n population = population[ea.selecting('dup', population.FitnV, NIND)] # selection; keep NIND individuals\n NDSet = NDSet[np.where(np.all(NDSet.CV <= 0, 1))[0]] # finally, remove all infeasible solutions\n self.passTime += time.time() - self.timeSlot # update the elapsed-time record\n #========================= plotting and output =========================\n if self.drawing != 0:\n ea.moeaplot(NDSet.ObjV, 'Pareto Front', True)\n # return the Pareto-optimal set\n return NDSet\n ","sub_path":"geatpy/templates/moeas/awGA/moea_awGA_templet.py","file_name":"moea_awGA_templet.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"310043710","text":"import urllib.request\nimport os.path\n\nurl_base = 'http://yann.lecun.com/exdb/mnist/'\nkey_file = {\n 'train_img':'train-images-idx3-ubyte.gz',\n 'train_label':'train-labels-idx1-ubyte.gz',\n 'test_img':'t10k-images-idx3-ubyte.gz',\n 'test_label':'t10k-labels-idx1-ubyte.gz'\n}\ndataset_dir = os.path.dirname(os.path.abspath(__file__))\n\ndef _download(filename):\n file_path = dataset_dir + '/' + filename\n if os.path.exists(file_path):\n return print('already exist')\n print('Downloading ' + filename + ' ...')\n urllib.request.urlretrieve(url_base + filename, file_path)\n print('Done')\n\ndef download_mnist():\n for v in key_file.values():\n _download(v)\n\ndownload_mnist()\n","sub_path":"intelligentSystemTraining/MnistDownload.py","file_name":"MnistDownload.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"98246711","text":"import tensorflow as tf\n\n\"\"\"\ntfrecord_list : a list of tfrecord file\nparse_function: A function with a signature parse_function( example_proto )\nbatch_size : an integer denoting the size of the batch\nparser function is supplied by client and therefore this\nprepare_dataset is independent of the task in hand.\nReturns the Dataset\n\"\"\"\n\n\n#########################################\n# gets an input list of tfrecord file names\n# and prepares a datset\n#\n#########################################\n\n\ndef prepare_dataset(tfrecord_list, parse_function, batch_size ):\n\n dataset = tf.data.TFRecordDataset(tfrecord_list)\n\n dataset = dataset.shuffle(buffer_size=5000)\n\n dataset = dataset.map(parse_function)\n\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.repeat()\n\n return dataset","sub_path":"tfread/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
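A hypothetical parse_function to pair with prepare_dataset above; the feature names and shapes are invented for illustration, only the tf.io calls are standard TensorFlow API:

import tensorflow as tf

def parse_function(example_proto):
    # Describe the (assumed) features stored in each serialized tf.train.Example
    feature_spec = {
        "image": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    image = tf.io.decode_jpeg(parsed["image"], channels=3)
    return image, parsed["label"]

# dataset = prepare_dataset(["train.tfrecord"], parse_function, batch_size=32)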
+{"seq_id":"544079400","text":"\"\"\"\n Module that defines the `NetworkGroup` object and methods to read, write and manipulate it\n\"\"\"\n\nfrom collections.abc import Collection\nfrom typing import Any, Dict, Iterator, List, Union\n\nimport networkx as nx\nimport numpy as np\nimport simplejson\n\nfrom .network import Network\n\nDType = List[Dict[str, Any]]\n\n\nclass NetworkGroup(Collection):\n \"\"\"\n Class that represents a group of network objects\n These network objects are intended to be visualized together\n\n Parameters\n ----------\n networks : List[Network]\n The collection of networks to be grouped\n\n Attributes\n ----------\n graph : Union[nx.MultiGraph, nx.MultiDiGraph]\n The networkx multi-graph representation of the network\n nodes: DType\n The list of nodes in the network group\n links: DType\n The list of links in the network group\n contexts: DType\n The list of all contexts in the network group\n \"\"\"\n\n def __init__(self, networks: List[Network]) -> None:\n self.nodeid_map: Dict[int, Dict[str, str]] = dict()\n self._networks = networks\n if not networks or [n for n in networks if not isinstance(n, Network)]:\n raise ValueError(\n \"The networks parameter must be a list of one or more networks\"\n )\n self.graph = self._combine_networks(networks)\n\n def __contains__(self, key) -> bool:\n if key in range(len(self)):\n return True\n return False\n\n def __len__(self) -> int:\n return len(self._networks)\n\n def __iter__(self) -> Iterator:\n return iter(self._networks)\n\n def __repr__(self) -> str:\n n_nodes = len(self.nodes)\n n_links = len(self.links)\n n_contexts = len(self.contexts)\n return f\"<NetworkGroup with {n_nodes} nodes, {n_links} links and {n_contexts} contexts>\"\n\n def _combine_nodes(self, all_nodes: Dict[int, DType]) -> DType:\n \"\"\" Combine nodes of individual networks into a single list \"\"\"\n nodes: DType = []\n node_hash: Dict[int, int] = dict() # taxid => nodes.index\n if len(all_nodes) == 1:\n return all_nodes[0]\n for cid, network_nodes in all_nodes.items():\n self.nodeid_map[cid] = dict()\n for node in network_nodes:\n if node[\"taxid\"] not in node_hash:\n id_ = len(nodes)\n id_old = node[\"id\"]\n id_new = f\"id{id_}\"\n nodes.append(\n {**node, **{\"id\": id_new, \"children\": [], \"abundance\": None}}\n )\n node_hash[node[\"taxid\"]] = id_\n self.nodeid_map[cid][id_old] = id_new\n else:\n id_old = node[\"id\"]\n ind = node_hash[node[\"taxid\"]]\n id_new = nodes[ind][\"id\"]\n self.nodeid_map[cid][id_old] = id_new\n return nodes\n\n def _combine_links(self, all_links: Dict[int, DType]) -> DType:\n \"\"\" Combine links of individual networks into a single list \"\"\"\n links = []\n if len(all_links) == 1:\n for link in all_links[0]:\n links.append({**link, \"context_index\": 0})\n return links\n for cid, network_links in all_links.items():\n for link in network_links:\n source, target = link[\"source\"], link[\"target\"]\n new_source = self.nodeid_map[cid][source]\n new_target = self.nodeid_map[cid][target]\n links.append(\n {\n **link,\n **{\n \"source\": new_source,\n \"target\": new_target,\n \"context_index\": cid,\n },\n }\n )\n return links\n\n def _combine_networks(\n self, networks: List[Network]\n ) -> Union[nx.MultiGraph, nx.MultiDiGraph]:\n \"\"\"\n Combine networks into a network group\n\n Parameters\n ----------\n networks : List[Network]\n The list of networks to be grouped\n\n Returns\n -------\n Union[nx.MultiGraph, nx.MultiDiGraph]\n The networkx graph of the network\n \"\"\"\n nodes_dict = dict()\n links_dict = dict()\n contexts = []\n for cid, network in 
enumerate(networks):\n nodes_dict[cid] = network.nodes\n links_dict[cid] = network.links\n contexts.append(network.metadata)\n merged_nodes = self._combine_nodes(nodes_dict)\n merged_links = self._combine_links(links_dict)\n if all([n.graph.is_directed() for n in networks]):\n graph = nx.MultiDiGraph(contexts=contexts)\n else:\n graph = nx.MultiGraph(contexts=contexts)\n for node in merged_nodes:\n graph.add_node(node[\"id\"], **node)\n for link in merged_links:\n graph.add_edge(link[\"source\"], link[\"target\"], **link)\n return graph\n\n @property\n def nodes(self) -> DType:\n \"\"\" The list of nodes in the `NetworkGroup` and their corresponding properties \"\"\"\n return [data for _, data in self.graph.nodes(data=True)]\n\n @property\n def links(self) -> DType:\n \"\"\" The list of links in the `NetworkGroup` and their corresponding properties \"\"\"\n return [data for _, _, data in self.graph.edges(data=True)]\n\n @property\n def contexts(self) -> DType:\n \"\"\" The contexts for the group of networks \"\"\"\n return self.graph.graph[\"contexts\"]\n\n def filter_links(self, pvalue_filter: bool, interaction_filter: bool) -> DType:\n \"\"\"\n The links of the networks after applying filtering\n\n Parameters\n ----------\n pvalue_filter : bool\n If True will use `pvalue_threshold` for filtering\n interaction_filter : bool\n If True will use `interaction_threshold` for filtering\n\n Returns\n -------\n DType\n The list of links in the network after applying thresholds\n \"\"\"\n filtered_links_dict = dict()\n for cid, network in enumerate(self._networks):\n filtered_links_dict[cid] = network.filter_links(\n pvalue_filter=pvalue_filter, interaction_filter=interaction_filter\n )\n merged_filtered_links = self._combine_links(filtered_links_dict)\n return merged_filtered_links\n\n def json(\n self, pvalue_filter: bool = False, interaction_filter: bool = False\n ) -> str:\n \"\"\"\n Returns the network as a `JSON` string\n\n Parameters\n ----------\n pvalue_filter : bool\n If True will use `pvalue_threshold` for filtering\n Default value is False\n interaction_filter : bool\n If True will use `interaction_threshold` for filtering\n Default value is False\n\n Returns\n -------\n str\n The `JSON` string representation of the network\n \"\"\"\n nodes = self.nodes\n links = self.filter_links(\n pvalue_filter=pvalue_filter, interaction_filter=interaction_filter\n )\n contexts = self.contexts\n network = {\"contexts\": contexts, \"nodes\": nodes, \"links\": links}\n return simplejson.dumps(network, indent=2, sort_keys=True, ignore_nan=True)\n\n def write(\n self, fpath: str, pvalue_filter: bool = False, interaction_filter: bool = False\n ) -> None:\n \"\"\"\n Write network to file as JSON\n\n Parameters\n ----------\n fpath : str\n The path to the `JSON` file\n pvalue_filter : bool\n If True will use `pvalue_threshold` for filtering\n Default value is False\n interaction_filter : bool\n If True will use `interaction_threshold` for filtering\n Default value is False\n \"\"\"\n with open(fpath, \"w\") as fid:\n fid.write(\n self.json(\n pvalue_filter=pvalue_filter, interaction_filter=interaction_filter\n )\n )\n\n @classmethod\n def load_json(cls, fpath: str) -> \"NetworkGroup\":\n \"\"\"\n Create a `NetworkGroup` object from network `JSON` file\n\n Parameters\n ----------\n fpath : str\n The path to the network `JSON` file\n\n Returns\n -------\n NetworkGroup\n The instance of the `NetworkGroup` class\n \"\"\"\n with open(fpath, \"r\") as fid:\n raw_data = simplejson.load(fid)\n n_networks = 
len(raw_data[\"contexts\"])\n all_node_dict = {n[\"id\"]: n for n in raw_data[\"nodes\"]}\n data_dict: Dict[int, dict] = {\n n: {\"nodes\": [], \"links\": [], \"metadata\": {}} for n in range(n_networks)\n }\n unique_node_dict: Dict[int, dict] = {n: set() for n in range(n_networks)}\n for cid in range(n_networks):\n data_dict[cid][\"metadata\"] = {**raw_data[\"contexts\"][cid]}\n for link in raw_data[\"links\"]:\n link_cid = link[\"context_index\"]\n source = all_node_dict[link[\"source\"]]\n source_name = link[\"source\"]\n target = all_node_dict[link[\"target\"]]\n target_name = link[\"target\"]\n data_dict[link_cid][\"links\"].append(link)\n if source_name not in unique_node_dict[link_cid]:\n data_dict[link_cid][\"nodes\"].append(source)\n unique_node_dict[link_cid].add(source_name)\n if target_name not in unique_node_dict[link_cid]:\n data_dict[link_cid][\"nodes\"].append(target)\n unique_node_dict[link_cid].add(target_name)\n networks: List[Network] = []\n for cid in range(n_networks):\n metadata = data_dict[cid][\"metadata\"]\n nodes = data_dict[cid][\"nodes\"]\n links = data_dict[cid][\"links\"]\n network_raw_data = {**metadata, \"nodes\": nodes, \"links\": links}\n networks.append(Network.load_json(raw_data=network_raw_data))\n return cls(networks)\n\n def combine_pvalues(self, method: str) -> np.array:\n \"\"\"\n Combine pvalues of links in the network group using Empirical Brown's Method\n\n Parameters\n ----------\n method : str\n\n Returns\n -------\n np.array\n \"\"\"\n pass\n","sub_path":"mindpipe/main/network_group.py","file_name":"network_group.py","file_ext":"py","file_size_in_byte":10740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"29849027","text":"import preprocessing \nimport numpy as np\nimport cv2\n\ndef open_image2(filename, scale_to=[64, 64]):\n \"\"\"Opens an image, returns the preprocessed image (scaled, masked)\"\"\"\n #img = cv2.imread(filename) * cv2.imread(filename.replace('Bmp', 'Msk'))/255\n img = cv2.imread(filename)/255\n #processed_img = np.zeros(list(scale_to)+[3])\n\n # scaling\n # img_w, img_h = img.shape[1], img.shape[0]\n # target_w, target_h = scale_to[1], scale_to[0]\n # factor = target_w / img_w if img_w/img_h > target_w/target_h else target_h / img_h\n # img = cv2.resize(img, None, fx=factor, fy=factor)\n img = cv2.resize(img, tuple(scale_to))\n\n # centering image\n # x, y = int(target_w/2 - img.shape[1]/2), int(target_h/2 - img.shape[0]/2)\n # processed_img[y:y+img.shape[0], x:x+img.shape[1]] = img\n\n # normalising\n processed_img = img.astype(np.float32)\n for c in range(3):\n processed_img[:,:,c] /= np.max(processed_img[:,:,c])\n\n # to grayscale\n processed_img = cv2.cvtColor(\n (processed_img*255).astype(np.uint8), cv2.COLOR_RGB2GRAY)\n processed_img = np.expand_dims(processed_img, -1)\n\n # new_image = cv2.adaptiveThreshold(processed_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \\\n # \tcv2.THRESH_BINARY, 11, 2)\n\n blur = cv2.GaussianBlur(processed_img, (5,5), 0)\n _, new_image = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n new_image = np.expand_dims(new_image, -1)\n #new_image = cv2.Laplacian(new_image, cv2.CV_64F)\n\n return new_image\n\n\ncv2.imshow(\"Test Image\", open_image2(\"English/Img/GoodImg/Bmp/Sample021/img021-00061.png\"))\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"image_viewing.py","file_name":"image_viewing.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"150209897","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport pandas as pd\nfrom requests import get\nfrom bs4 import BeautifulSoup\nfrom datetime import date\n\n\n# +\ndef get_page(url):\n response = get(url)\n html = response.content\n soup = BeautifulSoup(html, \"html.parser\")\n return soup\n\nbase_ff_url = 'https://www.fleaflicker.com/mlb/leagues/'\nleague_ids = ['21579', '21581', '21580', '21582', '21583', '21584', '21585', '21586', '21587', '21588', '21589', \n '21590', '21591', '21592', '21593', '21594', '21595', '21596']\n\nall_teams = []\nfor l in league_ids:\n url = base_ff_url + l\n soup = get_page(url)\n trs = soup.find_all('tr')\n raw_headers = trs[1].find_all('th')\n player_data = trs[2:]\n headers = []\n for header in raw_headers:\n if header.text:\n headers.append(header.text)\n exp_headers = headers + ['league_id', 'league_name', 'team_id'] \n league_name = soup.find_all('li', {'class': 'active'})[1].text.strip()\n for row in player_data:\n d_dict = dict.fromkeys(exp_headers)\n d_dict['league_id'] = l\n d_dict['league_name'] = league_name\n d_dict['Team'] = row.find('td', {'class': 'left'}).text\n d_dict['Owner'] = row.find('td', {'class': 'right'}).text\n d_dict['team_id'] = row.find('a', href=True).get('href')[-6:]\n try:\n d_dict['Rank'] = row.find_all('td', {'class': 'right text-center'})[-1].text\n except IndexError:\n d_dict['Rank'] = row.find_all('td', {'class': 'bottom right text-center'})[-1].text\n heads = exp_headers[2:14]\n if d_dict['Owner'] == 'Take Over':\n stats = row.find_all('span', {'class': 'nowrap'})\n else:\n stats = row.find_all('span', {'class': 'nowrap'})[1:]\n for h, s in zip(heads, stats):\n d_dict[h] = s.text\n all_teams.append(d_dict)\n# -\n\nall_df = pd.DataFrame(all_teams, columns=exp_headers)\nall_df.HR = all_df.HR.astype(int)\nall_df.R = all_df.R.astype(int)\nall_df.RBI = all_df.RBI.astype(int)\nall_df.SB = all_df.SB.astype(int)\nall_df.OBP = all_df.OBP.astype(float)\nall_df.OPS = all_df.OPS.astype(float)\nall_df.SO = all_df.SO.astype(int)\nall_df.SV = all_df.SV.astype(int)\nall_df.HD = all_df.HD.astype(int)\nall_df.ERA = all_df.ERA.astype(float)\nall_df.WHP = all_df.WHP.astype(float)\nall_df.QS = all_df.QS.astype(int)\n\nrank_headers = ['HR', 'R','RBI','SB','OBP','OPS','SO','SV','HD','ERA','WHP','QS']\nfor r in rank_headers:\n if r in ['ERA', 'WHP']:\n all_df[r+'_Points'] = all_df[r].rank(ascending=False)\n else:\n all_df[r+'_Points'] = all_df[r].rank()\n\nall_df['Total_Points'] = all_df.iloc[:,-12:].sum(axis=1)\nall_df['Overall_Rank'] = all_df.Total_Points.rank(ascending=False)\n\nall_df.head()\n\nall_df[all_df.Owner == 'xacex']\n\nt_date = str(date.today())\nall_df.to_csv('current_rankings_'+t_date+'.csv')\n\n\n","sub_path":"FleaFlicker/.ipynb_checkpoints/FleaFlicker Scrape-checkpoint.py","file_name":"FleaFlicker Scrape-checkpoint.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"457995891","text":"# Print the Fibonacci sequence: 1, 1, 2, 3, 5, 8, 13 ...\n# a,b=0,1\n# while b<100:\n# print(b,end=' ')\n# a,b=b,a+b\n\n\n# Let's write a program that evaluates the power series: e^x = 1 + x + x^2 / 2! + x^3 / 3! + ... + x^n / n! (0 < x < 1).\nprint(\"please input a number:\")\nx = float(input())\nn = result = term = 1\n\nwhile n < 100:\n term *= x / n\n result += term\n if term < 0.0001:\n break\n n += 1\n\nprint(\"result is {:.10f}\".format(result))\n","sub_path":"01.新手入门课/循环/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
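The series loop above is easy to sanity-check against the closed form; a quick test with an arbitrary x (math.exp is the reference, and the tolerance reflects the loop's 0.0001 cut-off):

import math

x = 0.5
n = result = term = 1
while n < 100:
    term *= x / n
    result += term
    if term < 0.0001:
        break
    n += 1
# the truncated series should agree with math.exp to roughly the size of the dropped term
assert abs(result - math.exp(x)) < 0.001
print(result, math.exp(x))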
+{"seq_id":"602120168","text":"import pygame\nfrom settings import SOUND_PATH, skill_PATH\nimport os\n\nclass RequestSubject:\n def __init__(self, model):\n self.__observers = []\n self.model = model\n\n def register(self, observer):\n self.__observers.append(observer)\n\n def notify(self, user_request):\n for o in self.__observers:\n o.update(user_request, self.model)\n\n\nclass EnemyGenerator:\n def __init__(self, subject):\n subject.register(self)\n self.cd = 900\n self.max_cd = 900\n self.boss_generate_flag = 1\n\n def update(self, user_request: str, model):\n if(self.cd >= self.max_cd and user_request == \"start new wave\"):\n if model.checkpoint == 1 or model.checkpoint == 2:\n model.enemies.add(self.en_num(model.checkpoint),model)\n self.cd = 0\n elif model.checkpoint == 3 and self.boss_generate_flag == 1:\n model.enemies.add(self.en_num(model.checkpoint),model)\n self.cd = 0\n self.boss_generate_flag = 0\n else:\n self.cd += 1\n \n def en_num(self, checkpoint):\n if checkpoint == 1:\n return 3\n elif checkpoint == 2:\n return 4\n else:\n return 1\n \n\n\nclass Music:\n def __init__(self, subject):\n subject.register(self)\n\n def update(self, user_request: str, model):\n if user_request == \"music\":\n pygame.mixer.music.unpause()\n model.sound.play()\n\n\nclass Muse:\n def __init__(self, subject):\n subject.register(self)\n\n def update(self, user_request: str, model):\n if user_request == \"mute\":\n pygame.mixer.music.pause()\n model.sound.play()\n\n\nclass Hero_howhow:\n def __init__(self, subject):\n subject.register(self)\n self.howhow_music = pygame.mixer.Sound(os.path.join(SOUND_PATH,\"howhow_sound.mp3\"))\n def update(self, user_request: str, model):\n if user_request == \"howhow\":\n if model.money >= 70:\n model.money -= 70\n model.heros.add('howhow', model.hero_level)\n self.howhow_music.set_volume(0.4)\n pygame.mixer.Channel(2).play(self.howhow_music)\n print('summon howhow')\n\nclass Hero_godtone:\n def __init__(self, subject):\n subject.register(self)\n self.godtone_music = pygame.mixer.Sound(os.path.join(SOUND_PATH,\"tone_sound.mp3\"))\n def update(self, user_request: str, model):\n if user_request == \"godtone\":\n if model.money >= 50:\n model.money -= 50\n model.heros.add(\"godtone\", model.hero_level)\n self.godtone_music.set_volume(0.03)\n pygame.mixer.Channel(2).play(self.godtone_music)\n print('summon godtone')\n\n\nclass Hero_p:\n def __init__(self, subject):\n subject.register(self)\n self.p_music = pygame.mixer.Sound(os.path.join(SOUND_PATH,\"p_sound.mp3\"))\n def update(self, user_request: str, model):\n if user_request == \"p\":\n if model.money >= 200:\n model.money -= 200\n model.heros.add(\"p\", model.hero_level)\n self.p_music.set_volume(0.8)\n pygame.mixer.Channel(2).play(self.p_music)\n print('summon p')\n\nclass Hero_brian:\n def __init__(self, subject):\n subject.register(self)\n self.brian_music = pygame.mixer.Sound(os.path.join(SOUND_PATH,\"brian_sound.mp3\"))\n def update(self, user_request: str, model):\n if user_request == \"brian\":\n if model.money >= 70:\n model.money -= 70\n model.heros.add(\"brian\", model.hero_level)\n self.brian_music.set_volume(0.4)\n pygame.mixer.Channel(2).play(self.brian_music)\n print('summon brian')\n\n\n\n\nclass Special:\n def __init__(self, subject):\n subject.register(self)\n self.skill_music = pygame.mixer.Sound(os.path.join(SOUND_PATH,\"rising.mp3\"))\n def update(self, user_request: str, model):\n if user_request == \"special\" and model.en.expedition:\n if model.money >= 200:\n model.money -= 200\n 
model.skill_animation = True\n self.skill_music.set_volume(0.6)\n pygame.mixer.Channel(3).play(self.skill_music)\n for en in model.en.expedition:\n en.health = en.health // 2\n\n\nclass Upgrade:\n def __init__(self, subject):\n subject.register(self)\n self.upgrade_music = pygame.mixer.Sound(os.path.join(SOUND_PATH,\"upgradesound.wav\"))\n def update(self, user_request: str, model):\n hero_update_cost = [100, 150, 200]\n if user_request == \"upgrade\":\n if model.money >= hero_update_cost[model.hero_level] and model.hero_level < 3:\n model.money -= hero_update_cost[model.hero_level]\n self.upgrade_music.set_volume(0.6)\n pygame.mixer.Channel(3).play(self.upgrade_music)\n \n\n\n","sub_path":"game/user_request.py","file_name":"user_request.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"298812943","text":"# coding: utf-8\n\nimport urllib.parse\nimport http.cookiejar\nimport urllib.request\nimport re\nimport http.client\n\n\nfrom bs4 import BeautifulSoup\n\n\n# the URL we want\nurl = \"http://www.baidu.com\"\n\n\nprint(\">>> Method 1: the most basic way\")\nresp = urllib.request.urlopen(url)\nresult = resp.read()\n# decode the bytes into the encoding we need\nprint(len(result.decode(\"utf-8\")))\nresp.close()\n\n\nprint(\">>> Method 2: sending data along\")\nheaders = { 'User-Agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' }\nvalues = { 'name' : 'test', 'pwd' : 'test' }\n# encoding\ndata = urllib.parse.urlencode(values).encode(\"utf-8\")\n# build the request\nreq = urllib.request.Request(url, data, headers)\n# open the connection\nresp = urllib.request.urlopen(req)\n# get the result\nprint(resp.readline())\nresp.close()\n\n\nprint(\">>> Method 3\")\n# urlopen handles requests through an opener object; an opener holds a series of handlers for different kinds of requests\n# by default, urlopen calls build_opener() internally to create a default opener\n# we can also build our own opener by hand and install it so urlopen uses ours\n# the following adds Cookie support to requests\njaropener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()))\njaropener.addheaders = [('User-agent',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/31.0.1650.63 Safari/537.36')]\n# install_opener returns None; it just registers the opener globally\nurllib.request.install_opener(jaropener)\n\n# headers and parameters\nheaders = {}\nparameters = {'a':'b'}\n# encode\nparameters = urllib.parse.urlencode(parameters).encode()\n# build the req\nreq = urllib.request.Request(url, parameters, headers)\n# obtain the resp\nresp = urllib.request.urlopen(req)\n# final results\nprint(resp.getcode())\nprint(resp.info())\nprint(resp.readline())\nhtml = resp.read()\n\nbs = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')\nprint(bs.find_all(\"a\", href=re.compile(\"more\")))\nnode = bs.find(\"a\")\nprint(node.name)\n\n\nresp.close()\n\n\n\n'''\n# below are examples of network connections in python2\n\nurl = \"http://www.baidu.com\"\n\nprint 'First way to connect:'\nresp1 = urllib2.urlopen(url)\nprint resp1.getcode(), len(resp1.read())\n\n\nprint 'Second way to connect:'\nreq = urllib2.Request(url)\nreq.add_header(\"user-agent\", \"Mozilla/5.0\")\nresp2 = urllib2.urlopen(req)\nprint resp2.getcode(), len(resp2.read())\n\n\nprint 'Third way to connect:'\ncj = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\nurllib2.install_opener(opener)\nresp3 = urllib2.urlopen(url)\nbaiduhtml = resp3.read()\nprint resp3.getcode(), cj, baiduhtml\n\n\n# using BS4\nsoup = BeautifulSoup(baiduhtml, 'html.parser', from_encoding='utf-8')\n\nprint 'Get all links:'\nlinks = soup.find_all('a')\nfor link in links:\n print link.name, link['href'], link.get_text()\n\nprint 'Get the news link:'\nlink = soup.find('a', href='http://news.baidu.com')\nprint link.name, link['href'], link.get_text()\n\nprint 'Get the link matching a regular expression:'\nlink = soup.find('a', href=re.compile(r'news'))\nprint link.name, link['href'], link.get_text()\n\n'''\n\n\n","sub_path":"basic/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"455450041","text":"#coding:utf-8\n\"\"\"\n Time : 2020-02-23 07:32:54\n Author : Vincent\n FileName: server.py\n Software: PyCharm\n Last Modified by: Vincent\n Last Modified time: 2020-02-23 07:32:54\n\"\"\"\nimport json\nimport logging\nimport base64\nfrom spyne import Application, rpc, ServiceBase\nfrom spyne import String, Integer\nfrom spyne.protocol.soap import Soap11\nfrom spyne.server.django import DjangoApplication\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .xml_envelope import XmlEnvelopeTree\nfrom .handle import request_base64_decode\nfrom .map.mysql_orm import init_db, get_session, makeorder_to_db, saveaddr_to_db\n\n\n# Create your views here.\nlogging.basicConfig(level=logging.DEBUG, filename='my_server.log',\n format=\"%(asctime)s %(name)s:%(levelname)s:%(module)s:%(funcName)s:\"\n \"%(processName)s:%(process)d:%(message)s\")\nlogging.getLogger(__name__).setLevel(logging.DEBUG)\n\n\nclass OrderServices(ServiceBase):\n \"\"\"Class that declares the service; its methods are the services the client calls, and the business logic and operations all live here.\"\"\"\n\n @rpc(String, _returns=String)\n def saveOrderInfo(self, request):\n '''\n Interface for saving an order\n :param request: the incoming request\n :return: the result of saving the order\n '''\n logging.info('Received request: %s' % request)\n rq_decode = request_base64_decode(request)\n logging.info('Request parameters: %s' % rq_decode)\n env_tree = XmlEnvelopeTree(rq_decode)\n dict_data = env_tree.xml_to_dict()\n logging.info('Request body as dict: %s' % dict_data)\n result = makeorder_to_db(dict_data)\n xml_tree = XmlEnvelopeTree(result)\n logging.info('Response data: %s' % xml_tree.envelope_encode())\n return base64.b64encode(xml_tree.envelope_encode().encode('utf-8')).decode()\n\n @rpc(String, _returns=String)\n def acceptUserAddrInfo(self, request):\n '''\n Interface for saving pushed address info\n :param request: the incoming request\n :return: the result of saving the address\n '''\n logging.info('Received request: %s' % request)\n rq_decode = request_base64_decode(request)\n logging.info('Request parameters: %s' % rq_decode)\n env_tree = XmlEnvelopeTree(rq_decode)\n dict_data = env_tree.xml_to_dict()\n logging.info('Request body as dict: %s' % dict_data)\n result = saveaddr_to_db(dict_data)\n xml_tree = XmlEnvelopeTree(result)\n logging.info('Response data: %s' % xml_tree.envelope_encode())\n return base64.b64encode(xml_tree.envelope_encode().encode('utf-8')).decode()\n\n\nsoap_app = Application([OrderServices],\n tns='webservice_test.myservice.views',\n # in_protocol=HttpRpc(validator='soft'),\n # 'SampleServices',\n in_protocol=Soap11(validator=\"lxml\"),\n out_protocol=Soap11())\ndjango_app = DjangoApplication(soap_app)\nsum_app = csrf_exempt(django_app)\nes = get_session()\ninit_db(es[0])\nlogging.info(\"listening to http://127.0.0.1:8000\")\nlogging.info(\"wsdl is at: http://localhost:8000/OrderServices?wsdl\")","sub_path":"dj_webservice/myservices/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"96832299","text":"# -*- coding: utf-8 -*-\n#create by jelly\n#2016.07.01\n\nimport re\nimport os\nimport subprocess\nimport time\nimport sys\nsys.path.append(\"..\")\nfrom system.osi import (run_command)\nhdoarm = \"hdparm\"\nsmart_ctl = \"smartctl\"\n\ndef smart_info(disk):\n disk_info = {}\n disk_info[\"name\"] = disk\n cmd = [smart_ctl, \"-i\", \"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n for l in o:\n if (re.match('smartctl', l) is not None):\n if (re.match('smartctl database', l) is None):\n line = l.split(\"[\")\n disk_info[\"version\"] = line[0].split(\"smartctl\")[1].strip()\n\n if (re.match('Device Model', l) is not None):\n line = l.split(\":\")\n disk_info[\"device_model\"] = line[1].strip()\n\n if (re.match('Serial Number', l) is not None):\n line = l.split(\":\")\n disk_info[\"serial_number\"] = line[1].strip()\n\n if (re.match('Firmware Version', l) is not None):\n line = l.split(\":\")\n disk_info[\"firmware_version\"] = line[1].strip()\n\n if (re.match('User Capacity', l) is not None):\n line = l.split(\":\")\n disk_info[\"capacity\"] = line[1].strip()\n\n if (re.match('Sector Size', l) is not None):\n line = l.split(\":\")\n disk_info[\"sector_size\"] = line[1].strip()\n\n if (re.match('Rotation Rate', l) is not None):\n line = l.split(\":\")\n disk_info[\"rotation_rate\"] = line[1].strip()\n\n if (re.match('ATA Version is', l) is not None):\n line = l.split(\":\")\n disk_info[\"ata_version\"] = line[1].strip()\n\n if (re.match('SATA Version is', l) is not None):\n line = l.split(\":\")\n disk_info[\"sata_version\"] = line[1].strip()\n\n if (re.match('SMART support is', l) is not None):\n if (re.match('Available', l) is None):\n line = l.split(\":\")\n disk_info[\"enabled\"] = line[1].strip()\n\n cmd = [smart_ctl, \"-H\", \"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n for l in o:\n if(re.match(\"test result\",l) is not None):\n line = l.split(\":\")\n disk_info[\"assessment\"] = line[1].strip()\n break\n return disk_info,rc\n\n#print smart_info(\"sdb\")\n\n#test begin\ndef smart_test(disk_info):\n #disk info must include disk name and test style(short and long)\n line = {\"status\":\"fail\",\"description\":\"\"}\n disk = disk_info[\"disk\"]\n style = disk_info[\"style\"]\n cmd = [smart_ctl, \"-t\", style, \"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n for l in o:\n if (re.match(\"Testing has begun\",l) is not None):\n line[\"status\"] = \"success\"\n if(re.match(\"Please wait\",l) is not None):\n line[\"description\"] = l\n break\n if line[\"status\"] == \"fail\":\n #return {\"error\":\"smart test fail\"}\n return {\"error\":\"SMART测试失败\"}\n else:\n return line\n#abort smart test disk\ndef abort_smart_test(disk):\n #disk = sdx\n line = {\"status\":\"fail\"}\n cmd = [smart_ctl,\"-X\", \"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n for l in o:\n if (re.match(\"Self-testing aborted\",l) is not None):\n line[\"status\"] = \"abort success\"\n break\n if line[\"status\"] == \"fail\":\n #return {\"error\":\"abort test fail\"}\n return {\"error\":\"停止SMART测试失败\"}\n else:\n return line\n\n#get smart test status,status is completed and processing\ndef get_smart_status(disk):\n dict = {\"remain\":\"\",\"status\":\"\"}\n line = \"\"\n key = 0\n cmd = [smart_ctl,\"-c\",\"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n if rc == 0:\n for l in o:\n if(\"Total time to complete Offline\" in l):\n break\n if(key):\n line = line + \" \" + l\n if(\"Self-test execution status:\" in l):\n line = l\n key = 1\n #print line\n if(\"The previous self-test routine 
completed\" in line):\n dict[\"remain\"] = \"0\"\n dict[\"status\"] = \"completed\"\n else:\n dict[\"remain\"] = str((re.findall(r'(\\w*[0-9]+)\\w*',line))[1]) + \"%\"\n dict[\"status\"] = \"processing\"\n return dict\n else:\n #return {\"error\":\"get smart test status fail\"}\n return {\"error\":\"获取SMART测试状态失败\"}\n\n#self samrt test log\ndef smart_test_log(disk):\n #out put logs\n cmd = [smart_ctl, \"-l\", \"selftest\", \"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n smart_log = []\n for l in o:\n log = {}\n if(re.match(\"#\",l) is not None):\n li = l.split()\n if (len(li) == 9):\n log[\"num\"] = li[1]\n log[\"Description\"] = li[2]\n log[\"Status\"] = li[3] + \" \" + li[4] + \" \" + li[5]\n log[\"Remain\"] = li[6]\n log[\"LifeTime\"] = li[7]\n log[\"LBA_of_first_error\"] = li[8]\n smart_log.append(log)\n continue\n return smart_log\n\ndef smart_error_log(disk):\n #out put error logs\n error_log = {}\n cmd = [smart_ctl, \"-l\", \"error\", \"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n error = True\n for l in o:\n if(re.match(\"No Errors Logged\",l) is not None):\n error = False\n if (error == True):\n error_log[\"error\"] = True\n #code........\n else:\n error_log[\"error\"] = False\n return error_log\n\ndef smartproperty(disk):\n key = 0\n list = []\n cmd = [smart_ctl,\"-a\",\"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n if rc == 0:\n for l in o:\n if(\"SMART Error Log\" in l):\n break\n if(key and len(l)>0):\n l = l.split()\n dict = {}\n dict[\"id\"] = l[0]\n dict[\"name\"] = l[1]\n dict[\"flag\"] = l[2]\n dict[\"normed_value\"] = l[3]\n dict[\"worst\"] = l[4]\n dict[\"threshold\"] = l[5]\n dict[\"type\"] = l[6]\n dict[\"updated\"] = l[7]\n dict[\"failed\"] = l[8]\n if(len(l) == 10):\n dict[\"raw_value\"] = l[9]\n else:\n value = \"\"\n for i in range(len(l)-9):\n value = value + \" \" + l[9+i]\n dict[\"raw_value\"] = value\n list.append(dict)\n #print l\n if(\"ID# ATTRIBUTE_NAME\" in l):\n key = 1\n return list\n else:\n #return [{\"error\":\"smart property error\"}]\n return [{\"error\":\"获取SMART信息错误\"}]\n\n\ndef smartcapability(disk):\n list = []\n relist = []\n i = 0\n j = 0\n key = 0\n line = \"\"\n cmd = [smart_ctl,\"-c\",\"/dev/%s\" % disk]\n o, e, rc = run_command(cmd)\n #print o\n if rc == 0:\n for l in o:\n if(key and len(l)>0):\n if(\":\" in l and l[0]!=\"\\t\"):\n if(j == 0):\n i = i + 1\n if(i>1):\n i = 0\n list.append(line)\n #line = \" \"\n line = l\n else:\n line = l\n else:\n i = i + 1\n j = 0\n line = line + \" \" + l\n elif(l[0]==\"\\t\"):\n i = i + 1\n line = line + \" \" + l.strip(\"\\t\")\n else:\n i = 0\n j = 1\n list.append(line)\n line = l\n if(\"General SMART Values:\" in l):\n key = 1\n if line != \"\":\n list.append(line)\n for l in list:\n dict = {}\n l = l.split(\":\")\n dict[\"name\"] = l[0]\n dict[\"flag\"] = l[1].split(\")\")[0].split(\"(\")[-1].strip()\n dict[\"capability\"] = l[1].split(\")\")[1].strip(\"\\t\")\n relist.append(dict)\n return relist\n else:\n return [{\"name\":\"none\",\"flag\":\"none\",\"capability\":\"none\"}]\n else:\n #return [{\"error\":\"smart capability error\"}]\n return [{\"error\":\"获取SMART信息错误\"}]\n#print smartcapability(\"sda\")\n\n#bcache disk function\n#disk = {\"cache_disk\":\"sdb\",\"data_disk\":[\"sdc\",\"sdd\"]}\ndef add_bcache_backing(disk):\n disk_data = \"\"\n history = 0\n cache_disk = disk[\"cache_disk\"]\n cache_disk = \"/dev/%s\" % cache_disk\n cmd1 = [\"bcache-super-show\",\"-f\",cache_disk]\n o,e,rc = run_command(cmd1)\n if(len(o) > 5):\n history = 1\n data_disk = disk[\"data_disk\"]\n 
if(history == 0):\n for i in range((len(data_disk))):\n data_disk_path = \"/dev/%s\" % data_disk[i]\n cmd2 = [\"bcache-super-show\",\"-f\",data_disk_path]\n o,e,rc = run_command(cmd2)\n if(len(o) > 5):\n history = 1\n break\n cmd = [\"make-bcache\",\"-C\",cache_disk,\"-B\",\"--writeback\"]\n for i in range(len(data_disk)):\n disk_data = \"/dev/%s\" % data_disk[i]\n cmd.insert(5,disk_data)\n if(history == 1):\n cmd.insert(len(cmd),\"--wipe-bcache\")\n o,e,rc = run_command(cmd)\n if(rc == 0):\n return {\"status\": \"add bcache method success\"}\n else:\n #return {\"error\": \"add bcache method fail\"}\n return {\"error\": \"增加cache功能盘失败\"}\n#disk = {\"set_uuid\":\"fafaa-7777979cascasdb\"}\ndef delete_bcache(disk):\n key = 1\n i = 0\n disk_cache = disk[\"set_uuid\"]\n list_cache = os.listdir(\"/sys/fs/bcache\")\n if disk_cache in list_cache:\n list_link = os.listdir(\"/sys/fs/bcache/%s\" % disk_cache)\n for link in list_link:\n if os.path.islink(\"/sys/fs/bcache/%s/%s\" % (disk_cache,link)):\n i = i + 1\n if(i > 1):\n key = 0\n break\n else:\n key = 0\n if(key):\n path_register = \"/sys/fs/bcache/%s/unregister\" % disk_cache\n #cmd = [\"echo\",\"1\",\">\",path_register]\n #run_command(cmd)\n os.system(\"echo 1 > %s\" % path_register)\n return {\"status\": \"delete cache disk success\"}\n else:\n #return {\"error\": \"The backing disk is exists, please delete backing disk first or the cache disk not exists\"}\n return {\"error\": \"数据盘还存在, 请先删除数据盘然后再次删除cache盘\"}\n#disk = {\"name\":\"sdc\"}\ndef delete_backing(disk):\n data_disk = disk[\"name\"]\n path_discon = \"/sys/block/%s/bcache/detach\" % data_disk\n if os.path.exists(path_discon):\n #cmd = [\"echo\",\"1\",\">\",path_discon]\n #run_command(cmd)\n os.system(\"echo 1 > %s\" % path_discon)\n path_stop = \"/sys/block/%s/bcache/stop\" % data_disk\n #cmd = [\"echo\",\"1\",\">\",path_stop]\n #o,e,rc =run_command(cmd)\n if os.path.exists(path_stop):\n os.system(\"echo 1 > %s\" % path_stop)\n return {\"status\": \"delete data disk success\"}\n #else:\n else:\n #return {\"error\": \"delete data disk fail\"}\n return {\"error\": \"删除数据盘失败\"}\n else:\n return {\"error\": \"删除数据盘失败\"}\ndef get_cache_uuid():\n uuid_back = []\n if os.path.exists(\"/sys/fs/bcache/\"):\n uuid = os.listdir(\"/sys/fs/bcache/\")\n for i in range(len(uuid)):\n if uuid[i] != \"register_quiet\" and uuid[i] != \"register\":\n uuid_back.append(uuid[i])\n return uuid_back\n return uuid_back\n\n#disk = \"bcache0\"\ndef get_bcachedisk(disk):\n data = \"\"\n path_bcache = \"/sys/block/%s/\" % disk\n cmd = [\"ls\",\"-l\",path_bcache]\n o,e,rc = run_command(cmd)\n for l in o:\n l = l.split(\"/\")\n if len(l) > 8:\n data = l[-2]\n break\n return data\n","sub_path":"services/storage/fs/disks.py","file_name":"disks.py","file_ext":"py","file_size_in_byte":11782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"298129739","text":"from setuptools import setup, find_packages\n\npackages = find_packages()\n\nsetup(name='baybars',\n version='0.0.15',\n setup_requires=['pbr==5.1.1'],\n copyright='Copyright 2019 Jet.com',\n url='http://pypi.org/project/baybars/',\n packages=packages,\n install_requires=[\n 'python-consul==1.1.0',\n 'azure-storage-blob==1.4.0',\n 'azure-storage-queue==1.4.0',\n 'confluent-kafka==0.11.6',\n 'azure-cosmos==3.0.2',\n 'pysftp==0.2.9',\n 'requests==2.20.1',\n 'numpy==1.15.4',\n 'pandas==0.23.4',\n 'python-consul==1.1.0',\n 'PyHive==0.6.1',\n 'elasticsearch==6.3.1',\n 'azure-cosmosdb-table==1.0.5'\n ],\n keywords='azure kafka blob documentdb cosmosdb queue tar',\n python_requires='>=3.5',\n zip_safe=False,\n pbr=True)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"303525442","text":"from typing import List\nimport numpy as np\nimport os\nimport settings\nfrom Mdp.transition_counting_translator import TransitionCountingTranslator\nfrom transition_counting.heatmap_plotter import plot_count_heatmap\nfrom transition_counting.transition_counter import TransitionCounter\n\n\nclass ConversationComparer:\n def compare_and_save_plots(\n self,\n file_name: str,\n original_conversation: List[dict],\n calculated_conversation: List[dict],\n frame_step: int,\n file_metadata: dict,\n show: bool\n ):\n base = os.path.basename(file_name)\n # get file name without extention\n name = os.path.splitext(base)[0]\n original_results = self.__count(\n original_conversation, file_metadata, frame_step\n )\n calculated_results = self.__count(\n calculated_conversation, file_metadata, frame_step\n )\n\n self.__save_plots(original_results, name, \"original\", show)\n self.__save_plots(calculated_results, name, \"calculated\", show)\n\n def __save_plots(self, results, file_name, original_or_not: str, show: bool):\n file_name_counts = os.path.join(\n settings.COMPARISON_PLOTS_FOLDER_PATH,\n f\"{file_name}_{original_or_not}_plot_counts.png\",\n )\n file_name_probs = os.path.join(\n settings.COMPARISON_PLOTS_FOLDER_PATH,\n f\"{file_name}_{original_or_not}_plot_probs.png\",\n )\n\n translator = TransitionCountingTranslator(results)\n\n probabilities_matrix = translator.transform_to_4x4_probabilities_matrix()\n\n plot_count_heatmap(np.round(probabilities_matrix, decimals=3), file_name_probs, show)\n\n def __count(self, conversation, file_metadata: dict, frame_step: int) -> np.ndarray:\n result = np.zeros((2, 2, 2, 2))\n starting_points = np.arange(0, frame_step)\n counter = TransitionCounter()\n\n for i in starting_points:\n file_result = counter.count_transitions(\n conversation, frame_step, i, file_metadata\n )\n result += file_result\n\n return result\n\n","sub_path":"src/inverse_reinforcement_learning/conversation_comperar.py","file_name":"conversation_comperar.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"342179665","text":"# MIT License\n#\n# Copyright (c) 2021 Soohwan Kim\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom lightning_asr.vocabs.vocab import Vocabulary\n\n\nclass LibriSpeechVocabulary(Vocabulary):\n \"\"\"\n Converts label to string for librispeech dataset.\n\n Args:\n model_path (str): path of sentencepiece model\n vocab_size (int): size of vocab\n \"\"\"\n def __init__(self, model_path: str, vocab_size: int):\n super(LibriSpeechVocabulary, self).__init__()\n try:\n import sentencepiece as spm\n except ImportError:\n raise ImportError(\"Please install sentencepiece: `pip install sentencepiece`\")\n\n self.sp = spm.SentencePieceProcessor()\n self.sp.Load(model_path)\n self.pad_id = self.sp.PieceToId(\"\")\n self.sos_id = self.sp.PieceToId(\"\")\n self.eos_id = self.sp.PieceToId(\"\")\n self.blank_id = self.sp.PieceToId(\"\")\n self.vocab_size = vocab_size\n\n def label_to_string(self, labels):\n if len(labels.shape) == 1:\n return self.sp.DecodeIds([l.item() for l in labels])\n\n elif len(labels.shape) == 2:\n sentences = list()\n\n for label in labels:\n sentence = self.sp.DecodeIds([l for l in label])\n sentences.append(sentence)\n return sentences\n else:\n raise ValueError(\"Unsupported label's shape\")\n","sub_path":"lightning_asr/vocabs/librispeech.py","file_name":"librispeech.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"65956326","text":"class StoreEngine:\n\n def __init__(self, db):\n self.db = db\n\n def store(self, inter):\n data = {\n 'name': inter.name,\n 'region': inter.region,\n 'templates': self._convert_entities(inter.templates),\n 'functions': self._convert_entities(inter.funcs),\n }\n self.db['interfaces'].update({'name': inter.name}, data, True)\n\n def _convert_entities(self, entities):\n result = []\n\n for ent in entities:\n data = {\n 'name': ent.name,\n 'params': self._convert_params(ent.params)\n }\n\n result.append(data)\n\n return result\n\n def _convert_params(self, params):\n result = []\n\n for p in params:\n position = p.pos + 1 if p.pos is not None else None\n\n data = {\n 'name': p.name,\n 'position': position,\n 'type': str(p.type)\n }\n\n result.append(data)\n\n return result\n\n","sub_path":"xsl_stat/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"297765797","text":"import argparse\nfrom utils_db.db_cassandra import CassandraDatabaseHandler\n\ndef _parse_options():\n argParser = argparse.ArgumentParser( 'Sample Cassandra BCP import utility' )\n argParser.add_argument( '-config', dest='config', help='Config section name in db_example.py' )\n argParser.add_argument( '-source', dest='source', help='File name of the data source' )\n argParser.add_argument( '-target', dest='target', help='Target table name in Cassandra' )\n argParser.add_argument( '-log', dest='log', help='Logging file name' )\n argParser.add_argument( '-env', dest='env', help='Required environment variables',\n action='append', type=lambda kv:kv.split( '=', 1 ) )\n argParser.add_argument( '-bcp_opt', dest='options', help='Optional CQLSH copyutil options: \\\n http://docs.datastax.com/en/cql/3.1/cql/cql_reference/copy_r.html',\n action='append', type=lambda kv:kv.split( '=', 1 ) )\n return argParser.parse_args()\n\ndef main():\n \"\"\"\n Below command line options serve as an example to run this script:\n -config FMNA_DEV -target tbl_poc_fund_returns -source e:\\dev\\cass\\data\\out\\DMRI_FE_USA_short_bcp.csv\n -log e:\\dev\\cass\\data\\stdout.txt\n -env CASSANDRA_HOME=c:\\FAST\\apache-cassandra-3.10 -env PYTHON_HOME=c:\\FAST\\Python\\2.7.12\n -bcp_opt MAXBATCHSIZE=20 -bcp_opt HEADER=False\n \"\"\"\n\n argResults = _parse_options()\n status= 1\n\n with CassandraDatabaseConnection( argResults.config, argResults.target ) as dbSession:\n if dbSession.validateTableExistence():\n status = dbSession.runBcpIn(\n argResults.source, argResults.target, verbose=True,\n stdoutFile=argResults.log, stderrFile=argResults.log,\n envVars=dict( argResults.env ),\n options=dict( argResults.options ) )\n\n print( \"Cassandra BCP import status: %s\" % status )\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/cassandra_bcp_example.py","file_name":"cassandra_bcp_example.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"178227671","text":"# Autor: Cecilia Daniela Olivares Hernández, a01745727\r\n# Descripcion: Conversión de temperatura de escala Fahrenheit a la escala Celsius\r\n\r\n# Escribe tu programa después de esta línea.\r\n\r\nF = int(input(\"Inserta la temperatura en Fahrenheits: \"))\r\n\r\nC = (F - 32) / 1.8\r\n\r\nprint(\"La tenperatura Fahrenheit en Celsius es: \"'%.4f' % C)\r\n","sub_path":"extraTemperatura.py","file_name":"extraTemperatura.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"311022235","text":"# -*- coding: utf-8 -*-\n#\n\nfrom airflow.operators import PythonOperator\nfrom airflow.models import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.models import Variable\nimport logging\n\nargs = {\n 'owner': 'systems',\n 'start_date': datetime.now() - timedelta(minutes=10),\n}\n\ndag = DAG(\n dag_id='apple',\n default_args=args,\n schedule_interval=\"2 * * * *\")\n\ndef print_context(*args, **kwargs):\n print('lol')\n\n\nrun_this = PythonOperator(\n task_id='do_it_for_the_lolz',\n provide_context=True,\n python_callable=print_context,\n dag=dag)\n\n\n# Generate 2 chill tasks, chilling from 0 to 1 seconds?\nfor i in range(2):\n task = PythonOperator(\n task_id='sleeping_on_job_for_' + str(i),\n python_callable=print_context,\n op_kwargs={'random_base': float(i)},\n dag=dag)\n\n run_this.set_downstream(task)\n\n\n\n","sub_path":"dags/test/troll.py","file_name":"troll.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"504296875","text":"import csv,os\nfrom google.cloud import bigquery\n\nimport get_hive_schema,get_bq_schema\n# Imports required variables from Initialization script\nfrom init_script import *\n# Imports required functions from the Utilities script\nfrom utilities import *\n\ndef get_metrics_table_schema():\n \"\"\"Creates schema for the Bigquery comparison metrics table\n \n Returns:\n list -- list of google.cloud.bigquery.schema.SchemaField objects\n \"\"\"\n schema = [\n bigquery.SchemaField('operation', 'STRING', mode='REQUIRED',description='operation'),\n bigquery.SchemaField('table_name', 'STRING', mode='REQUIRED',description='Table name'),\n bigquery.SchemaField('Column_count', 'STRING', mode='REQUIRED',description='Number of columns'),\n ]\n for col in columns_list:\n schema.append(bigquery.SchemaField(str(col), 'STRING', mode='REQUIRED'))\n return schema\n\ndef create_bq_metrics_table(hive_bq_comparison_table):\n \"\"\"Creates Bigquery comparison metrics table\n \n Arguments:\n hive_bq_comparison_table {str} -- Bigquery table name to be created\n \"\"\"\n\n dataset_ref = bq_client.dataset(dataset_id)\n table_ref = dataset_ref.table(hive_bq_comparison_table)\n table = bigquery.Table(table_ref, schema=get_metrics_table_schema())\n table = bq_client.create_table(table)\n\n# Reads the validation rules to a variable\ndef read_validations():\n \"\"\"Reads the validation rules\n \n Returns:\n list -- Validation rules list\n \"\"\"\n\n global validations_list\n file=open('validations.csv','rb')\n reader=csv.reader(file)\n validations_list = [row for row in reader]\n return validations_list\n\ndef analyze_bq_table(dataset_id,bq_table_name,schema):\n \"\"\"Gets metadata about the bigquery table\n \n Arguments:\n dataset_id {str} -- Bigquery dataset id\n bq_table_name {str} -- Bigquery table name\n schema {dict} -- Schema of the BQ table\n \n Returns:\n dict -- Bigquery Table metadata\n \"\"\"\n\n table_analysis={}\n dataset_ref =bq_client.dataset(dataset_id)\n table_ref = dataset_ref.table(bq_table_name)\n table = bq_client.get_table(table_ref)\n table_analysis['operation']=\"BQ\"\n table_analysis['table_name']=bq_table_name\n table_analysis['num_cols']=str(len(table.schema))\n table_analysis['schema']=schema\n # for col in columns_list:\n # for key,value in schema.iteritems():\n # table_analysis[str(col)]=str(schema[col])\n return table_analysis\n\n# Analyzes HIVE table for metrics\ndef analyze_hive_table(database,table_name,schema):\n \"\"\"Gets metadata about the Hive table\n \n Arguments:\n database {str} -- Hive database name\n table_name {str} -- Hive table name\n schema {dict} -- Schema of the Hive table\n \n Returns:\n dict -- Hive table metadata\n \"\"\"\n\n num_cols=0\n table_description = get_description(database,table_name)\n for j in range(len(table_description)):\n if table_description[j][0]=='LOCATION':\n location=table_description[j+1][0]\n for i in table_description:\n if i[0]=='ROW FORMAT SERDE ':\n break\n if 'CREATE TABLE' in i[0] or 'PARTITIONED BY' in i[0]:\n pass\n else:\n num_cols+=1\n table_analysis={}\n table_analysis['operation']=\"HIVE\"\n table_analysis['table_name']=table_name\n table_analysis['num_cols']=str(num_cols)\n table_analysis['schema']=schema\n return table_analysis\n\n# Inserts comparison data to a CSV file\ndef append_row_to_metrics_table(row_data):\n \"\"\"Writes comparison metrics data to csv file\n \n Arguments:\n row_data {dict} -- Metadata to write\n \"\"\"\n\n global is_csv_file_created\n 
data=[row_data['operation'],row_data['table_name'],row_data['num_cols']]\n for item in columns_list:\n data.append(row_data['schema'][item])\n with open(hive_bq_comparison_csv, 'a+') as csvFile:\n writer = csv.writer(csvFile)\n print(data)\n writer.writerow(data)\n csvFile.close()\n is_csv_file_created = True\n\n\n\ndef do_health_checks(hive_table_analysis,bq_table_analysis,schema):\n \"\"\"Populates the Health checks values from the comparison\n \n Arguments:\n hive_table_analysis {dict} -- Hive table metadata\n bq_table_analysis {dict} -- Bigquery table metadata\n schema {dict} -- Bigquery schema\n \n Returns:\n dict -- Health checks\n \"\"\"\n\n healths={\n \"operation\":\"Health Check\",\n \"table_name\":\"NA\",\n \"num_cols\":\"Fail\",\n \"schema\":schema\n }\n if (hive_table_analysis['num_cols']==bq_table_analysis['num_cols']):\n healths[\"num_cols\"]=\"Pass\"\n\n for item in columns_list:\n if 'array_' in hive_table_analysis['schema'][item]:\n hive_table_analysis['schema'][item] = '_'.join(hive_table_analysis['schema'][item].split('_')[-2:])\n \n \n if ([hive_table_analysis['schema'][item],bq_table_analysis['schema'][item]] in validations_list):\n healths['schema'][str(item)]=\"Pass\"\n else:\n healths['schema'][str(item)]=\"Fail\"\n return healths\n\ndef write_csv_to_gcs(filename):\n \"\"\"Writes comparison csv to GCS bucket\n \n Arguments:\n filename {str} -- Comparison metrics csv filename\n \n Returns:\n str -- GCS URI of filepath\n \"\"\"\n\n bucket=gcs_client.get_bucket(gcs_bucket_name)\n blob = bucket.blob(filename)\n blob.upload_from_filename(filename)\n uri = 'gs://'+gcs_bucket_name+'/'+filename\n return uri\n\ndef delete_blob(blob_name):\n \"\"\"Deletes file from GCS bucket\n \n Arguments:\n blob_name {str} -- GCS blob name to be deleted\n \"\"\"\n\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(gcs_bucket_name)\n blob = bucket.blob(blob_name)\n blob.delete()\n print('File {} deleted.'.format(blob_name))\n\n# Loads CSV metrics data to BQ comparison table\ndef load_csv_to_bigquery(csv_uri,bq_table_name):\n \"\"\"Loads metrics CSV data to BQ comparison table\n \n Arguments:\n csv_uri {str} -- GCS URI of the metrics file\n bq_table_name {str} -- BQ comparison metrics table name\n \"\"\"\n\n dataset_ref = bq_client.dataset(dataset_id)\n job_config = bigquery.LoadJobConfig()\n job_config.schema = get_metrics_table_schema()\n job_config.source_format = bigquery.SourceFormat.CSV\n\n load_job = bq_client.load_table_from_uri(csv_uri,dataset_ref.table(bq_table_name),job_config=job_config)\n printOutput('Loading metrics data to BigQuery... 
Job {}'.format(load_job.job_id))\n\n load_job.result()\n\n destination_table = bq_client.get_table(dataset_ref.table(bq_table_name))\n printOutput('Loaded {} rows in metrics table'.format(destination_table.num_rows))\n printOutput('Migrated data successfully from hive to BigQuery')\n printOutput('Comparison metrics of tables available in BQ table '+bq_table_name)\n\n# Main function for creating the comparison metrics table\ndef write_metrics_to_bigquery(csv_name,table_name):\n \"\"\"Main function to be called to write comparison metrics to Bigquery\"\"\"\n\n global is_csv_file_written\n global is_csv_file_created\n global columns_list\n global hive_bq_comparison_csv,hive_bq_comparison_table\n hive_bq_comparison_csv=csv_name\n hive_bq_comparison_table = table_name\n\n printOutput(\"Analyzing the hive and BQ tables...\")\n\n bq_schema = get_bq_schema.get_schema(dataset_id,bq_table)\n # print(bq_schema)\n\n hive_schema,columns_list = get_hive_schema.get_schema(hive_database,hive_table_name)\n # print(hive_schema)\n # print(columns_list)\n\n hive_table_analysis = analyze_hive_table(hive_database,hive_table_name,hive_schema)\n # print(hive_table_analysis)\n append_row_to_metrics_table(hive_table_analysis)\n is_csv_file_created = True\n bq_table_analysis=analyze_bq_table(dataset_id,bq_table,bq_schema)\n # print(bq_table_analysis)\n append_row_to_metrics_table(bq_table_analysis)\n\n validations_list = read_validations()\n healths=do_health_checks(hive_table_analysis,bq_table_analysis,bq_schema)\n # print(healths)\n append_row_to_metrics_table(healths)\n\n create_bq_metrics_table(hive_bq_comparison_table)\n # print(bq_table_analysis)\n\n csv_uri = write_csv_to_gcs(hive_bq_comparison_csv)\n is_csv_file_written = True\n\n load_csv_to_bigquery(csv_uri,hive_bq_comparison_table)\n os.remove(hive_bq_comparison_csv)\n delete_blob(hive_bq_comparison_csv)\n","sub_path":"examples/hive-bq/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":8423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"156389264","text":"import itertools\nimport json\nimport os\n\nfrom django.http import HttpResponse\n\nfrom django.db.utils import IntegrityError\n\nfrom django.contrib.sessions.backends.db import SessionStore\n\nfrom .tesseract import convert_tiff_path_to_text\nfrom .highlighter import convert_string_to_html\nfrom .decisioner import HmsTreeGenerator\nfrom .decisioner import Cursor\nfrom ..models import UserComplaintClassification\nfrom .cfpb_db import update_database\nfrom .analyzer import classify\n\n\nclass AjaxFielder(object):\n '''Static methods for ajax requests.'''\n\n @classmethod\n def input_ajax(cls, request):\n '''Takes ajax request and routes data to the appropriate routine.'''\n # Delay makes animations look nicer.\n print(request.POST.dict())\n function_dict = {'load_tiff': cls.load_tiff,\n 'choice': cls.choice,\n 'backup': cls.backup,\n 'db_refresh': cls.db_refresh,\n 'analyze_tiff': cls.analyze_tiff,\n 'analyze_text': cls.analyze_text}\n try:\n function_string = request.POST.dict()['function']\n target_function = function_dict[function_string]\n except KeyError:\n return False\n result_list = target_function(request)\n return json.dumps(result_list)\n\n @classmethod\n def load_tiff(cls, request):\n '''Take a tiff, turn it into text, and return a response.'''\n response = {}\n sent_file = request.FILES['file']\n if sent_file.size > 10000000:\n return 'No files over 10 megabytes.'\n temporary_path = os.path.join(os.path.expanduser(\"~\"),\n 'hms_tempfile.tiff')\n with open(temporary_path, 'wb+') as f:\n f.write(sent_file.read())\n # Get text\n extracted_text = convert_tiff_path_to_text(temporary_path).decode()\n highlighted_text = convert_string_to_html(extracted_text)\n os.remove(temporary_path)\n # Store extracted and highlighted text in session.\n request.session['highlighted_text'] = highlighted_text\n request.session['extracted_text'] = extracted_text\n tree = HmsTreeGenerator().get_tree()\n request.session['decision_tree'] = tree\n request.session['cursor'] = Cursor(tree.get_root(),\n extracted_text)\n request.session.save()\n if extracted_text:\n panel_1_text = request.session['cursor'].get_node_criterion()\n panel_1_text += '
'\n for item in request.session['cursor'].get_node_options():\n panel_1_text += ''.join([''])\n panel_1_text += ' '\n response['#panel_1'] = panel_1_text\n response['#panel_2'] = highlighted_text\n sess = request.session\n response['#panel_3'] = sess['cursor'].get_node_description()\n response['#panel_4'] = sess['cursor'].get_tags()\n else:\n response['#panel_2'] = 'No text found'\n return response\n\n @classmethod\n def choice(cls, request):\n '''Takes a choice and response.'''\n choice_string = request.POST.dict()['arguments']\n choice = cls.string_to_class(choice_string)\n response = {}\n # Store extracted and highlighted text in session.\n cursor = request.session['cursor']\n cursor.choose(choice)\n # If cursor complete, write to database.\n if cursor.complete:\n tags = [item for item\n in set(cursor.get_tags())\n if item is not None]\n response['#panel_1'] = ' '\n response['#panel_3'] = ' '\n response['#panel_4'] = ' '\n response['#panel_2'] = 'Analysis complete. Tags:\\n'\n response['#panel_2'] += ' '.join(tags)\n sans_tags = [item.replace('#','')\n for item in tags]\n argument_dict = {key: True for key in sans_tags}\n UCC = UserComplaintClassification\n complaint = UCC(categorizer=request.user.username,\n complaint_text=str(cursor._inspected_item_data),\n tag_string=''.join(tags),\n **argument_dict)\n complaint.save()\n\n else:\n panel_1_text = cursor.get_node_criterion()\n panel_1_text += '
'\n for item in cursor.get_node_options():\n panel_1_text += ''.join([''])\n panel_1_text += ' '\n panel_1_text += '
'\n panel_1_text += ''\n response['#panel_1'] = panel_1_text\n response['#panel_3'] = cursor.get_node_description()\n tags = [item for item\n in set(cursor.get_tags())\n if item is not None]\n print(tags)\n response['#panel_4'] = ' '.join(tags)\n # request.session['cursor'] = cursor\n request.session.save()\n return response\n\n @classmethod\n def string_to_class(cls, input_string):\n stripped_string = input_string.strip()\n if stripped_string == 'True':\n return True\n if stripped_string == 'False':\n return False\n try:\n float(stripped_string)\n if '.' in input_string:\n return float(input_string)\n else:\n return int(input_string)\n except ValueError:\n return input_string\n\n @classmethod\n def backup(cls, request):\n response = {}\n # Store extracted and highlighted text in session.\n cursor = request.session['cursor']\n cursor.backup()\n panel_1_text = cursor.get_node_criterion()\n panel_1_text += '
'\n for item in cursor.get_node_options():\n panel_1_text += ''.join([''])\n panel_1_text += ' '\n # If can back up further, add backup button\n if cursor._current_node.parent_node is not None:\n panel_1_text += '
'\n panel_1_text += ''\n response['#panel_1'] = panel_1_text\n response['#panel_3'] = cursor.get_node_description()\n tags = [item for item\n in set(cursor.get_tags())\n if item is not None]\n response['#panel_4'] = ' '.join(tags)\n # request.session['cursor'] = cursor\n request.session.save()\n return response\n\n @classmethod\n def db_refresh(cls, request):\n response = {}\n # db_successfully_created is a boolean response\n db_successfully_created = update_database()\n if db_successfully_created:\n response[\"db_response_plan\"] = \"Successfully created.\"\n return response\n else:\n response[\"db_response_plan\"] = \"Failure.\"\n return response\n\n @classmethod\n def analyze_tiff(cls, request):\n text = cls.tiff_to_text(request)\n response = classify(text)\n # Response here\n return response\n\n @classmethod\n def tiff_to_text(cls, request):\n '''Take a tiff, turn it into text, and return a response.'''\n sent_file = request.FILES['file']\n if sent_file.size > 10000000:\n return False\n temporary_path = os.path.join(os.path.expanduser(\"~\"),\n 'hms_tempfile.tiff')\n with open(temporary_path, 'wb+') as f:\n f.write(sent_file.read())\n # Get text\n extracted_text = convert_tiff_path_to_text(temporary_path).decode()\n os.remove(temporary_path)\n return extracted_text\n\n @classmethod\n def analyze_text(cls, request):\n text = request.POST.dict()['arguments']\n response = classify(text)\n return response\n","sub_path":"app_hms/services/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":9700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"257514695","text":"\"\"\"\nUtilities to check for valid parameters\n\"\"\"\n\nimport numbers\nimport warnings\n\nfrom .compat import _basestring\n\n\ndef check_threshold(threshold, data, percentile_func, name='threshold'):\n \"\"\" Checks if the given threshold is in correct format and within the limit.\n\n If necessary, this function also returns score of the data calculated based\n upon the given specific percentile function.\n Note: This is only for threshold as string.\n\n Parameters\n ----------\n threshold: float or str\n If threshold is a float value, it should be within the range of the\n maximum intensity value of the data.\n If threshold is a percentage expressed in a string it must finish with a\n percent sign like \"99.7%\".\n data: ndarray\n an array of the input masked data.\n percentile_func: function {scoreatpercentile, fastabspercentile}\n Percentile function for example scipy.stats.scoreatpercentile\n to calculate the score on the data.\n name: str, optional\n A string just used for representing the name of the threshold for a precise\n error message.\n\n Returns\n -------\n threshold: number\n returns the score of the percentile on the data or\n returns threshold as it is if given threshold is not a string percentile.\n \"\"\"\n if isinstance(threshold, _basestring):\n message = ('If \"{0}\" is given as string it '\n 'should be a number followed by the percent '\n 'sign, e.g. \"25.3%\"').format(name)\n if not threshold.endswith('%'):\n raise ValueError(message)\n\n try:\n percentile = float(threshold[:-1])\n except ValueError as exc:\n exc.args += (message, )\n raise\n\n threshold = percentile_func(data, percentile)\n elif isinstance(threshold, numbers.Real):\n # checks whether given float value exceeds the maximum\n # value of the image data\n value_check = abs(data).max()\n if abs(threshold) > value_check:\n warnings.warn(\"The given float value must not exceed {0}. \"\n \"But, you have given threshold={1} \".format(value_check,\n threshold))\n else:\n raise TypeError('%s should be either a number '\n 'or a string finishing with a percent sign' % (name, ))\n return threshold\n","sub_path":"python/nilearn/2016/8/param_validation.py","file_name":"param_validation.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"54319748","text":"from django.shortcuts import render, reverse, redirect, get_object_or_404\nfrom .forms import WorkoutForm\nfrom Profile.models import Profile\nfrom .models import workouts\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\ndef workouts_page(request):\n user = Profile.objects.filter(user=request.user)\n workout = workouts.objects.filter(user=request.user)\n\n if request.method == 'POST':\n form = WorkoutForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(reverse('workouts_page'))\n else:\n form = WorkoutForm()\n\n context = {\n 'user': user,\n 'workout': workout,\n 'form': form,\n }\n \n template = 'workouts.html'\n\n return render(request, template, context)\n\n@login_required\ndef edit_workout(request, workout_id):\n \"\"\" Edit a product in the store \"\"\"\n if not request.user.is_superuser:\n messages.info(request, 'Sorry, you can not do that.')\n return redirect(reverse('Profile'))\n\n workout = get_object_or_404(workouts, pk=workout_id)\n if request.method == 'POST':\n form = WorkoutForm(request.POST, request.FILES, instance=workout)\n if form.is_valid():\n form.save()\n messages.info(request, 'Successfully updated workout!')\n return redirect(reverse('workouts_page'))\n else:\n messages.error(request, 'Failed to update picture. Please ensure the form is valid.')\n else:\n form = WorkoutForm(instance=workout)\n\n template = 'edit_workout.html'\n \n context = {\n 'form': form,\n 'workout': workout,\n }\n\n return render(request, template, context)\n\n@login_required\ndef delete_workout(request, workouts_id):\n \"\"\" Delete a product from the store \"\"\"\n if not request.user.is_authenticated:\n messages.info(request, 'Sorry, you can not do that!')\n return redirect(reverse('workouts'))\n\n picture = get_object_or_404(workouts, pk=workouts_id)\n picture.delete()\n messages.info(request, 'Picture deleted!')\n return redirect(reverse('workouts_page'))","sub_path":"workouts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"105044152","text":"import numpy as np\n\nfrom netCDF4 import Dataset\nfrom datetime import datetime\nfrom datetime import timedelta\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n \nfrom tools_BAIU import get_lonlat, prep_proj_multi, get_mslp, get_prsvar, get_gph\n\nquick = True \nquick = False\n\n\n\ndef main( vtime=datetime(2018, 7, 1, 0 ), hpa=500 ):\n\n TOP = \"/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6\"\n \n adt_h = 24\n adt = timedelta( hours=adt_h )\n \n\n INFO = {\"TOP\": TOP, }\n\n lon2d, lat2d = get_lonlat( INFO, stime=datetime( 2018, 7, 1, 0 ) )\n\n clevs = np.arange( 800, 1200, 4 )\n\n mslp = get_mslp( INFO, stime=vtime, vtime=vtime, m=0 )\n gph = get_gph( INFO, stime=vtime, vtime=vtime, m=0 )\n var_ = get_prsvar( INFO, nvar=\"Tprs\", stime=vtime, vtime=vtime, m=0, hpa=hpa ) \n\n print( np.nanmax(var_), np.nanmin(var_) )\n\n levs = np.arange( 270, 300, 1 )\n\n cmap = plt.cm.get_cmap(\"RdBu_r\")\n levs = np.arange( 280, 301, 1 )\n levs = np.arange( 284, 297, 1 )\n\n\n if hpa < 300:\n levs = np.arange( 218, 231, 1 )\n elif hpa < 400:\n levs = np.arange( 238, 251, 1 )\n elif hpa < 600:\n levs = np.arange( 264, 275, 1 )\n# elif hpa > 900:\n# levs = np.arange( 0, 20, 1 )\n\n fig, ((ax1)) = plt.subplots( 1, 1, figsize=(8, 6.0 ) )\n fig.subplots_adjust( left=0.07, bottom=0.05, right=0.94, top=0.95,\n wspace=0.2, hspace=0.02)\n \n lons = 105\n lone = 165\n late = 50\n lats = 15\n \n ax_l = [ ax1 ]\n m_l = prep_proj_multi('merc', ax_l, ll_lon=lons, ur_lon=lone, ll_lat=lats, ur_lat=late )\n \n x2d, y2d = m_l[0](lon2d, lat2d)\n \n SHADE = ax1.contourf( x2d, y2d, var_,\n cmap=cmap, extend='both',\n levels=levs )\n\n pos = ax1.get_position()\n cb_width = 0.015\n cb_height = pos.height*0.98\n ax_cb = fig.add_axes( [pos.x1, pos.y0+0.01, cb_width, cb_height] )\n cb = plt.colorbar( SHADE, cax=ax_cb, orientation = 'vertical', ticks=levs[::2])\n cb.ax.tick_params( labelsize=8 )\n \n \n clevs = np.arange( 0, 15000, 20 )\n CONT = ax1.contour( x2d, y2d, gph, colors='k',\n linewidths=1.0, levels=clevs )\n ax1.clabel( CONT, fontsize=8, fmt='%.0f' )\n# CONT = ax1.contour( x2d, y2d, mslp*0.01, colors='k',#'gainsboro',\n# linewidths=1.0, levels=clevs )\n# ax1.clabel( CONT, fontsize=8, fmt='%.0f' )\n \n tit = r'Analysis T{0:} (K), valid: {1:}'.format( hpa, vtime.strftime('%HUTC %m/%d') )\n ofig = \"tk{0:}_v{1:}\".format( hpa, vtime.strftime('%H%m%d') )\n fig.suptitle( tit, fontsize=12 )\n \n \n if not quick:\n opath = \"png/TK_anal\"\n os.makedirs(opath, exist_ok=True)\n \n ofig = os.path.join(opath, ofig + \".png\")\n plt.savefig(ofig,bbox_inches=\"tight\", pad_inches = 0.1)\n print(ofig)\n plt.clf()\n else:\n print(ofig)\n plt.show()\n \n\ntime = datetime( 2018, 6, 26, 0 )\netime = datetime( 2018, 7, 6, 0 )\n\nhpa = 700\nhpa = 500\nhpa = 300\nhpa = 200\n#hpa = 950\n\nwhile time <= etime:\n main( vtime=time, hpa=hpa )\n time += timedelta( days=1 )\n","sub_path":"src/TK_anal.py","file_name":"TK_anal.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"193226802","text":"import numpy as np\nfrom typing import List, Optional\n\n\nclass ShapenetTransforms:\n def __init__(self):\n self.coords = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.rotation_matrices = {\n \"xy\": lambda x: self.rotz(x),\n \"xz\": lambda x: self.roty(x),\n \"yz\": lambda x: self.rotx(x),\n }\n\n @staticmethod\n def to_center(pc: np.ndarray):\n center = np.mean(pc, axis=0, keepdims=True)\n pc -= center\n return pc\n\n def on_floor(self, pc: np.ndarray, floor_normal: str = \"z\") -> np.ndarray:\n floor_normal = self.coords[floor_normal]\n height_from_floor = np.min(pc[:, floor_normal])\n floor_translation = np.zeros((1, 3))\n floor_translation[0, floor_normal] = height_from_floor\n pc -= floor_translation\n return pc\n\n def to_rotate(self, pc: np.ndarray, alpha: float, orientation: str = \"xy\") -> np.ndarray:\n rotation_matrix = self.rotation_matrices[orientation](-alpha)\n pc = pc @ rotation_matrix\n return pc\n\n @staticmethod\n def to_translate(pc: np.ndarray, t_vec: np.ndarray, step_size: Optional[float] = 1.0) -> np.ndarray:\n t_vec *= step_size\n t_vec = t_vec.reshape((1, 3))\n pc += t_vec\n return pc\n\n def to_standard(self, pc: np.ndarray) -> np.ndarray:\n pc = self.to_center(pc)\n pc = self.to_rotate(pc, alpha=np.pi/2, orientation='yz')\n pc = self.on_floor(pc)\n return pc\n\n @staticmethod\n def rotx(t: float):\n \"\"\"Rotation about the x-axis.\"\"\"\n c = np.cos(t).item()\n s = np.sin(t).item()\n rot_mat = np.asarray([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])\n return rot_mat\n\n @staticmethod\n def roty(t: float):\n \"\"\"Rotation about the y-axis.\"\"\"\n c = np.cos(t).item()\n s = np.sin(t).item()\n return np.ndarray([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])\n\n @staticmethod\n def rotz(t: float):\n \"\"\"Rotation about the z-axis.\"\"\"\n c = np.cos(t).item()\n s = np.sin(t).item()\n return np.asarray([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])\n\n @staticmethod\n def rand_alpha():\n return 2 * np.pi * np.random.rand(1)\n\n def rand_unit2_vector(self, coord_to_discard: List[str] = ['z']):\n v = 2 * np.random.rand(1, 3) - 1\n for coord in coord_to_discard:\n zero_direction = self.coords[coord]\n v[0, zero_direction] = 0\n return v / np.linalg.norm(v)\n","sub_path":"shapenet/shapenet_transforms.py","file_name":"shapenet_transforms.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"351364025","text":"#!/usr/bin/python3\n\"\"\" Base Model \"\"\"\n\n\nimport uuid\nfrom datetime import datetime\nimport models\n\n\nformat_t = \"%Y-%m-%dT%H:%M:%S.%f\"\n\n\nclass BaseModel():\n \"\"\"\n Defines all common\n attributes/methods for other classes\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\" args: Not used\"\"\"\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = self.created_at\n if (kwargs):\n for key, value in kwargs.items():\n if key == \"__class__\":\n continue\n if key == 'created_at' or key == 'updated_at':\n self.__dict__[key] = datetime.strptime(value, format_t)\n else:\n self.__dict__[key] = value\n else:\n models.storage.new(self)\n\n def __str__(self):\n \"\"\" should print: [] () \"\"\"\n class_name = self.__class__.__name__\n return (\"[{:s}] ({:s}) {}\".format(class_name, self.id, self.__dict__))\n\n def save(self):\n \"\"\" Update datatime\"\"\"\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"\n Returns a dictionary containing all keys/values\n of __dict__ of the instance\n \"\"\"\n class_dict = dict(self.__dict__)\n class_dict[\"__class__\"] = self.__class__.__name__\n class_dict[\"created_at\"] = self.created_at.isoformat()\n class_dict[\"updated_at\"] = self.updated_at.isoformat()\n return class_dict\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"486920177","text":"import os\n\ndef postfixConfig(domain):\n\n #Postfix config\n #master.cf changes\n os.system(\"sed -i '12s/smtp/2525/' /etc/postfix/master.cf\")\n\n allowedSP=\"gmail.com,smtp.gmail.com,yahoo.com,hotmail.com,aol.com\"\n #main.cf changes\n os.system('postconf -e \"myorigin ='+domain+'\"')\n os.system('postconf -e \"myhostname = '+domain+'\"')\n os.system('postconf -e \"relay_domains = '+domain+','+allowedSP+' \"')\n maincf=open(\"/etc/postfix/main.cf\",\"a\")\n maincf.write(\"milter_protocol = 2\\nmilter_default_action = accept\\n\")\n maincf.close()\n\n return True\n","sub_path":"serverConf/scripts/postfix/postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"66144601","text":"import httpx\nimport asyncio\n\nfrom config.config_handler import config\n\n\nasync def handle_language(language, type):\n running = True\n request = config.api() + (config.request().replace(\"___LANG___\", language))\n offset = 0\n entries = {}\n while running:\n print(\"Processing: language=%s, offset=%i\" % (language, offset))\n response = await httpx.AsyncClient().get(request.replace(\"___OFFSET___\", str(offset)), timeout=100)\n response = response.json()\n if 'query-continue-offset' not in response.keys():\n running = False\n else:\n offset = response['query-continue-offset']\n entries.update(handle_results(response[\"query\"][\"results\"], language, type))\n return entries\n\n\ndef handle_results(results, language, type):\n ret = {}\n for element in results:\n set_data = element.popitem()[1]\n data = set_data[\"printouts\"][\"Prefix\"][0]\n if isinstance(data, str):\n ret[data] = {\"lang\": language, \"type\": type}\n return ret\n\n\nasync def main():\n requests = []\n\n for element in config.languages():\n requests.append(handle_language(element[\"lang\"], element[\"type\"]))\n\n test = await asyncio.gather(*requests)\n return test\n\nif __name__ == '__main__':\n test = asyncio.run(main())\n print(len(test))\n print(test)\n concat = {}\n for el in test:\n concat.update(el)\n\n print(len(concat))\n print(concat)\n","sub_path":"set_prefix_fetcher/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"535676769","text":"# File: sort.py\n# Author: Eren Guendelsberger (Fall 2014)\n# Randomly creates a list of 1000 elements between -1000 and 1000.\n# Sorts the list and prints it.\n\nimport random;\n\n# constants for list length, min element, and max element\nLIST_LENGTH = 10;\nMIN = -1000;\nMAX = 1000;\n\ndef main() :\n # create and print list\n list = [];\n print(\"Before sorting: \");\n for i in range(LIST_LENGTH) :\n list.append(random.randint(MIN, MAX));\n print(list[i], \" \", sep=\"\", end = \"\");\n print();\n\n sort(list);\n\n #print list after sorting\n print(\"After sorting: \");\n for i in range(LIST_LENGTH) :\n print(list[i], \" \", sep=\"\", end = \"\");\n print();\n\n# sort the given list\ndef sort(list) :\n return mergesort(list, 0, len(list));\n\n# mergesort the given list (with the given start and end index)\ndef mergesort(list, start, end) :\n # Base case: lists of length <= 1 are already sorted\n if (len(list) <= 1) :\n return list;\n\n # Recursive step: mergesort the left and right half, merge them together\n else :\n half = start + end // 2;\n left = mergesort(list, start, half);\n right = mergesort(list, half, end);\n return merge(left, right);\n\n# merges two sorted lists (left and right) into one sorted list\ndef merge(left, right) :\n leftIndex = 0;\n rightIndex = 0;\n result = []; \n\n # check each element one by one mergint them together\n while (leftIndex < len(left) and rightIndex < len(right)) :\n if (left[leftIndex] > right[rightIndex]) :\n result.append(right[rightIndex]);\n else :\n result.append(left[leftIndex]);\n\n # add the remainder of left\n for i in range(leftIndex, len(left)) :\n result.append(left[i]);\n\n # add the remaining elements of right\n for i in range(rightIndex, len(right)) :\n result.append(right[i]);\n \n return result;\n\nmain()\n","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"229162958","text":"\"\"\"\nDoubly shear layer in 2D.\nSpecial Inputs & standard value: shear_layer_width = 80, initial_perturbation_magnitude = 0.05\n\"\"\"\n\nimport numpy as np\nimport numpy.ma as ma\nfrom lettuce.unit import UnitConversion\n\n\nclass DoublyPeriodicShear2D:\n def __init__(self, resolution, reynolds_number, mach_number, lattice, shear_layer_width=80, initial_perturbation_magnitude=0.05):\n self.initial_perturbation_magnitude = initial_perturbation_magnitude\n self.shear_layer_width = shear_layer_width\n self.resolution = resolution\n self.units = UnitConversion(\n lattice,\n reynolds_number=reynolds_number, mach_number=mach_number,\n characteristic_length_lu=resolution, characteristic_length_pu=1,\n characteristic_velocity_pu=1\n )\n\n def analytic_solution(self, x, t=0):\n raise NotImplementedError\n\n def initial_solution(self, x):\n initial_perturbation_magnitude = self.initial_perturbation_magnitude\n shear_layer_width = self.shear_layer_width\n\n ux = np.tanh(shear_layer_width * (x[1] - 0.25))\n ux[ma.masked_greater(x[1], 0.5).mask] = np.tanh(shear_layer_width * (0.75 - x[1][ma.masked_greater(x[1], 0.5).mask]))\n uy = np.array(initial_perturbation_magnitude * np.sin(2*np.pi*(x[0] + 0.25)))\n\n # switching to ij/matrix-indexing -> 1st entry: i = -y (i/line index is going down instead of up like y), 2nd entry: x = j (column index)\n u = np.array([-uy, ux])\n p = np.array([np.zeros(x[0].shape)])\n return p, u\n\n @property\n def grid(self):\n x = np.linspace(0, 1, num=self.resolution, endpoint=False)\n y = np.linspace(0, 1, num=self.resolution, endpoint=False)\n return np.meshgrid(x, y, indexing='ij')\n\n\n @property\n def boundaries(self):\n return []\n","sub_path":"lettuce/flows/doublyshear.py","file_name":"doublyshear.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"559083210","text":"#!/usr/bin/env python\n\nMAX_TITLE_LEN = 44\n\ndef main():\n\tmonth_base_urls = []\n\tyear_base_urls = []\n\t\n\tin_file = open(\"blog_urls.txt\",\"r\")\n\tout_file = open(\".htaccess\", \"w\")\n\t\n\tfor new_url in in_file:\n\t\tnew_url = new_url.strip()\n\t\tif not new_url or not len(new_url):\n\t\t\tout_file.write(\"\\n\")\n\t\t\t\n\t\telif not new_url.startswith(\"#\"):\n\t\t\told_url = new_url.replace(\"http://www.function1.com\", \"/site\", 1)[:-1]\n\t\t\told_url = old_url.replace(\"6-5\", \"65\").replace(\"6-1\", \"61\").replace(\"6-0\", \"60\").replace(\"4-5\", \"45\")\n\t\t\told_url = \"%s.html\" % (old_url[:min(MAX_TITLE_LEN, len(old_url))].rstrip(\"-\"))\n\t\t\tout_file.write(\"redirect 301 %s %s\\n\" % (old_url, new_url))\n\t\t\n\t\t\tmonth_base_url = new_url[:33]\n\t\t\tif month_base_url not in month_base_urls:\n\t\t\t\tmonth_base_urls.append(month_base_url)\n\t\n\tout_file.write(\"\\n\")\n\tmonth_base_urls.sort()\n\t\n\tfor month_base_new_url in month_base_urls:\n\t\tmonth_base_old_url = month_base_new_url.replace(\"http://www.function1.com\", \"/site\", 1)\n\t\tout_file.write(\"RedirectMatch 301 ^%s$ %s\\n\" % (month_base_old_url, month_base_new_url))\n\t\t\n\t\tyear_base_url = \"%s/\" % ('/'.join(month_base_new_url.split('/')[0:-2]))\n\t\tif year_base_url not in year_base_urls:\n\t\t\tyear_base_urls.append(year_base_url)\n\t\t\t\n\tout_file.write(\"\\n\")\n\tyear_base_urls.sort()\n\t\n\tfor year_base_new_url in year_base_urls:\n\t\tyear_base_old_url = year_base_new_url.replace(\"http://www.function1.com\", \"/site\", 1)\n\t\tout_file.write(\"RedirectMatch 301 ^%s$ %s\\n\" % (year_base_old_url, year_base_new_url))\n\t\t\n\tin_file.close()\n\tout_file.close()\n\t\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"htaccess.py","file_name":"htaccess.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"167950055","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport scipy.linalg as lg\n\n\nimg2=mpimg.imread('Handsome.jpg')\n[r2,g2,b2] = [img2[:,:,i] for i in range(3)]\n\nred = {'Index' : 0,'Mat': r2, 'Color' : 'Reds'}\ngreen = {'Index' : 1, 'Mat': g2, 'Color' : 'Greens'}\nblue = {'Index' : 2, 'Mat': b2, 'Color' : 'Blues'}\nrgb = [red,green,blue]\n\nfor layer in rgb:\n plt.imshow(layer['Mat'], cmap = layer['Color'])\n plt.show()\n\n# SVD decomposition of img\n U,s,V = lg.svd(layer['Mat'])\n \n S = np.zeros(layer['Mat'].shape,s.dtype)\n \n for i in range(s.size):\n S[i][i] = s[i]\n \n layer['U'] = U\n layer['eigenval'] = s\n layer['S'] = S\n layer['V'] = V\n \nimg_new = np.zeros_like(img2)\n\ndimension = 30\nfor layer in rgb:\n S_30 = np.zeros(layer['Mat'].shape,s.dtype)\n \n for i in range(dimension):\n S_30[i][i] = layer['eigenval'][i]\n \n img_new[:,:,layer['Index']]= np.dot(np.dot(layer['U'],S_30),layer['V'])\nplt.imshow(img_new)\nplt.imsave('Handsome_lower_resolution.jpg',img_new)\nplt.show()\n\ndimension = 200\nfor layer in rgb:\n S_200 = np.zeros(layer['Mat'].shape,s.dtype)\n \n for i in range(dimension):\n S_200[i][i] = layer['eigenval'][i]\n \n img_new[:,:,layer['Index']]= np.dot(np.dot(layer['U'],S_200),layer['V'])\nplt.imshow(img_new)\nplt.imsave('Handsome_better_resolution.jpg',img_new)\nplt.show()\n","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"463758253","text":"import os\nimport sys\nbasedir = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0,basedir+'/../..')\n\nimport parsmat as psm\nimport channelutil as chanutil\n\nimport unittest\n\nclass parent_test(unittest.TestCase):\n def calculate_coefficients(self, dat, thres, smatdata):\n psm.use_mpmath_types(dat.TESTDPS)\n chanutil.use_mpmath_types(dat.TESTDPS)\n asymcalc = chanutil.AsymCalc(chanutil.hartrees, thresholds=thres)\n return psm.calculate_coefficients(smatdata, asymcalc), asymcalc\n\nclass test_parsmat(parent_test):\n def runTest(self):\n import mpmathtestdata as dat\n coeffs,_ = self.calculate_coefficients(dat, [0.,2.], dat.smatdata_inel)\n for i in range(4):\n testdps = 1e-37\n self.assertTrue(psm.nw.are_matrices_close(dat.A_inel[i],coeffs[0][i], \n rtol=testdps, atol=testdps))\n self.assertTrue(psm.nw.are_matrices_close(dat.B_inel[i],coeffs[1][i], \n rtol=testdps, atol=testdps))\n\nclass test_fin(parent_test):\n def runTest(self):\n import mpmathtestdata as dat\n coeffs,asymcalc = self.calculate_coefficients(dat,[0.,0.],dat.smatdata_el)\n fun = psm.get_elastic_Fin_fun(coeffs, asymcalc)\n parFinMat = fun(3.0)\n testdps = 1e-37\n self.assertTrue(psm.nw.are_matrices_close(parFinMat,dat.finData_el_3,\n rtol=testdps, atol=testdps))\n\nclass test_Smat(parent_test):\n def runTest(self):\n import mpmathtestdata as dat\n coeffs,asymcalc = self.calculate_coefficients(dat,[0.,0.],dat.smatdata_el)\n fun = psm.get_elastic_Smat_fun(coeffs, asymcalc)\n parsmat = fun(3.0)\n testdps = 1e-37\n self.assertTrue(psm.nw.are_matrices_close(parsmat,dat.smatdata_el_3,\n rtol=testdps, atol=testdps))\n\nclass test_Qmat(parent_test):\n def runTest(self):\n import mpmathtestdata as dat\n coeffs,asymcalc = self.calculate_coefficients(dat,[0.,0.],dat.smatdata_el)\n fun = psm.get_elastic_Qmat_fun(coeffs, asymcalc)\n parqmat = fun(3.0)\n\nif __name__ == \"__main__\":\n #Just for debug\n b = test_parsmat()\n b.runTest()\n","sub_path":"parsmat/tests/test_parsmat_mpmath.py","file_name":"test_parsmat_mpmath.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"18062923","text":"from djongo import models\nfrom pymongo import MongoClient\nfrom config import mongo_host\n\nMONGO_HOST = mongo_host\n\n\nclass Authors(models.Model):\n '''\n This authors represent all imported authors by csv\n '''\n authors_name = models.CharField(max_length=100, blank=False, unique=True)\n\n def get_client(self):\n self.db = MongoClient(host=MONGO_HOST)\n client = self.db.librarybooks.core_authors\n\n return client\n\n def __str__(self):\n return self.authors_name\n\n\nclass Books(models.Model):\n '''\n This books represent all books created\n '''\n name = models.CharField(max_length=100, blank=False)\n edition = models.PositiveIntegerField(blank=False)\n publication_year = models.PositiveIntegerField(blank=False)\n author = models.ManyToManyField(Authors, related_name='books', blank=False)\n\n def get_client(self):\n db = MongoClient(host=MONGO_HOST)\n client = db.librarybooks.core_books\n\n return client\n\n def __str__(self):\n return self.name\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"129377659","text":"#!usr/bin/python\n#-*- coding:utf-8 -*-\n\"\"\"\n@author:shenchen\n@file: urls\n@time: 2018/11/17\n\"\"\"\nfrom django.urls import path\nfrom . import views\nurlpatterns = [\n path('start/', views.spider),\n path('spider2/', views.spider2),\n path('spider3/', views.spider3),\n]\n","sub_path":"spider/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"509015680","text":"#!/usr/bin/python\n'''Search local dirs for AddOns\nCheck for new version online\nUpdate local\n\nTODO\ncache cleanup\nbetter addon path searching\n'''\n# pylint: disable=invalid-name, redefined-builtin, no-name-in-module\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport platform\nimport sys\nimport zipfile\n\ntry:\n from six.moves import input # pylint: disable=redefined-builtin\nexcept ImportError:\n sys.exit(\"Please install six: pip install six\")\n\nfrom utils import namemap, slugify, CACHE, FileNotFoundError, get_version, VERSIONMAP\n\n\nplatname = platform.system()\nif platname == \"Darwin\":\n ADDDIR = \"/Applications/World of Warcraft/Interface/AddOns/\"\nelif platname == \"Windows\":\n ADDDIR = \"\"\n\nLATESTURL = \"https://adonis.iggy.ninja/latest.json\"\n\nADDONS = {}\n# most of these are ignored because they are bliz built-ins or they are part of \"bundles\"\nIGNOREDADDONS = ['vuhdooptions', 'masterplana', 'msbtoptions', 'enchantrix-barker',\n 'titanxp', 'titanbag', 'titanclock', 'titangold', 'titanlocation', 'titanloottype',\n 'titanperformance', 'titanrepair', 'titanvolume',\n 'datastore_talents', 'datastore_stats', 'datastore_spells',\n 'datastore_reputations',\n 'datastore_quests', 'datastore_pets', 'datastore_mails', 'datastore_inventory',\n 'datastore_garrisons', 'datastore_currencies', 'datastore_crafts',\n 'datastore_agenda', 'datastore_achievements', 'datastore_auctions',\n 'blitz_options', 'blitz_progress',\n 'auc-advanced',\n 'auc-stat-stddev', 'auc-stat-simple', 'auc-util-fixah', 'auc-stat-purchased',\n 'auc-stat-ilevel', 'auc-stat-histogram', 'auc-scandata', 'auc-filter-basic',\n 'altoholic_achievements', 'altoholic_summary', 'altoholic_search',\n 'altoholic_guild', 'altoholic_grids', 'altoholic_characters', 'altoholic_agenda',]\n\naparser = argparse.ArgumentParser(description='Check for and update WoW addons.')\naparser.add_argument('-y', dest='yes', action='store_true', default=False,\n help='Answer yes to prompts')\naparser.add_argument('-r', dest='report', action='store_true', default=False,\n help='Report available upgrades only (no changes)')\npargs = aparser.parse_args()\n\nfor ent in os.listdir(ADDDIR):\n slug = namemap(slugify(ent))\n if slug in IGNOREDADDONS:\n continue\n ADDONS[slug] = {}\n try:\n toc = open(os.path.join(ADDDIR, \"{}/{}.toc\".format(ent, ent)))\n for line in toc.readlines():\n if line.startswith(\"##\") and \":\" in line:\n k, v = line.split(':', 1)\n ADDONS[slug][k.strip('# ').lower()] = v.strip('# \\n').strip().strip(r'\\r')\n\n except FileNotFoundError as err:\n # no toc file, that probably means this is a Bliz AddOn or just some random dir\n # print(err)\n pass\n\n# So now we have a list of the the installed addons, check the online database to see if there's a\n# new version\nlatest = json.load(CACHE.getfd(LATESTURL, refresh_age=60))\n\nfor slug, info in ADDONS.items():\n if slug in latest and 'version' in ADDONS[slug]:\n ver, url = latest[slug]\n instver = get_version(ADDONS[slug]['version'])\n latestver = get_version(ver)\n\n if slug in VERSIONMAP and instver in VERSIONMAP[slug]:\n instver = VERSIONMAP[slug][instver]\n\n\n print('Match found in database: {}'.format(slug))\n print('Installed version: {}'.format(instver))\n print('Latest version: {}'.format(latestver))\n\n if instver != latestver and pargs.report is False:\n if pargs.yes is False:\n yn = input('Would you like to upgrade {} from {}? 
[Y/n] '.format(slug, url))\n if pargs.yes or yn is \"\" or yn.startswith('y') or yn.startswith('Y'):\n # do the upgrade\n print('Upgrading {} from {}, please wait...'.format(slug, url))\n ufd = CACHE.get(url)\n with zipfile.ZipFile(ufd) as zfile:\n zfile.extractall(ADDDIR)\n","sub_path":"addons.py","file_name":"addons.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"457434077","text":"#!/usr/bin/env python\n\nimport rospy\nfrom tcp_endpoint.srv import RosUnityHandshake, RosUnityHandshakeResponse\n\nclass RosUnityHandshakeService:\n \"\"\"\n Class to auto-detect Unity IP.\n \"\"\"\n def __init__(self, tcp_sender):\n \"\"\"\n Args:\n tcp_sender: sends messages to Unity\n \"\"\"\n self.srv_class = RosUnityHandshake._request_class()\n self.tcp_sender = tcp_sender\n self.incoming_ip = ''\n\n # The client thread lets us know what the incoming IP is, so we can use it later\n def set_incoming_ip(self, ip):\n self.incoming_ip = ip\n\n def send(self, data):\n message = self.srv_class.deserialize(data)\n if message.ip == '': # if the message specifies an IP, Unity has set an IP override, so use it\n self.tcp_sender.process_handshake(self.incoming_ip, message.port)\n else: # if not, just talk back to the incoming IP\n self.tcp_sender.process_handshake(message.ip, message.port)\n return RosUnityHandshakeResponse(self.tcp_sender.unity_ip)\n","sub_path":"tcp_endpoint/src/tcp_endpoint/RosUnityHandshakeService.py","file_name":"RosUnityHandshakeService.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"283170968","text":"import uuid\n\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass TimeStampedModel(models.Model):\n created_at = models.DateTimeField(\n _('created at'), auto_now_add=True, null=True, blank=True\n )\n updated_at = models.DateTimeField(\n _('updated at'), auto_now=True, null=True, blank=True\n )\n\n class Meta:\n abstract = True\n\n\nclass Genres(TimeStampedModel, models.Model):\n genre_id = models.UUIDField(\n _('genre uuid'), primary_key=True, default=uuid.uuid4,\n editable=False, unique=True\n )\n genre_name = models.TextField(_('genre name'), blank=False)\n genre_desc = models.TextField(\n _('genre description'), blank=True, null=True\n )\n\n class Meta:\n verbose_name = _('genre')\n verbose_name_plural = _('genres')\n db_table = 'content\".\"genres'\n\n def __str__(self):\n return self.genre_name\n\n\nclass People(TimeStampedModel, models.Model):\n person_id = models.UUIDField(\n _('movie uuid'), primary_key=True, default=uuid.uuid4,\n editable=False, unique=True\n )\n full_name = models.TextField(_('full name'), blank=False)\n person_desc = models.TextField(\n _('person description'), blank=True, null=True\n )\n birthday = models.DateField(_('birthday'), blank=True, null=True)\n\n class Meta:\n verbose_name = _('person')\n verbose_name_plural = _('people')\n db_table = 'content\".\"people'\n\n def __str__(self):\n return self.full_name\n\n\nclass Movies(TimeStampedModel, models.Model):\n class MovieType(models.TextChoices):\n MOVIE = 'movie', _('movie')\n SERIAL = 'serial', _('serial')\n\n movie_id = models.UUIDField(\n _('movie uuid'), primary_key=True, default=uuid.uuid4,\n editable=False, unique=True\n )\n movie_title = models.TextField(_('movie title'), blank=False)\n movie_desc = models.TextField(_('movie desc'), blank=True, null=True)\n movie_type = models.CharField(_('movie type'), max_length=10,\n choices=MovieType.choices)\n movie_rating = models.DecimalField(\n _('rating'), max_digits=2, decimal_places=1,\n validators=[MinValueValidator(0), MaxValueValidator(10)],\n blank=True, null=True\n )\n movie_genres = models.ManyToManyField(\n Genres,\n through='MovieGenres',\n through_fields=('movie', 'genre'),\n )\n movie_people = models.ManyToManyField(\n People,\n through='MoviePeople',\n through_fields=('movie', 'person')\n )\n\n class Meta:\n verbose_name = _('movie')\n verbose_name_plural = _('movies')\n db_table = 'content\".\"movies'\n ordering = ['movie_title']\n\n def __str__(self):\n return self.movie_title\n\n\nclass MoviePeople(models.Model):\n class PersonRole(models.TextChoices):\n ACTOR = 'actor', _('actor')\n DIRECTOR = 'director', _('director')\n WRITER = 'writer', _('writer')\n\n movie_people_id = models.UUIDField(\n _('movie people uuid'), primary_key=True, default=uuid.uuid4,\n editable=False, unique=True\n )\n movie = models.ForeignKey(Movies, related_name='people_related', on_delete=models.CASCADE)\n person = models.ForeignKey(People, on_delete=models.CASCADE)\n person_role = models.CharField(max_length=10, choices=PersonRole.choices)\n\n class Meta:\n verbose_name = _('movie person')\n verbose_name_plural = _('movie people')\n db_table = 'content\".\"movie_people'\n\n def __str__(self):\n return f'{self.movie} ({self.person}, {self.person_role})'\n\n\nclass MovieGenres(models.Model):\n movie_genres_id = models.UUIDField(\n _('movie genres uuid'), primary_key=True, default=uuid.uuid4,\n editable=False, 
unique=True\n )\n movie = models.ForeignKey(Movies, on_delete=models.CASCADE)\n genre = models.ForeignKey(Genres, on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _('movie genre')\n verbose_name_plural = _('movie genres')\n db_table = 'content\".\"movie_genres'\n\n def __str__(self):\n return f'{self.movie} ({self.genre})'\n","sub_path":"movies_admin/movies/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"413854577","text":"from typing import List, Iterable\n\nfrom movie_web_app.adapters.repository import AbstractRepository\nfrom movie_web_app.domainmodel.model import User,Review,Movie ,make_review\nfrom movie_web_app.domainmodel.genre import Genre\nimport movie_web_app.adapters.repository as repo\n\n\nclass NonExistentmovieException(Exception):\n pass\n\n\nclass UnknownUserException(Exception):\n pass\n\ndef get_movie_ids_for_genre(genre_name, repo: AbstractRepository):\n movie_ids = repo.get_movie_ids_for_genre(genre_name)\n\n return movie_ids\n\ndef get_movie(movie_id: int, repo: AbstractRepository):\n movie = repo.get_movie(movie_id)\n\n if movie is None:\n raise NonExistentmovieException\n\n return movie_to_dict(movie)\n\n# ============================================\n# Functions to convert model entities to dicts\n# ============================================\n\ndef movie_to_dict(movie: Movie):\n movie_dict = {\n 'id': movie.id,\n 'year': movie.year,\n 'title': movie.title,\n 'description': movie.description,\n # 'hyperlink': movie.hyperlink,\n # 'image_hyperlink': movie.image_hyperlink,\n 'reviews': reviews_to_dict(movie.reviews),\n 'genres': genres_to_dict(movie.genres)\n }\n return movie_dict\n\n\ndef movies_to_dict(movies: Iterable[Movie]):\n return [movie_to_dict(movie) for movie in movies]\n\ndef review_to_dict(review: Review):\n review_dict = {\n 'username': review.user.user_name,\n 'movie_id': review.rating,\n 'review_text': review.review_text,\n 'timestamp': review.timestamp\n }\n return review_dict\n\n\ndef reviews_to_dict(reviews: Iterable[Review]):\n return [review_to_dict(review) for review in reviews]\n\n\ndef genre_to_dict(genre: Genre):\n genre_dict = {\n 'name': genre.genre_name,\n 'genreged_movies': [movie.id for movie in repo.repo_instance.get_result(genre.genre_name)]\n }\n return genre_dict\n\n\ndef genres_to_dict(genres: Iterable[Genre]):\n return [genre_to_dict(genre) for genre in genres]\n\n\n# ============================================\n# Functions to convert dicts to model entities\n# ============================================\n\ndef dict_to_movie(dict):\n movie = Movie(dict.title, dict.year)\n movie.id=dict.id\n movie.description = dict.description\n movie.hyperlink = dict.hyperlink\n # Note there's no reviews or genres.\n return movie\n\n","sub_path":"movie_web_app/genre_blueprint/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"214172567","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 9 19:26:14 2017\n\n@author: Kinia\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.stats import norm, hypergeom as hg, binom\nimport matplotlib.pyplot as plt\nfrom math import floor\n \ndef countarrayZ(n1, n2, x1, x2, variance):\n Z = np.zeros(np.shape(x1))\n b = (variance > 0) # bo inaczej wywali błąd, czy może zostać Z=0?\n if (np.any(variance<0)):\n print(\"wariancja<0!!\")\n b0 = (variance == 0)\n Z[b] = (x1[b]/n1 - x2[b]/n2) / np.sqrt(variance[b])\n Z[b0] = x1[b0]/n1 - x2[b0]/n2\n return Z\n \nalpha = 0.05\n\nlicznik=50\n\n#N1 = 30 # cała populacja\n#vectorM1 = np.array([21, 24, 27]) # ilość z daną cechą\nN1 = 1000\nvectorM1 = np.array([500, 400, 300])\nn1 = 25 # probka\nvectorp1 = vectorM1/N1\n \n#N2 = 50 # cała populacja\n#M2 = 30 # ilość z daną cechą\nN2 = 3000\nM2 = 1800\nn2 = 50 # probka\np2 = M2/N2\n\narrayPowerZ = np.zeros((3, licznik)) \narrayPowerZb = np.zeros((3, licznik)) \n \nfor j in range(1, licznik+1):\n \n n=j #n2\n \n n1=j # n1 = n2 ?\n \n for i in range(3):\n \n # test Z (ze skończoną poprawką)\n \n M1 = vectorM1[i]\n p1 = vectorp1[i] \n \n L1 = max(0, M1 - N1 + n1)\n L2 = max(0, M2 - N2 + n)\n U1 = min(n1, M1)\n U2 = min(n, M2)\n \n size1 = U1 + 1 - L1\n size2 = U2 + 1 - L2 \n vectorK1 = np.arange(L1, U1 + 1).reshape((size1, 1)) # pionowy wektor\n vectorK2 = np.arange(L2, U2 + 1)\n arrayK1 = np.tile(vectorK1, (1, size2)) # wektory K1 w pionie\n arrayK2 = np.tile(vectorK2, (size1, 1)) # wektory K2 w poziomie\n \n h1 = hg.pmf(vectorK1, N1, M1, n1) \n h2 = hg.pmf(vectorK2, N2, M2, n)\n arrayH1 = np.tile(h1, (1, size2)) # wektory w pionie\n arrayH2 = np.tile(h2, (size1, 1)) # wektory w poziomie \n arrayH = arrayH1 * arrayH2\n \n variance_k = ((N1 - n1)/(n1*(N1 - 1)) + (N2 - n)/(n*(N2 - 1))) * ((arrayK1 + arrayK2)/(n1 + n)) * (1 - (arrayK1 + arrayK2)/(n1 + n))\n Z_k = countarrayZ(n1, n, arrayK1, arrayK2, variance_k)\n \n quantile = norm.ppf(1-alpha/2)\n bZ = np.abs(Z_k) > quantile # indykator\n b0 = (variance_k == 0) #w tym przypadku porownujemy estp1 i estp2\n bZ[b0] = (arrayK1[b0]/n1 != arrayK2[b0]/n)\n powerZ = np.sum(arrayH[bZ])\n arrayPowerZ[i, j-1] = powerZ\n \n # test bez skończonej poprawki\n \n size1 = n1 + 1\n size2 = n + 1\n vectorK1 = np.arange(size1).reshape((size1, 1))\n vectorK2 = np.arange(size2)\n arrayK1 = np.tile(vectorK1, (1, size2))\n arrayK2 = np.tile(vectorK2, (size1, 1))\n \n b1 = binom.pmf(vectorK1, n1, p1) \n b2 = binom.pmf(vectorK2, n, p2)\n arrayB1 = np.tile(b1, (1, size2))\n arrayB2 = np.tile(b2, (size1, 1)) \n arrayB = arrayB1 * arrayB2\n \n vectorestp = (arrayK1 + arrayK2)/(n1 + n)\n varianceb_k = vectorestp*(1 - vectorestp)*(1/n1 + 1/n) \n \n Zb_k = countarrayZ(n1, n, arrayK1, arrayK2, varianceb_k)\n \n \n bZb = np.zeros((size1, size2))\n \n for r in range(size1):\n for c in range(size2):\n \n if (varianceb_k[r,c] != 0):\n \n estpK = (arrayK1[r,c] + arrayK2[r,c])/(n1 + n)\n \n sizex1 = n1 + 1\n sizex2 = n + 1 \n vectorX1 = np.arange(sizex1).reshape((sizex1, 1)) # pionowy wektor\n vectorX2 = np.arange(sizex2)\n arrayX1 = np.tile(vectorX1, (1, sizex2)) # wektory X1 w pionie\n arrayX2 = np.tile(vectorX2, (sizex1, 1)) # wektory X2 w poziomie\n \n bx1 = binom.pmf(vectorX1, n1, estpK) \n bx2 = binom.pmf(vectorX2, n, estpK)\n arrayBx1 = np.tile(bx1, (1, sizex2))\n arrayBx2 = np.tile(bx2, (sizex1, 1)) \n arrayBx = arrayBx1 * arrayBx2\n \n estp = (arrayX1 + arrayX2)/(n1 + n)\n \n varianceb_x = estp*(1 - estp)*(1/n1 + 1/n)\n Zb_x = countarrayZ(n1, n, arrayX1, arrayX2, 
varianceb_x) \n bZx = np.abs(Zb_x) >= np.abs(Zb_k[r,c]) # indykator\n bx0 = varianceb_x == 0\n bZx[bx0] = np.abs(Zb_x[bx0]) >= abs(arrayK1[r,c]/n1 - arrayK2[r,c]/n)\n pvalueZb = np.sum(arrayBx[bZx])\n bZb[r,c] = pvalueZb <= alpha # zapisuje się 0 lub 1\n elif (arrayK1[r,c]/n1 != arrayK2[r,c]/n): \n bZb[r,c] = 1\n print(\"coś\")\n \n \n \n bZb = (bZb == 1)\n powerZb = np.sum(arrayB[bZb])\n arrayPowerZb[i, j-1] = powerZb\n \n\nvectorX = np.arange(1, licznik+1)\n\nfig = plt.figure()\ncolor = ['b', 'g', 'r']\nlegZ1 = \"test Z: $p_1=\" + str(vectorp1[0]) + \"$\"\nlegZ2 = \"$p_1=\" + str(vectorp1[1]) + \"$\"\nlegZ3 = \"$p_1=\" + str(vectorp1[2]) + \"$\"\nlegE1 = \"test Zb: $p_1=\" + str(vectorp1[0]) + \"$\"\nlegE2 = \"$p_1=\" + str(vectorp1[1]) + \"$\"\nlegE3 = \"$p_1=\" + str(vectorp1[2]) + \"$\"\nlegZ = [legZ1, legZ2, legZ3]\nlegE = [legE1, legE2, legE3]\nfor i in range (3):\n plt.plot(vectorX, arrayPowerZb[i], color[i], label = legE[i])\nfor i in range (3):\n plt.plot(vectorX, arrayPowerZ[i], color[i], ls = '--', label = legZ[i])\nplt.grid(True)\nplt.xlabel(\"$n$\", fontsize=14)\n#plt.xlabel(\"$n_1(n_2=2n_1)$\", fontsize=14)\nplt.ylabel(\"moc testu\")\ntitle = \"$N_1=\" + str(N1) + \"$, $\" + \"N_2=\" + str(N2) + \"$, $\" + \"p_2=\" + str(p2) + \"$\"\nplt.title(title, fontsize=14)\n#plt.legend([legZ1, legZb1, legZ2, legZb2, legZ3, legZb3], loc='upper center', bbox_to_anchor=(0.5, -0.2), ncol=3)\nplt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)\n","sub_path":"programy/2ZbtestZtest_power_n.py","file_name":"2ZbtestZtest_power_n.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"307516813","text":"from flask import Flask, url_for\n\n#create the application.\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return \"
This is an index page
\"\n\n@app.route('/welcome/')\n@app.route('/welcome/')\ndef welcome(name = None):\n if name is None:\n return \"
Hello %s !
\" % \"Unknown User\"\n else:\n return \"
Hello %s !
\" % name\n\n@app.route('/test1/')\ndef test1():\n fname = 'welcome'\n return \"url for function %s=%s\" % (fname, url_for(fname))\n\n@app.route('/test2/')\ndef test2():\n fname, name = 'welcome', 'foo'\n return \"url for function= %s with name= %s is %s\" % (fname,name,url_for(fname, name = name))\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","sub_path":"url-for.py","file_name":"url-for.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"274766579","text":"#!/usr/bin/env python\n\nimport logging\nimport os\nimport pexpect\nfrom data_processing import DataProcess\n\n\nclass TransferData(DataProcess):\n \"\"\"\n transfers the data files from output directory to remote vm - hadoop cluster\n \"\"\"\n\n def __init__(self, input_path, output_path, backup_path):\n super(TransferData, self).__init__(input_path, output_path, backup_path)\n\n def transfer_data(self, remote_user, remote_machine):\n \"\"\"\n transfers the data from input path to output path. output path is usually a node in hadoop cluster\n :param: remote_user :string: the username in hadoop node\n :param: remote_machine:string : the ip address/hostname of the hadoop cluster node\n :return:\n \"\"\"\n self.logger.info(\"Starting to transfer the filtered data to hadoop cluster\")\n files = self.check_path(self.input_path)\n if files:\n status = self.create_output_path()\n assert status, \"Error: Could not create output path\"\n status = self.create_backup_path()\n assert status, \"Error: Could not create backup path\"\n for file in files:\n file_path = os.path.join(self.input_path, file)\n scp_cmd = 'scp {} {}@{}:/{}'.format(file_path, remote_user, remote_machine, self.output_path)\n self.logger.info(\"----- scp_cmd = {}\".format(scp_cmd))\n pexpect.run(scp_cmd)\n # take the back up of the transfered file\n status = self.take_backup(file_path)\n if not status:\n self.logger.info(\"Error: Failed to take a backup of the file\")\n self.wait_timeout(1)\n else:\n self.logger.info(\"There are no files to transfer. Will wait for 2secs\")\n self.wait_timeout(2)\n\n\nclass TestTransferData(object):\n \"\"\"\n unit tests for TransferData class\n \"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.input_path = \"/home/latha/my_django/filter_out\"\n cls.backup_path = \"/home/latha/my_django/ransfer_backup\"\n cls.output_path = '/home/latha/my_django/transfer_out'\n cls.remote_user = 'latha'\n cls.remote_machine = 'localhost'\n cls.td = TransferData(cls.input_path, cls.output_path, cls.backup_path)\n\n def test_transferd(self):\n \"\"\"\n unittest to verify the functionality of transfer_data process\n :return:\n \"\"\"\n self.td.transfer_data(self.remote_user, self.remote_machine)\n files = self.td.check_path(self.td.output_path)\n assert files, \"The copying of files to Hadoop cluster failed\"\n files = self.td.check_path(self.td.backup_path)\n assert files, \"Error: Backup failed\"\n\n","sub_path":"data_analysis/transferd.py","file_name":"transferd.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"42832061","text":"\n\n#calss header\nclass _SHAMROCK():\n\tdef __init__(self,): \n\t\tself.name = \"SHAMROCK\"\n\t\tself.definitions = [u'a plant that has three round leaves on each stem']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_shamrock.py","file_name":"_shamrock.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"216453389","text":"\"\"\"Expense Tracker\"\"\"\nimport logging\nimport expense_calculation_class_objects\nimport pickle\nimport pprint\nimport json\nimport yaml\nimport time\nfrom datetime import datetime\n\n\nlogging.basicConfig(filename='../log/Error_log.log', level=logging.DEBUG, \n format='%(asctime)s %(levelname)s %(name)s %(message)s')\nlogger=logging.getLogger(__name__)\n\n\ndef get_expense_options(keyvalue):\n \"\"\"\n takes the key value of the option to be selected and returns the options related to that key\n returns none in case of error\n \"\"\"\n try:\n print('please select a option from below : ')\n with open('config/expense_details.json') as f:\n data = json.load(f)\n for key,value in data[keyvalue].items():\n print(key + ' : '+ value)\n return data[keyvalue]\n except Exception as e:\n logging.exception(e)\n return None\n\ndef selected_option(keyvalue):\n \"\"\"\n takes the key value as input and returns value for selected key \n \"\"\"\n try:\n options = get_expense_options(keyvalue)\n option_selected = input(\"selected a option : \")\n return options[option_selected]\n except Exception as e:\n logging.exception(e)\n return None\n\n\ndef write_object_to_file(expense):\n \"\"\"\n writes the expense object to the json file\n \"\"\"\n try:\n with open(\"../data/test.json\",'w') as output:\n json.dump(expense,output)\n with open(\"../data/test.yaml\",'w') as output:\n yaml.dump(expense, output, default_flow_style=False)\n except Exception as e:\n logging.exception(e)\n return None\n\n\ndef expense_calculator():\n \"\"\"\n Algorithm\n \"\"\"\n expense = dict()\n expense[\"id\"] = int(round(time.time() * 1000)) \n expense[\"create_time\"] = datetime.now().strftime(\"%m-%d-%Y %H:%M:%S PST\") \n expense[\"Type\"] = selected_option(\"Type\")\n expense[\"Category\"] = selected_option(\"Category\")\n expense[\"Amount\"] = int(input(\"Enter the amount : $ \"))\n write_object_to_file(expense)\n\nif __name__ == \"__main__\":\n expense_calculator()\n","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"456833908","text":"p=[10,20,30]\npp=[10,20]\ndef fuu(r,q):\n r=[10]\n r.append(40)\n print(r)#[10,40]\n q.append(50)\n print(q) #[10,20,50]\nfuu(p,pp)\nprint(p) # this is not affected at all [10,20,30]\nprint(pp)#[this will be affected as both pp and q refer to same memory location [10,20,50]\n#print(r)#error as r is local we can not call outside\n\n\n\na=[1,2,3]\ndef fun(b):\n global a\n b=[10]\n b.append(40)\n print(b)\n a=[5,7] #here the value is changing\n a.append(6)\nfun(a)\nprint(a)\n\n#if u are declaring somthing global or nonlocal and changing that inside the fun it will be changed for outside also\n#global varible will be outside of the function and nonlocal varible should be in the outer function\n#outside of inner function means for non local nested function is needed\n#in both the cases the value will be over written if u change.\n\n\n\ndef outer():\n x = \"local\"\n def inner():\n nonlocal x\n x = \"nonlocal\"\n print(\"inner:\", x)\n inner()\n print(\"outer:\", x) #for this also the value is changing\nouter()","sub_path":"pra_project/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"51376636","text":"from django.core.management.base import BaseCommand\nfrom fantasyfirst.celery import app\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n scheduled = app.control.inspect().scheduled()\n for scheduled_list in list(scheduled.values()):\n for task in scheduled_list:\n app.control.revoke(task['request']['id'])\n","sub_path":"ff/management/commands/ff_purge_tasks.py","file_name":"ff_purge_tasks.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"528555324","text":"from django.core.management.base import BaseCommand\nfrom blog.models import Post\nfrom blog.thirdPartyApplications.automateBot import Bot\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"--action\", choices=(\"showonly\", \"realperform\"))\n\n def handle(self, *args, **options):\n self.stdout.write(self.style.SUCCESS(\"---------------------> AUTOMATE BOT START\"))\n bot = Bot()\n if options['action'] == \"realperform\":\n bot.real_perform()\n else:\n print(bot.max_likes_per_user)\n self.stdout.write(self.style.SUCCESS(\"---------------------> AUTOMATE BOT FINISHED\"))\n\n\n\n","sub_path":"MPB/blog/management/commands/automatebot.py","file_name":"automatebot.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"442495152","text":"from concurrent.futures.process import ProcessPoolExecutor\r\nfrom typing import List, Callable, Tuple\r\n\r\nfrom rx import from_future, merge\r\n\r\nfrom hashtag_counter import HashTag\r\n\r\n\r\nclass Runner:\r\n def __init__(self, on_success, on_error, on_complete):\r\n self.on_success = on_success\r\n self.on_error = on_error\r\n self.on_complete = on_complete\r\n\r\n def execute(self, requester: Callable[[HashTag], Tuple[str, dict]], payloads: List[HashTag]):\r\n \"\"\"\r\n Executes the given requester func (HashTag -> [(str, dict)]) in parallel\r\n creating a new process for each request.\r\n :param requester: Function used for performing the API request.\r\n :param payloads: List of hash tags to past to the requester func\r\n :return: None\r\n \"\"\"\r\n observables = []\r\n with ProcessPoolExecutor() as executor:\r\n for payload in payloads:\r\n _future = executor.submit(requester, payload)\r\n observables.append(from_future(_future))\r\n merge(*observables).subscribe(self.on_success, self.on_error, self.on_complete)\r\n","sub_path":"hashtag_counter/core/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"415414760","text":"import subprocess\nimport os\n\nfrom bencher.ui import UI\n\n# Temporary: only works with make\n\n\nclass Make:\n\n def __init__(self, agent, args=None):\n self.agent = agent\n self.args = args\n\n if agent is 'make':\n if not os.path.exists('Makefile'):\n UI.red(\"No Makefile found!\")\n exit(1)\n\n def make_test(self, test):\n UI.purple(\"BUILDING:\")\n make = subprocess.run([self.agent, test], stdout=subprocess.PIPE)\n if make.returncode != 0:\n UI.red(str(make.stdout.decode()))\n UI.red(\"RETURN CODE: \" + str(make.returncode))\n else:\n UI.green(str(make.stdout.decode()))\n\n def clean_up(self):\n make = subprocess.run([self.agent, 'clean'], stdout=subprocess.PIPE)\n if make.returncode != 0:\n UI.red(str(make.stdout.decode()))\n UI.red(\"RETURN CODE: \" + str(make.returncode))\n else:\n UI.green(str(make.stdout.decode()))\n","sub_path":"bencher/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"350397864","text":"\nfrom datetime import datetime\n\nfrom models.shared import db\n\nclass Unit(db.Model): # type: ignore\n __tablename__ = \"unit\"\n id = db.Column(db.Integer, primary_key=True)\n created_at = db.Column(db.DateTime(), default=datetime.utcnow)\n updated_at = db.Column(db.DateTime(), onupdate=datetime.utcnow)\n\n symbol = db.Column(db.String(255), nullable=False)\n measure_of = db.Column(db.String(255), nullable=False)\n description = db.Column(db.String(255), default=0)\n\n __table_args__ = (db.UniqueConstraint(\"symbol\"), {'extend_existing': True},)","sub_path":"models/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"343339864","text":"#! usr/bin/env python3\n\nfrom .common import path_cost, random_permutation\nfrom ..switch import decide\n\nimport math\nimport random\n\n\"\"\"\n2.5\n\nIterated Local Search improves upon Multi-Restart Search by\nsampling in the broader neighborhood of candidate solutions\nand using a Local Search technique to refine solutions to\ntheir local optima. Iterated Local Search explores a sequence\nof solutions created as perturbations of the current best\nsolution, the result of which is refined using an embedded\nheuristic.\n\nThe code listing applies the algorithm to the Berlin52 instance\nof the Traveling Salesman Problem, taken from TSPLIB. The problem seeks a\npermutation of the order to visit cities (called a tour) that minimizes the\ntotal distance traveled. The optimal tour distance for the Berlin52 instance is\n7542 units.\n\"\"\"\n\ndef stochastic_two_opt(permutation):\n \"\"\"\n Looks for a random subsequence in the permutation and reverses them.\n\n See also: https://en.wikipedia.org/wiki/2-opt\n \"\"\"\n perm = [permutation[i] for i in range(len(permutation))]\n upper_bound = len(perm) - 1\n c1, c2 = random.randint(0, upper_bound), random.randint(0, upper_bound)\n exclude = [c1]\n \n if c1 == 0:\n exclude.append(upper_bound)\n else:\n exclude.append(c1 - 1)\n \n if c1 == upper_bound:\n exclude.append(0)\n else:\n exclude.append(c1 + 1)\n \n while c2 in exclude:\n c2 = random.randint(0, upper_bound)\n \n if c2 < c1:\n c1, c2 = c2, c1\n \n perm_range = perm[c1:c2]\n perm_range.reverse()\n perm[c1:c2] = perm_range\n \n return perm\n\n#FIXME modifying and returning an argument?\ndef local_search(best, cities, max_no_improv):\n count = 0\n \n while count < max_no_improv:\n candidate = {}\n candidate[\"vector\"] = stochastic_two_opt(best[\"vector\"])\n candidate[\"cost\"] = path_cost(candidate[\"vector\"], cities)\n \n if candidate[\"cost\"] < best[\"cost\"]:\n count = 0\n else:\n count += 1\n \n if candidate[\"cost\"] < best[\"cost\"]:\n best = candidate\n \n return best\n\ndef double_bridge_move(perm):\n \"\"\"\n Partitions the permutation into 4 subsequences and then shuffles those\n subsequences to create a new permutation.\n \"\"\"\n pos1 = 1 + random.randint(0, math.floor(len(perm) / 4))\n pos2 = pos1 + 1 + random.randint(0, math.floor(len(perm) / 4))\n pos3 = pos2 + 1 + random.randint(0, math.floor(len(perm) / 4))\n p1 = perm[0:pos1] + perm[pos3:len(perm)]\n p2 = perm[pos2:pos3] + perm[pos1:pos2]\n return p1 + p2\n\ndef perturbation(cities, best):\n candidate = {}\n candidate[\"vector\"] = double_bridge_move(best[\"vector\"])\n candidate[\"cost\"] = path_cost(candidate[\"vector\"], cities)\n return candidate\n\ndef search(cities, max_iterations, max_no_improv, output_format=\"human\"):\n best = {}\n best[\"vector\"] = random_permutation(cities)\n best[\"cost\"] = path_cost(best[\"vector\"], cities)\n best = local_search(best, cities, max_no_improv)\n \n for i in range(max_iterations): \n candidate = perturbation(cities, best)\n candidate = local_search(candidate, cities, max_no_improv)\n \n if candidate[\"cost\"] < best[\"cost\"]:\n best = candidate\n \n if output_format == \"csv\":\n print(\"%s,%s\" % (i, best[\"cost\"]))\n else:\n print(\"Iteration #\" + str(i) + \" best = \" + str(best[\"cost\"]))\n \n return best\n\nif __name__ == \"__main__\":\n from .common import berlin52\n \n # Algorithm configuration\n max_iterations = 100\n max_no_improv = 50\n output_format = decide()\n \n # Execute the algorithm\n best = search(berlin52, max_iterations, 
max_no_improv, output_format)\n\n if output_format == \"csv\":\n print(\"%s,%s\" % (best[\"cost\"], \",\".join([str(i) for i in best[\"vector\"]])))\n else:\n print(\"Done. Best solution: c = \" + str(best[\"cost\"]) +\", v = \" + str(best[\"vector\"]))\n","sub_path":"python/stochastic/iterated_local_search.py","file_name":"iterated_local_search.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"550347987","text":"import keras\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation\r\nfrom keras import optimizers\r\nfrom keras.utils import np_utils\r\nfrom keras.models import load_model\r\nfrom keras.models import model_from_json\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n#Start Date: 1999-01-04\r\n#End Date : 2018-10-19\r\n\r\nprint(\"Loading Data...\\n\")\r\ndata = pd.read_excel('SData.xlsx')\r\ndata2= pd.read_excel('Dollars to Euro.xlsx')\r\ndata3= pd.read_excel('OilinEurope.xlsx')\r\n\r\ngolddata = data.ix[1045:, 1]\r\nproddata = data.ix[1045:, 2]\r\nconsdata = data.ix[1045:, 3]\r\ndtoedata = data2.ix[0:,1]\r\noildata = data3.ix[3033:,1]\r\n\r\nprint(\"Data Loaded\\n\")\r\n#print(\"GOLD IS : \\n\",golddata)\r\n#print(\"Prod is :\\n \",proddata)\r\n#print(\"Cons is : \\n\",consdata)\r\n#print(\"Dollars to Euro is : \\n\",dtoedata)\r\n#print(\"The Oil data is :\\n\",oildata)\r\n\r\ncount=0\r\nfor i in golddata.values:\r\n count+=1\r\nprint(\"The Gold Count value is : \",count)\r\n\r\n#gold=np.array(golddata)\r\n#np.transpose(gold)\r\ngold=golddata.values\r\n#print(\"The shape of gold array is : \",np.shape(golddata))\r\n#print(\"THe GOLD IS \\n\",gold)\r\nprocon=np.zeros(shape=(count,3))\r\nk=0\r\nfor i in proddata.values:\r\n procon[k][0]=i\r\n k=k+1\r\nk=0\r\nfor j in dtoedata.values:\r\n procon[k][1]=j\r\n k=k+1\r\nk=0\r\nfor j in oildata.values:\r\n procon[k][2]=j\r\n k=k+1\r\n#print(\"The Shape of Producer and Consumer Index array is \",np.shape(procon))\r\n#print(procon)\r\n\r\n\r\n\r\nprint(\"Splitting Data into Training Set and Test Set\\n\")\r\n\r\ntraingold=gold[0:4500]\r\ntestgold=gold[4500:5000]\r\n#print(\"The shape of Gold Training data is : \",np.shape(traingold),\" And Gold Test is : \",np.shape(testgold))\r\nxtrain=procon[0:4500]\r\nxtest=procon[4500:5000]\r\n#xtrain=proddata[0:5000]\r\n#xtest=proddata[5000:]\r\n#print(\"The shape of ProCon Training data is : \",np.shape(xtrain),\" And ProCon Test is : \",np.shape(xtest))\r\n#print(\"The Training X data is : \\n\",xtrain)\r\n#print(\"The Training Gold data is : \\n\",traingold)\r\n\r\n'''\r\n# normalize the dataset\r\nscaler = MinMaxScaler(feature_range=(0,10))\r\ngolddata = scaler.fit_transform(golddata)\r\nprocon = scaler.fit_transform(procon)\r\nconsdata = scaler.fit_transform(consdata)\r\n####\r\n\r\nmodel = Sequential()\r\nmodel.add(Dense(6, input_shape=(2,)))\r\nmodel.add(Dense(1))\r\nmodel.compile(loss='mean_squared_error', optimizer='adam')\r\nmodel.fit(xtrain, traingold, epochs=100, batch_size=1, verbose=2)\r\n###\r\n\r\n\r\n'''\r\n\r\nmodel=Sequential()\r\nmodel.add(Dense(6,input_shape=(3,)))\r\nmodel.add(Dense(6,activation='relu'))\r\nmodel.add(Dense(5,activation='linear'))\r\nmodel.add(Dense(1))\r\n#rmsprop=keras.optimizers.rmsprop(lr=0.1, rho=0.009)\r\n#sgd = keras.optimizers.SGD(lr=0.01)\r\nmodel.compile(loss='mean_squared_error',optimizer='adam')\r\nprint(\"\\nThe Model framework is : \\n\")\r\nmodel.summary()\r\nprint(\"Do you want to show the Training Progress ?\\nPress : 1)Show\\t2)Hide\")\r\nshow=int(input())\r\nif(show==1):\r\n show=2\r\nelif(show==2):\r\n show=0\r\nelse:\r\n print(\"Invalid Input\")\r\nprint(\"\\n\\tThe training has started...\\n\")\r\nmodel.fit(xtrain,traingold,epochs=1000,batch_size=45,verbose=show)\r\nscore = model.evaluate(xtest,testgold,verbose=0)\r\nprint(\"The model metrics is : 
\",model.metrics_names)\r\npredict=model.predict(xtest)\r\n#accuracy=accuracy_score(xtest,testgold)\r\nprint(\"Do you want to show the Predicted values ?\\nPress : 1)Show\\t2)Hide\")\r\nshow=int(input())\r\nif(show==1):\r\n print(\"\\nThe predicted gold values are :\\n\")\r\n print(predict)\r\nelif(show==2):\r\n print(\"\\nMoving on to Graphical Representation...\\n\")\r\nelse:\r\n print(\"Invalid Input\")\r\nprint(\"The Score is : \",score)\r\n\r\nplt.plot(testgold,c='b',label=\"GoldTest\")\r\nplt.plot(predict,c='r',label=\"Predicted\")\r\n#plt.plot(xtrain,c='g',label=\"Training Data\")\r\n#plt.plot(traingold,c='y',label=\"Gold Training\")\r\nplt.legend(loc='upper left')\r\nplt.show()\r\n\r\n##############################################################\r\n\r\n#print(model.get_weights())\r\n\r\n#serialize model to JSON\r\nprint(\"Do you want to save the training model for later use \\nPress : 1)Save\\t2)Donot Save\")\r\nsave=int(input())\r\nif(save==1):\r\n print(\"Saving the model framework and weights\\n\")\r\n model_json = model.to_json()\r\n with open(\"jsonModel.json\",\"w\") as json_file:\r\n json_file.write(model_json)\r\n model.save_weights(\"h5Model.h5\")\r\n print(\"The model is saved sucessfully\")\r\nelif(save==2):\r\n print(\"The model is dumped\\nExiting\")\r\nelse:\r\n print(\"Invalid Input\")\r\n\r\n\r\n","sub_path":"NeuralModel.py","file_name":"NeuralModel.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"142883547","text":"from tempfile import NamedTemporaryFile\nimport shutil\nimport csv\n\nfilename = 'data/character-predictions_edited_onehot.csv'\ntempfile = NamedTemporaryFile(delete=False)\n\nwith open(filename, 'rb') as csvFile, tempfile:\n reader = csv.reader(csvFile, delimiter=',', quotechar='\"')\n writer = csv.writer(tempfile, delimiter=',', quotechar='\"')\n for row in reader:\n if row[4] != 'culture':\n row[4] = cultures.index(row[4])\n if row[5] != 'house':\n row[5] = houses.index(row[5])\n writer.writerow(row)\n\nshutil.move(tempfile.name, filename)\n","sub_path":"onehot_preprocessing.py","file_name":"onehot_preprocessing.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"404556768","text":"#Import libraries\r\nfrom selenium import webdriver\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n#Initial Setup\r\nchrome_path = r\"C:\\Users\\Kyria\\Desktop\\chromedriver.exe\"\r\ndriver = webdriver.Chrome(chrome_path)\r\nbase_url = \"https://store.steampowered.com/search/?sort_by=Reviews_DESC&filter=topsellers&page=\"\r\nframes = list()\r\npage_number = 8\r\n\r\nfor i in range(1,page_number+1):\r\n url = base_url + str(i)\r\n driver.get(url)\r\n names_content = driver.find_elements_by_class_name(\"title\")\r\n ratings_content = driver.find_elements_by_class_name(\"positive\")\r\n prices_content = driver.find_elements_by_css_selector(\".col.search_price.responsive_secondrow\")\r\n names = list()\r\n for name in names_content:\r\n names.append(name.text)\r\n ratings = list()\r\n for rating in ratings_content:\r\n ratings.append(rating.get_attribute(\"data-tooltip-html\"))\r\n percent_rating = list()\r\n user_number = list()\r\n for rating in ratings:\r\n percent_rating.append(rating.split(\" \", 1)[1][0:3])\r\n user_number.append(rating.split(\" user\", 1)[0].split(\"the \")[1])\r\n prices = list()\r\n for price in prices_content:\r\n prices.append(price.text)\r\n df_new = np.array([names, percent_rating, user_number, prices])\r\n info = pd.DataFrame(data=df_new.transpose(), columns=[\"Game\", \"Rating\", \"Number of Reviews\", \"Price\"])\r\n frames.append(info)\r\n\r\ninfo_df = pd.concat(frames)\r\n\r\ninfo_df.to_csv(\"Steam Info.csv\", index=False)\r\ndriver.close()\r\n","sub_path":"steam_scraping.py","file_name":"steam_scraping.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"165243478","text":"def construct_dict(S):\n char_dict = {} # char_dict - словник символів\n for c in S:\n if c not in char_dict:\n char_dict[c] = S.count(c)\n return char_dict\n\ndef find_most_popular_char(D):\n max_char = '' # найпопулярніший символ\n max_num = 0 # кількість його входжень\n for c, num in D.items():\n if num > max_num:\n max_char = c\n max_num = num\n return max_char, max_num\nS = input(\"Задайте рядок \")\nD = construct_dict(S)\nC, N = find_most_popular_char(D)\nprint(\"символ '%s' входить у рядок %d разів\" % (C, N))","sub_path":"Student4Week/Decorator.py","file_name":"Decorator.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"306863091","text":"#!/usr/bin/env python\n\nimport hashlib\nimport re\nimport shutil\n\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport click\nimport jinja2\nimport markdown\nimport yaml\nimport webassets\n\nfrom slugify import slugify\n\nfrom config import read_config\nfrom file_hasher import CachedFileHasher\nfrom mediabuilder import MediaBuilder, group_by_builder\nfrom myextension import LgtExtension\n\n#\n\nTEMPLATE_INDEX = 'index.html'\nTEMPLATE_ARTICLE = 'article.html'\nTEMPLATE_GROUPS_CSS = 'groups.css'\n\nBUILD_INDEX = 'index.html'\nBUILD_CSS_DIR = 'css'\nBUILD_GROUPS_CSS = TEMPLATE_GROUPS_CSS\n\nCONTEXT_FILENAME = 'context.yaml'\nARTICLE_FILENAME = 'article.md'\n\nJINJA_AUTOESCAPE = ['html', 'xml']\n\nRE_DIRNAME = re.compile(r'\\d+ (?PXXX )?(?P.+)')\n\nMARKDOWN_EXTENSIONS = ['meta']\nMARKDOWN_OUTPUT_FORMAT = 'html5'\n\nWEBASSETS_ENV = {\n 'debug': True,\n 'load_path': [\n '.',\n 'static'\n ],\n 'url': '/'\n}\n\nWEBASSETS_CSS_FILES = [\n 'css/app.scss'\n]\nWEBASSETS_CSS = {\n 'filters': 'libsass',\n 'output': 'css/app.css'\n}\n\n#\n\ndef copy_static(src, dst):\n src = str(src)\n dst = str(dst)\n try:\n shutil.rmtree(dst)\n print('Deleted existing folder {}'.format(dst))\n except:\n pass\n\n print('Copying static path {} to {}'.format(src, dst))\n shutil.copytree(src, dst)\n\ndef run_webassets(config):\n env = webassets.Environment(\n directory=config['build_path'],\n **WEBASSETS_ENV\n )\n\n css = webassets.Bundle(\n *WEBASSETS_CSS_FILES,\n **WEBASSETS_CSS\n )\n env.register('css', css)\n\n pprint('Web assets css:')\n pprint(env['css'].urls())\n\ndef dirpath_to_name(path):\n m = RE_DIRNAME.match(path.name)\n if m is None:\n raise RuntimeError('{} is not a valid directory name'.format(path))\n groups = m.groupdict()\n increment_index = groups['do_not_increment_index'] is None\n title = groups['title']\n return increment_index, title\n\ndef dirpath_to_hash(path):\n hasher = hashlib.md5()\n hasher.update(repr(str(path)).encode())\n return hasher.hexdigest()\n\ndef iter_group_articles(path):\n if is_article_dir(path):\n yield path, False\n for p in listdirs(path):\n yield p, True\n\ndef copy_file(src, dst):\n dst.parent.mkdir(exist_ok=True)\n print('Copy file', src, dst)\n shutil.copy2(str(src), str(dst))\n\nclass MediaPublisher(object):\n def __init__(self, media_builder, cache_dir, build_dir, dst_dir, src_dir = Path()):\n self.media_builder = media_builder\n self.cache_dir = cache_dir\n self.build_dir = build_dir\n self.src_dir = src_dir\n self.dst_dir = dst_dir\n\n def __call__(self, preset_name, path):\n path = self.src_dir / path\n for media in self.media_builder(preset_name, self.cache_dir, path):\n cache_path = media['dst']\n media['dst'] = self.dst_dir / cache_path.name\n copy_file(cache_path, media['dst'])\n media['url'] = Path('/') / media['dst'].relative_to(self.build_dir)\n yield media\n\n\ndef build_article_builder(config, context):\n config['media_cache_path'].mkdir(parents=True, exist_ok=True)\n\n build_path = config['build_path']\n\n hash_file = CachedFileHasher(config['media_cache_path'] / config['hash_cache_filename'])\n media_builder = MediaBuilder(\n context['media']['presets'],\n hash_file\n )\n request_media = MediaPublisher(\n media_builder,\n config['media_cache_path'],\n config['build_path'],\n config['build_path'] / 'media'\n )\n MD = markdown.Markdown(\n extensions=MARKDOWN_EXTENSIONS + [LgtExtension(request_media=request_media)],\n output_format=MARKDOWN_OUTPUT_FORMAT\n )\n\n article_index = 0\n\n def build_article(src_dir, 
is_subarticle):\n src_markdown_path = src_dir / ARTICLE_FILENAME\n increment_index, title = dirpath_to_name(src_dir)\n increment_index &= is_subarticle\n nonlocal article_index\n article_index += 1 if increment_index else 0\n slug = slugify(title)\n dst_dir = build_path / slug\n dst_html_path = dst_dir / BUILD_INDEX\n\n MD.reset()\n request_media.src_dir = src_dir\n\n print('Reading markdown {}'.format(src_markdown_path))\n html = MD.convert(src_markdown_path.read_text())\n status = MD.Meta.get('status', [None])[0]\n status_tag = ' {}'.format(status) if status else ''\n title += status_tag\n\n backgrounds = None\n background_src = src_dir / 'background.jpg'\n if background_src.exists():\n medias = request_media('cardbackground', background_src)\n backgrounds = group_by_builder(medias)\n\n return {\n 'src_dir': src_dir,\n 'dst_dir': dst_dir,\n 'index': article_index if increment_index and is_subarticle else None,\n 'is_subarticle': is_subarticle,\n 'title': title,\n 'slug': slug,\n 'src_markdown_path': src_markdown_path,\n 'dst_html_path': dst_html_path,\n 'href': '/{}'.format(slug),\n 'html': html,\n 'location': MD.Meta.get('location', [None])[0],\n 'backgrounds': backgrounds\n }\n return build_article\n\ndef is_article_dir(path):\n return (path / ARTICLE_FILENAME).is_file()\n\ndef listdirs(path):\n return [x for x in sorted(path.iterdir()) if x.is_dir()]\n\ndef load_svgs(config):\n dir = config['static_path'] / 'img'\n return {\n 'svgs': dict([(p.stem, p.read_text()) for p in dir.glob('*.svg')])\n }\n\ndef jinja_render_context(config):\n data_path = config['data_path']\n build_path = config['build_path']\n\n with (data_path / CONTEXT_FILENAME).open() as fp:\n context = yaml.load(fp)\n\n context['index_html_path'] = build_path / BUILD_INDEX\n\n context['groups_css_path'] = build_path / BUILD_CSS_DIR / BUILD_GROUPS_CSS\n\n context['header_image_src'] = data_path / 'header.svg'\n context['header_image_dst'] = build_path / 'media' / 'header.svg'\n context['header_image'] = '/media/header.svg'\n\n build_article = build_article_builder(config, context)\n\n context['articles'] = []\n context['article_groups'] = []\n for group_dir in listdirs(data_path):\n group = {\n 'name': None if is_article_dir(group_dir) else dirpath_to_name(group_dir)[1],\n 'class': 'group-{}'.format(dirpath_to_hash(group_dir))\n }\n\n group_context_yaml = group_dir / CONTEXT_FILENAME\n if group_context_yaml.exists():\n group.update(read_config(group_context_yaml))\n articles = list([build_article(*article) for article in iter_group_articles(group_dir)])\n context['article_groups'].append((group, articles))\n for article in articles:\n context['articles'].append((group, article))\n\n return context\n\ndef iter_with_prev_and_next(it):\n it = list(it)\n size = len(it)\n for i in range(size):\n prev_i = i - 1\n next_i = i + 1\n prev = it[prev_i] if prev_i >= 0 else None\n next = it[next_i] if next_i < size else None\n cur = it[i]\n\n yield prev, cur, next\n\n#\n\n@click.command()\n@click.argument('config_path', type=click.Path(exists=True))\ndef build(config_path):\n # Convert strings to Path\n\n config = read_config(Path(config_path))\n\n # Copy static files\n\n copy_static(config['static_path'], config['build_path'])\n\n ctxt = {}\n\n # Load SVG\n\n ctxt.update(load_svgs(config))\n\n # Render templates\n\n ctxt.update(jinja_render_context(config))\n\n # Copy sitedata\n\n shutil.copy2(ctxt['header_image_src'], ctxt['header_image_dst'])\n\n # Environment\n\n env = jinja2.Environment(\n 
loader=jinja2.FileSystemLoader(str(config['template_path'])),\n autoescape=jinja2.select_autoescape(JINJA_AUTOESCAPE)\n )\n\n # Template\n\n with ctxt['index_html_path'].open('w') as fp:\n env.get_template(TEMPLATE_INDEX).stream(ctxt).dump(fp)\n\n for prev, cur, next in iter_with_prev_and_next(ctxt['articles']):\n prev_group, prev_article = prev if prev else (None, None)\n cur_group, cur_article = cur\n next_group, next_article = next if next else (None, None)\n\n ctxt['group'] = cur_group\n ctxt['prev_article'] = prev_article\n ctxt['article'] = cur_article\n ctxt['next_article'] = next_article\n\n cur_article['dst_dir'].mkdir(exist_ok=True)\n\n with cur_article['dst_html_path'].open('w') as fp:\n env.get_template(TEMPLATE_ARTICLE).stream(ctxt).dump(fp)\n\n with ctxt['groups_css_path'].open('w') as fp:\n env.get_template(TEMPLATE_GROUPS_CSS).stream(ctxt).dump(fp)\n\n run_webassets(config)\n\nif __name__ == '__main__':\n build()\n","sub_path":"scripts/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":8743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"568951203","text":"#!/usr/bin/python3\n\n# Copyright (c) 2017-2021 Dell Inc. or its subsidiaries.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport threading\n\n\nclass ThreadWithExHandling(threading.Thread):\n def __init__(self, logger, object_identity=None, group=None, target=None,\n name=None, args=(), kwargs=None, verbose=None):\n threading.Thread.__init__(self, group=group, target=target, name=name,\n args=args, kwargs=kwargs)\n self.logger = logger\n self.ex = None\n self.object_identity = object_identity\n\n def run(self):\n try:\n threading.Thread.run(self)\n except:\n self.ex = sys.exc_info()[0]\n self.logger.exception(str(self.ex))\n","sub_path":"src/common/thread_helper.py","file_name":"thread_helper.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"534720861","text":"from lib import process_github_data\nfrom lib import get_github_session\nfrom lib.dates import get_start_date\n\nsize_labels = {'QA/XS': 0.5, 'QA/S': 1, 'QA/M': 3, 'QA/L': 5, 'QA/XL': 10}\n\n\ndef get_filtered_repos(repos):\n reps = process_github_data()\n return [r for r in reps if r.full_name in repos]\n\n\ndef get_users_by_ids(users_ids):\n users = []\n for userx in users_ids:\n users.append(get_github_session().get_user(userx))\n return users\n\n\ndef get_all_users_issues(repos, users, milestone):\n all_issues = []\n working_label = '[zube]: QA Working'\n done_label = '[zube]: Done'\n for repo in repos:\n for user in users:\n o_milestone = None\n milestones = repo.get_milestones(state='open')\n o_milestone = [m for m in milestones if m.title == milestone[0]]\n if repo.name != 'rke':\n if repo.name == 'dashboard':\n working_label = '[zube]: To Test'\n all_issues.extend(repo.get_issues(assignee=user,\n state='open',\n milestone=o_milestone[0],\n labels=[working_label],\n sort='updated')\n )\n all_issues.extend(repo.get_issues(assignee=user,\n state='closed',\n milestone=o_milestone[0],\n labels=[done_label]\n )\n )\n # set the working label back to QA Working if repo was dashboard\n if repo.name == 'dashboard':\n working_label = '[zube]: QA Working'\n else:\n all_issues.extend(repo.get_issues(assignee=user,\n state='open',\n labels=[working_label],\n sort='updated')\n )\n all_issues.extend(repo.get_issues(assignee=user,\n state='closed',\n labels=[done_label]\n )\n )\n\n return all_issues\n\n\ndef create_data_for_spreadsheet(issues, users):\n worksheet_data = []\n for issue in issues:\n for user in users:\n if user in issue.assignees:\n size_label = []\n if len(issue.labels) > 0:\n size_label = [label.name for label in issue.labels if label.name in size_labels.keys()]\n dupes = [wd[2] for wd in worksheet_data\n if f'{issue.number} {issue.title}' in wd[2] and user.name in wd[0]]\n if len(dupes) > 0:\n continue\n if issue.state == 'closed':\n diff = get_start_date() - issue.closed_at\n if diff and diff.days > 0:\n continue\n test_event_date = None\n for e in issue.get_events().reversed:\n if issue.repository.name == 'dashboard':\n if e.event == 'labeled' and e.label.name == '[zube]: To Test':\n test_event_date = e.created_at\n break\n else:\n if e.event == 'labeled' and e.label.name == '[zube]: QA Working':\n test_event_date = e.created_at\n break\n worksheet_data.append([\n user.name,\n issue.repository.name,\n f'{issue.number} {issue.title}',\n 'Closed' if issue.state == 'closed' else 'Working',\n '' if len(size_label) == 0 else size_label[0],\n issue.html_url,\n test_event_date if issue.state != 'closed' else issue.closed_at\n ])\n return worksheet_data\n","sub_path":"lib/gh.py","file_name":"gh.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"8018193","text":"#######################################\n# CONSTANTS\n#######################################\nimport string\n\nDIGITS = '0123456789'\nLETTERS = string.ascii_letters\nLETTERS_DIGITS = LETTERS + DIGITS\nWEIRD_LETTERS = LETTERS + DIGITS + '.' + '_' + ' ' + '\"' + \":\" + \";\"\n\n\n#######################################\n# ERRORS\n#######################################\n\nclass Error:\n def __init__(self, pos_start, pos_end, error_name, details):\n self.pos_start = pos_start\n self.pos_end = pos_end\n self.error_name = error_name\n self.details = details\n \n def as_string(self):\n result = f'{self.error_name}: {self.details}\\n'\n result += f'File {self.pos_start.fn}, line {self.pos_start.ln + 1}'\n return result\n\nclass IllegalCharError(Error):\n def __init__(self, pos_start, pos_end, details):\n super().__init__(pos_start, pos_end, 'Illegal Character', details)\n\nclass ExpectedCharError(Error):\n def __init__(self, pos_start, pos_end, details):\n super().__init__(pos_start, pos_end, 'Expected Character', details)\n\nclass InvalidSyntaxError(Error):\n def __init__(self, pos_start, pos_end, details):\n super().__init__(pos_start, pos_end, \"Invalid Syntax\", details)\n\n#######################################\n# POSITION\n#######################################\n\nclass Position:\n\tdef __init__(self, idx, ln, col, fn, ftxt):\n\t\tself.idx = idx\n\t\tself.ln = ln\n\t\tself.col = col\n\t\tself.fn = fn\n\t\tself.ftxt = ftxt\n\n\tdef advance(self, current_char=None):\n\t\tself.idx += 1\n\t\tself.col += 1\n\n\t\tif current_char == '\\n':\n\t\t\tself.ln += 1\n\t\t\tself.col = 0\n\n\t\treturn self\n\n\tdef copy(self):\n\t\treturn Position(self.idx, self.ln, self.col, self.fn, self.ftxt)\n\n#######################################\n# TOKENS\n#######################################\n\nTT_INT\t\t\t= 'INT'\nTT_FLOAT \t= 'FLOAT'\nTT_IDENTIFIER\t= 'IDENTIFIER'\nTT_KEYWORD\t\t= 'KEYWORD'\nTT_PLUS \t= 'PLUS'\nTT_MINUS \t= 'MINUS'\nTT_MUL \t= 'MUL'\nTT_DIV \t= 'DIV'\nTT_POW\t\t\t= 'POW'\nTT_EQ\t\t\t= 'EQ'\nTT_LPAREN \t= 'LPAREN'\nTT_RPAREN \t= 'RPAREN'\nTT_EE\t\t\t= 'EE' #\nTT_NE\t\t = 'NE' #\nTT_LT\t\t = 'LT' #\nTT_GT\t\t\t= 'GT' #\nTT_LTE\t\t\t= 'LTE' #\nTT_GTE\t\t\t= 'GTE' #\nTT_EOF\t\t\t= 'EOF'\nTT_NEWLINE = 'NEWLINE'\nTT_OPENBRACKET = 'OPENBRACKET'\nTT_CLOSEBRACKET = 'CLOSEBRACKET'\nTT_QUOTE = 'QUOTE'\nTT_LARRAY = 'LARRAY'\nTT_RARRAY = 'RARRAY'\nTT_COMMA = 'COMMA'\n\nKEYWORDS = [\n 'double',\n 'String',\n 'System.out.print',\n 'System.out.println',\n 'args',\n 'main',\n\n 'abstract',\n 'continue',\n 'for',\n 'new',\n 'switch',\n 'assert',\n 'default',\n 'goto',\n 'package',\n 'synchronized',\n 'boolean',\n 'do',\n 'if',\n 'private',\n 'this',\n 'break',\n 'double',\n 'implements',\n 'protected',\n 'throw',\n 'byte',\n 'else',\n 'import',\n 'public',\n 'throws',\n 'case',\n 'enum',\n 'instanceof',\n 'return',\n 'transient',\n 'catch',\n 'extends',\n 'int',\n 'short',\n 'try',\n 'char',\n 'final',\n 'interface',\n 'static',\n 'void',\n 'class',\n 'finally',\n 'long',\n 'strictfp',\n 'volatile',\n 'const',\n 'float',\n 'native',\n 'super',\n 'while'\n]\n\nclass Token:\n def __init__(self, type_, value=None, pos_start=None, pos_end=None):\n self.type = type_\n self.value = value\n\n if pos_start:\n self.pos_start = pos_start.copy()\n self.pos_end = pos_start.copy()\n self.pos_end.advance()\n\n if pos_end:\n self.pos_end = pos_end.copy()\n\n def matches(self, type_, value):\n return self.type == type_ and self.value == value\n\n def __repr__(self):\n if self.value: return 
f'{self.type}:{self.value}\\n'\n return f'{self.type}\\n'\n\n#######################################\n# LEXER\n#######################################\n\nclass Lexer:\n def __init__(self, fn, text):\n self.fn = fn\n self.text = text\n self.pos = Position(-1, 0, -1, fn, text)\n self.current_char = None\n self.advance()\n\n def advance(self):\n self.pos.advance(self.current_char)\n self.current_char = self.text[self.pos.idx] if self.pos.idx < len(self.text) else None\n\n def make_tokens(self):\n tokens = []\n\n while self.current_char != None:\n if self.current_char in ' \\t':\n self.advance()\n elif self.current_char == ';':\n tokens.append(Token(TT_NEWLINE, pos_start=self.pos))\n self.advance()\n elif self.current_char == '\\n':\n self.advance()\n elif self.current_char == '\"':\n tokens.append(self.make_print_statement())\n self.advance()\n elif self.current_char == ',':\n tokens.append(Token(TT_COMMA, pos_start=self.pos))\n self.advance()\n elif self.current_char in DIGITS:\n tokens.append(self.make_number())\n elif self.current_char in LETTERS:\n tokens.append(self.make_identifier())\n elif self.current_char == '+':\n tokens.append(Token(TT_PLUS, pos_start=self.pos))\n self.advance()\n elif self.current_char == '-':\n tokens.append(Token(TT_MINUS, pos_start=self.pos))\n self.advance()\n elif self.current_char == '*':\n tokens.append(Token(TT_MUL, pos_start=self.pos))\n self.advance()\n elif self.current_char == '/':\n tokens.append(Token(TT_DIV, pos_start=self.pos))\n self.advance()\n elif self.current_char == '^':\n tokens.append(Token(TT_POW, pos_start=self.pos))\n self.advance()\n elif self.current_char == '(':\n tokens.append(Token(TT_LPAREN, pos_start=self.pos))\n self.advance()\n elif self.current_char == ')':\n tokens.append(Token(TT_RPAREN, pos_start=self.pos))\n self.advance()\n elif self.current_char == '!':\n token, error = self.make_not_equals()\n if error: return [], error\n tokens.append(token)\n elif self.current_char == '=':\n tokens.append(self.make_equals())\n elif self.current_char == '<':\n tokens.append(self.make_less_than())\n elif self.current_char == '>':\n tokens.append(self.make_greater_than())\n elif self.current_char == '{':\n tokens.append(Token(TT_OPENBRACKET, pos_start=self.pos))\n self.advance()\n elif self.current_char == '}':\n tokens.append(Token(TT_CLOSEBRACKET, pos_start=self.pos))\n self.advance()\n elif self.current_char == '[':\n tokens.append(Token(TT_LARRAY, pos_start=self.pos))\n self.advance()\n elif self.current_char == ']':\n tokens.append(Token(TT_RARRAY, pos_start=self.pos))\n self.advance()\n else:\n pos_start = self.pos.copy()\n char = self.current_char\n self.advance()\n return [], IllegalCharError(pos_start, self.pos, \"'\" + char + \"'\")\n\n tokens.append(Token(TT_EOF, pos_start=self.pos))\n return tokens, None\n\n def make_number(self):\n num_str = ''\n dot_count = 0\n pos_start = self.pos.copy()\n\n while self.current_char != None and self.current_char in DIGITS + '.':\n if self.current_char == '.':\n if dot_count == 1: break\n dot_count += 1\n num_str += self.current_char\n self.advance()\n\n if dot_count == 0:\n return Token(TT_INT, int(num_str), pos_start, self.pos)\n else:\n return Token(TT_FLOAT, float(num_str), pos_start, self.pos)\n\n def make_identifier(self):\n id_str = ''\n pos_start = self.pos.copy()\n\n while self.current_char != None and self.current_char in LETTERS_DIGITS + '.' 
+ '_':\n id_str += self.current_char\n self.advance()\n\n tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER\n return Token(tok_type, id_str, pos_start, self.pos)\n\n def make_not_equals(self):\n pos_start = self.pos.copy()\n self.advance()\n\n if self.current_char == '=':\n self.advance()\n return Token(TT_NE, pos_start=pos_start, pos_end=self.pos), None\n\n self.advance()\n return None, ExpectedCharError(pos_start, self.pos, \"'=' (after '!')\")\n\n def make_equals(self):\n tok_type = TT_EQ\n pos_start = self.pos.copy()\n self.advance()\n\n if self.current_char == '=':\n self.advance()\n tok_type = TT_EE\n\n return Token(tok_type, pos_start=pos_start, pos_end=self.pos)\n\n def make_less_than(self):\n tok_type = TT_LT\n pos_start = self.pos.copy()\n self.advance()\n\n if self.current_char == '=':\n self.advance()\n tok_type = TT_LTE\n\n return Token(tok_type, pos_start=pos_start, pos_end=self.pos)\n\n def make_greater_than(self):\n tok_type = TT_GT\n pos_start = self.pos.copy()\n self.advance()\n\n if self.current_char == '=':\n self.advance()\n tok_type = TT_GTE\n\n return Token(tok_type, pos_start=pos_start, pos_end=self.pos)\n\n def make_print_statement(self):\n id_str = ''\n pos_start = self.pos.copy()\n quoteCount = 0\n\n while self.current_char != None and self.current_char in WEIRD_LETTERS:\n id_str += self.current_char\n if self.current_char == '\"':\n quoteCount += 1\n if quoteCount == 2:\n break\n self.advance()\n\n tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER\n return Token(tok_type, id_str, pos_start, self.pos)\n\n\n#######################################\n# RUN\n#######################################\n\ndef run(fn, text):\n lexer = Lexer(fn, text)\n tokens, error = lexer.make_tokens()\n\n return tokens, error","sub_path":"src/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":10226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
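For context, a minimal driver for the lexer above, using only the module's own `run(fn, text)` entry point (the file-name argument is only used in error messages):

```python
# Tokenise a Java-like statement; 'int' is in KEYWORDS, so it becomes a
# KEYWORD token, while 'x' falls through to IDENTIFIER.
tokens, error = run('<stdin>', 'int x = 5 + 3;')
if error:
    print(error.as_string())
else:
    # Token.__repr__ appends '\n', so this prints roughly one token per line:
    # KEYWORD:int, IDENTIFIER:x, EQ, INT:5, PLUS, INT:3, NEWLINE, EOF
    print(tokens)
```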
+{"seq_id":"290753925","text":"\"\"\"\nMake lots of files all identified in hex, but each one has a single non-hex letter in it.\nWhen you \"ls -S1 *X*\" for each letter, the file names create a picture.\n(-S will sort descending. Each file should be smaller than the one before it.)\n(Which means that, after enough lines of image, the files will have negative size. Have fun.)\n\n1A5FCCEA0561FB2XEBEA46E38F16BB79AD383F6D\n2.....B40434C024711XB6F5AC9685D3926F37BB\n2.71DC7AC79X576717E4EA33D85D7A678F5745F3\n5....C5B72C2A6E210DE046BF4FEE21A3E9C2B71\n5.2E65851DB030XD9563372F79BB26F9671613E3\n5.....852D45X421BE04D231ADE46293D7B51EE1\n7CF6952DB750D5CX887B0EDE5BFEDA7F3EA5CEAF\n82E8D45A447D91FA7F6A87F937B2D66F1ECCFD01\n8D648C280961C1DEAB7029F6F56D1CE5F016DC8B\n9E15398B81BC3C31EDE7A2C4AF7A6E6A3099D5D0\nAE23FFCA718060B9340D76D3338E79E299601216\nBBBF5B0B9AD1236B99EDDF9AE27937A92940DF4B\nC2D3BD4BE8733A1220DA38D03D5C9CC17725911B\nC3E84E1D3FF6897DF15539A2CFEF988979CCB613\nC7BA9561A86E906781FAE676C34E08ACB601B0B3\nEA4262EF5B6BBCB881B3F258F14098AFF294DB61\nEE46DAF2CEFAEDD4D65B74717C2FE3FFA83DBA7C\nF728806BE2D3935A96D4B02F235A1C395F1E0ABC\nF744EBA426017EF8F669D38303D536D6D99CA428\nFA17F48B15DD17D6011AF3B64D3A28E353C89E3E\n\n\"\"\"\nimport base64\nimport os\nimport random\n\nimg1 = \"\"\"\n. . . . . ..... . . ..... .... . ..... .\n. . . . . . . . . . . . . . . .\n. .. . . . . .. . . . . . . . . .\n. . . . . . .... . . . . . . ..... . .....\n. . .. . . . . .. . . . . . . . .\n. . . . . . . . . . . . . . . .\n. . . . ..... . . . .... . . . . .\n\"\"\".split(\"\\n\")\nimg2 = \"\"\"\n ... ..... . . ..... .... . ..... ..... ..... . . .....\n. . . . . . . . . . . . . . . .\n. . .. . . . . . . . . . . . .\n. .... . . . .... .... ..... . .... .... . . ....\n. .. . . .. . . . . . . . . . . .\n. . . . . . . . . . . . . . . .\n .... ..... . . ..... . . . . . ..... . . ..... .....\n\"\"\".split(\"\\n\")\nimg3 = \"\"\"\n.... . . . . .... . . . ... .....\n. . . . . . . . . . . . . . .\n. . . . . . . . . . . . . .\n.... . . . . . . ..... . . ... .\n. . . . . . . . . . . . . .\n. . . . . . . . . . . . . . .\n.... ... . ..... .... . . ..... . ... .\n\"\"\".split(\"\\n\")\nimg4 = \"\"\"\n.... ..... . . . . ..... .... . . ... ...\n. . . . . . . . . . . . . . . .\n. . . . . . . . . . .. .. . .\n. . .... . . . . .... .... . . . ... .\n. . . . . . . . . . . . . . ..\n. . . . . . . . . . . . . . . .\n.... ..... ..... . . ..... . . . . ... ...\n\"\"\".split(\"\\n\")\n\ndef generate_filenames(img, key, width=0):\n\t\"\"\"\n\tGenerate a list of file names corresponding to the lines of image,\n\twith each one containing the given key.\n\t\n\twidth will be calculated if not given or too small.\n\t\"\"\"\n\tneedwidth = max(len(l) for l in img)\n\tif width < needwidth + 2:\n\t\twidth = max(needwidth, 35) + 5\n\t\tif width % 2: width += 1 # Use an even number of hex digits to make them look like bytes\n\tfilenames = []\n\tgutter = random.randrange(1, width - needwidth - 1) # Digits (characters) of left gutter. 
The rest is right gutter.\n\tfor line in img:\n\t\tbase = random.randbytes(width // 2).hex().upper()\n\t\tfn = list(base)\n\t\tspares = width\n\t\tfor pos, chr in enumerate(line, gutter):\n\t\t\tif chr != ' ': spares -= 1; fn[pos] = chr\n\t\t# Insert the key on some current alphanumeric character (might be in the gutter)\n\t\tkeypos = random.randrange(spares) + 1\n\t\tfor pos, chr in enumerate(fn):\n\t\t\tif chr not in \"0123456789ABCDEF\": continue\n\t\t\tkeypos -= 1\n\t\t\tif not keypos: break\n\t\tfn[pos] = key\n\t\tfilenames.append(\"\".join(fn))\n\tif len(set(filenames)) < len(filenames):\n\t\t# Oops, got a collision. Try again, with slightly longer file names\n\t\t# to reduce the chance of recollision. Note that collisions *across*\n\t\t# groups can't happen if they have unique keys, but just in case, the\n\t\t# file writing would bomb if it ran into that problem.\n\t\treturn generate_filenames(img, key, width + 2)\n\treturn filenames\n\ndef generate_files(filenames, pat):\n\tn = len(filenames)\n\tif not n: return\n\tsizes = random.sample(range(n, n * 100), n)\n\tsizes.sort(reverse=True)\n\tfor fn, size in zip(filenames, sizes):\n\t\tprint(f\"%{len(str(len(filenames)))+3}d %s\" % (size, fn))\n\t\twith open(pat % fn, \"xb\") as f:\n\t\t\t# Generate enough random data to get 'size' bytes of\n\t\t\t# base 64. We have to get *exactly* that many, in case\n\t\t\t# two files need to differ by only one byte (b64 can't\n\t\t\t# generate certain byte sizes).\n\t\t\tdata = base64.b64encode(random.randbytes(4 * (size // 3) + 1))\n\t\t\tdata = data.strip(b\"=\")[:size]\n\t\t\tf.write(data)\n\nwidth = max(len(l) for l in img1 + img2 + img3 + img4) + 5\nwidth += width % 2\nos.makedirs(\"mess\", exist_ok=True)\nfor fn in os.listdir(\"mess\"):\n\tif fn.endswith(\".pub\"): os.unlink(\"mess/\" + fn)\ngenerate_files(generate_filenames(img1, key=\"W\", width=width), \"mess/%s.pub\")\ngenerate_files(generate_filenames(img2, key=\"X\", width=width), \"mess/%s.pub\")\ngenerate_files(generate_filenames(img3, key=\"Y\", width=width), \"mess/%s.pub\")\ngenerate_files(generate_filenames(img4, key=\"Z\", width=width), \"mess/%s.pub\")\n","sub_path":"messyglob.py","file_name":"messyglob.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
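A quick way to verify the effect without leaving Python, assuming the script above has just populated `mess/`: list one key's files largest-first, mirroring `ls -S1 mess/*X*`.

```python
import os

def show(key):
    files = [f for f in os.listdir("mess") if key in f and f.endswith(".pub")]
    # Sort by size, descending, like ls -S; print one name per line, like ls -1.
    files.sort(key=lambda f: os.path.getsize(os.path.join("mess", f)),
               reverse=True)
    print("\n".join(files))

show("X")  # the key used for img2 above
```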
+{"seq_id":"460654580","text":"import os\nimport sys\n\nimport pandas as pd\nfrom openpyxl import load_workbook\n\nimport CubeConstants\n\nif len(sys.argv) == 2:\n dir_name = sys.argv[1]\nelse:\n print(\"Invalid argument count\")\n exit(0)\n\nfiles = sorted(os.listdir(dir_name))\nwriter = pd.ExcelWriter(dir_name + \"/\" + CubeConstants.excel_file_name, engine='xlsxwriter')\nsheet_names = []\nfor file in files:\n sheet_names.append(file)\n with open(dir_name + \"/\" + file, 'r') as f:\n data_dict = {}\n lines = f.read().split(\"\\n\")\n for line in lines:\n if len(line) == 0:\n break\n current_array = line.split()\n key = current_array[0]\n value = current_array[1:]\n data_dict.update({key: value})\n\n df = pd.DataFrame(data_dict)\n df.to_excel(writer, sheet_name=str(file), index=False)\n f.close()\nwriter.save()\n\naverages_to_write = []\ndf = pd.read_excel(dir_name + \"/\" + CubeConstants.excel_file_name, sheet_name=None)\nfor sheet in sheet_names:\n data = df[sheet]\n averages = ['average']\n for i in range(data.shape[0]):\n sum_of_retries = 0\n for j in range(1, len(data.columns)):\n sum_of_retries += int(data.loc[i][j])\n averages.append(sum_of_retries / (len(data.columns) - 1))\n\n averages_to_write.append(averages)\n\n\nwriter = pd.ExcelWriter(dir_name + \"/\" + CubeConstants.excel_file_name, engine='openpyxl')\nfor index, averages in enumerate(averages_to_write):\n data_dict = {}\n key = averages[0]\n value = averages[1:]\n data_dict.update({key: value})\n\n writer.book = load_workbook(dir_name + \"/\" + CubeConstants.excel_file_name)\n writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)\n reader = pd.read_excel(dir_name + \"/\" + CubeConstants.excel_file_name)\n\n df = pd.DataFrame(data_dict)\n df.to_excel(writer, sheet_name=sheet_names[index], startcol=6, index=False)\n writer.close()\nwriter.save()\n","sub_path":"CreateExcelSheet.py","file_name":"CreateExcelSheet.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"305999813","text":"# Copyright 2014 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport moecache\n\nimport random\nimport unittest\n\nimport helpers\n\n\n# test moecache client-side sharding\nclass TestSharding(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.servers = list(map(helpers.start_new_memcached_server,\n random.sample(range(11213, 11314), 4)))\n\n @classmethod\n def tearDownClass(cls):\n for memcached, _ in cls.servers:\n try:\n memcached.terminate()\n except:\n print('for some reason memcached not running')\n else:\n memcached.wait()\n\n def setUp(self):\n self.client = moecache.Client([('127.0.0.1', port)\n for _, port in self.servers])\n\n def tearDown(self):\n self.client.close()\n\n def test_random(self):\n pairs = [(helpers.random_key(), str(n)) for n in range(100)]\n visible_pairs = dict(pairs).items()\n\n for k, v in pairs:\n self.client.set(k, v, exptime=60)\n\n for k, v in visible_pairs:\n cached_v = self.client.get(k)\n self.assertEqual(cached_v, v)\n\n for k, _ in visible_pairs:\n self.client.delete(k)\n\n for k, _ in visible_pairs:\n self.assertIsNone(self.client.get(k))\n\n def test_stats(self):\n stats = self.client.stats('items')\n self.assertEqual(stats, [{}] * 4)\n","sub_path":"tests/test_sharding.py","file_name":"test_sharding.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"631033898","text":"def function(l):\n for i in range(len(l)-1):\n if l[i] > l[i+1]:\n return False\n return True\n\n\ndef findposition(l):\n for i in range(len(l)):\n if function(l[:i+1]) and function(l[i+1:]) and min(l[:i+1]) >= max(l[i+1:]):\n return i\n return -1\n\n\nn = int(input())\ns = input().split(' ')\nl = []\nfor i in range(len(s)):\n l.append(int(s[i]))\n\nif function(l):\n print(0)\nelif findposition(l) == -1:\n print(-1)\nelse:\n print(len(l)-findposition(l)-1)","sub_path":"Code/CodeRecords/2836/60691/293863.py","file_name":"293863.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"539261225","text":"\n\nfrom xai.brain.wordbase.nouns._catechism import _CATECHISM\n\n#calss header\nclass _CATECHISMS(_CATECHISM, ):\n\tdef __init__(self,): \n\t\t_CATECHISM.__init__(self)\n\t\tself.name = \"CATECHISMS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"catechism\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_catechisms.py","file_name":"_catechisms.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"216023581","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport logging\nimport numpy as np\nfrom .... import core\nfrom ....framework import Program, Operator, Variable, program_guard\nfrom .... import unique_name\nfrom ....layer_helper import LayerHelper\nfrom ....param_attr import ParamAttr\nfrom ....initializer import Constant\nfrom ....log_helper import get_logger\n\n_logger = get_logger(\n __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')\n\n\nclass QuantizeTranspilerV2(object):\n def __init__(self,\n weight_bits=8,\n activation_bits=8,\n weight_quantize_type='abs_max',\n activation_quantize_type='abs_max',\n quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'],\n skip_pattern=['skip_quant']):\n \"\"\"\n Add quant_dequant op before the quantized op to quantize the fluid Program.\n It is a patch for distributed quantization, we will support others module for\n distributed quantization.\n\n Args:\n weight_bits(int): the bit of quantized weight.\n activation_bits(int): the bit of quantized activation.\n weight_quantize_type(str): the quantization type for weight.\n Only support to be 'abs_max' for now.\n activation_quantize_type(str): the quantization type for activation.\n Only support to be 'abs_max' for now.\n quantizable_op_type(str): set the op type for quantization.\n skip_pattern(str|list): The user-defined quantization skip pattern, which\n will be presented in the name scope of an op. 
When the skip pattern is\n detected in an op's name scope, the corresponding op will not be quantized.\n \"\"\"\n self._weight_bits = weight_bits\n self._activation_bits = activation_bits\n\n assert activation_quantize_type == \"abs_max\", \\\n \"activation_quantize_type should be abs_max for now.\"\n assert weight_quantize_type == \"abs_max\", \\\n \"weight_quantize_type should be abs_max for now.\"\n self._activation_quantize_type = activation_quantize_type\n self._weight_quantize_type = weight_quantize_type\n\n self._quantizable_ops = quantizable_op_type\n self._quantizable_grad_ops = [\n '%s_grad' % (op) for op in self._quantizable_ops\n ]\n\n self._skip_pattern = skip_pattern\n self.helper = LayerHelper(self.__class__.__name__)\n\n def apply(self, program, startup_program):\n \"\"\"\n Apply quantization to fluid Program.\n\n Args:\n program(Program): the train or test program to be quantized.\n startup_program(Program): the corresponding startup_program.\n Returns:\n None\n \"\"\"\n assert isinstance(program, Program), \\\n \"program must be the instance of Program\"\n assert isinstance(startup_program, Program), \\\n \"startup_program must be the instance of Program\"\n\n quant_dequant_vars = [\n collections.OrderedDict() for _ in range(len(program.blocks))\n ]\n with program_guard(program, startup_program):\n for block in program.blocks:\n ops = list(block.ops)\n for op in ops:\n if op.type in self._quantizable_ops and \\\n (not self._is_skip_quant(op)):\n self._transform_forward(block, op, quant_dequant_vars)\n for block in program.blocks:\n ops = list(block.ops)\n for op in ops:\n if op.type in self._quantizable_grad_ops and \\\n (not self._is_skip_quant(op)):\n self._transform_backward(block, op, quant_dequant_vars)\n\n def _is_skip_quant(self, op):\n \"\"\"\n Analyse whether the op should skip quantization or not.\n \"\"\"\n user_skipped = False\n if isinstance(self._skip_pattern, list):\n user_skipped = op.has_attr(\"op_namescope\") and \\\n any(pattern in op.attr(\"op_namescope\") \\\n for pattern in self._skip_pattern)\n elif isinstance(self._skip_pattern, str):\n user_skipped = op.has_attr(\"op_namescope\") and \\\n op.attr(\"op_namescope\").find(\n self._skip_pattern) != -1\n return user_skipped\n\n def _transform_forward(self, block, op, quant_dequant_vars):\n op._set_attr(\"quantization_type\", \"qat_with_weight\")\n idx = block.ops.index(op)\n block_id = block.idx\n for in_name in op.input_arg_names:\n if in_name in quant_dequant_vars[block_id]:\n quant_dequant_var = quant_dequant_vars[block_id][in_name]\n else:\n in_var = block.var(in_name)\n quant_bits = self._weight_bits if in_var.persistable \\\n else self._activation_bits\n quant_type = self._weight_quantize_type if in_var.persistable \\\n else self._activation_quantize_type\n if quant_type == \"abs_max\":\n quant_dequant_var = self._insert_quant_dequant_abs_max_op(\n block, idx, in_var, quant_bits)\n else:\n _logger.error(\"Quant_type only supported to be abs_max\")\n quant_dequant_vars[block_id][in_name] = quant_dequant_var\n op._rename_input(in_name, quant_dequant_var.name)\n\n def _transform_backward(self, block, op, quant_dequant_vars):\n block_id = block.idx\n no_dequanted_input_vars = True\n for name in op.input_arg_names:\n if name in quant_dequant_vars[block_id]:\n dequant_var = quant_dequant_vars[block_id][name]\n op._rename_input(name, dequant_var.name)\n no_dequanted_input_vars = False\n if no_dequanted_input_vars:\n raise ValueError(\"There is no dequanted inputs for op %s.\" %\n (op.type))\n\n def 
_insert_quant_dequant_abs_max_op(self, block, idx, in_var, quant_bits):\n quant_dequant_var = block.create_var(\n type=in_var.type,\n name=\"{}.quant_dequant\".format(in_var.name),\n shape=in_var.shape,\n dtype=in_var.dtype)\n scale_var = self.helper.create_parameter(\n attr=ParamAttr(\n name=\"{}.quant_dequant.scale\".format(in_var.name),\n initializer=Constant(0.001),\n trainable=False),\n shape=[1],\n dtype=in_var.dtype)\n scale_var.stop_gradient = True\n\n inputs = {'X': in_var}\n outputs = {'Out': quant_dequant_var, 'OutScale': scale_var}\n attrs = {'bit_length': quant_bits}\n block._insert_op(\n idx,\n type='fake_quantize_dequantize_abs_max',\n attrs=attrs,\n inputs=inputs,\n outputs=outputs)\n return quant_dequant_var\n","sub_path":"python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py","file_name":"quantize_transpiler_v2.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
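A minimal usage sketch, following the class's own docstrings; `train_program` and `startup_program` are assumed to be `fluid.Program` instances built elsewhere:

```python
# Insert fake quant/dequant ops ahead of every quantizable forward op,
# then rewire the matching *_grad ops to read the quant-dequant outputs.
transpiler = QuantizeTranspilerV2(
    weight_bits=8,
    activation_bits=8,
    quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'],
    skip_pattern=['skip_quant'])
transpiler.apply(train_program, startup_program)
```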
+{"seq_id":"228301436","text":"import discord\r\nfrom discord.ext import commands\r\nfrom settings import *\r\nfrom objects.channel import Channel\r\nfrom objects.server import Server\r\n\r\nclass Logger:\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.themes = Themes()\r\n \r\n async def on_message(self, message):\r\n try:\r\n channel = message.channel\r\n\r\n _channel = Channel(channel.id)\r\n _server = Server(message.server.id)\r\n\r\n if _channel.is_log_blocked:\r\n return\r\n \r\n if _server.logging_allowed is False:\r\n return\r\n \r\n if _server.logs_channel is None:\r\n return\r\n \r\n attach = message.attachments[0]['url']\r\n\r\n embed = discord.Embed(\r\n title=\"📷 | Image Sent\",\r\n color=self.themes.MAIN_COL,\r\n timestamp=message.timestamp\r\n )\r\n embed.set_author(\r\n name=message.author,\r\n icon_url=message.author.avatar_url\r\n )\r\n embed.add_field(\r\n name=\"User ID\",\r\n value=message.author.id,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Channel\",\r\n value=message.channel.mention,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Image URL\",\r\n value=attach,\r\n inline=False\r\n )\r\n embed.set_image(url=attach)\r\n\r\n logs = discord.utils.get(message.server.channels, id=_server.logs_channel)\r\n\r\n await self.bot.send_message(logs, embed=embed)\r\n except:\r\n pass\r\n \r\n async def on_message_delete(self, message):\r\n _channel = Channel(message.channel.id)\r\n _server = Server(message.server.id)\r\n\r\n if _channel.is_log_blocked:\r\n return\r\n\r\n if _server.logging_allowed is False:\r\n return\r\n \r\n if _server.logs_channel is None:\r\n return\r\n \r\n embed = discord.Embed(\r\n title=\"❌ | Message DELETED\",\r\n color=self.themes.MAIN_COL,\r\n timestamp=message.timestamp\r\n )\r\n embed.set_author(\r\n name=message.author,\r\n icon_url=message.author.avatar_url\r\n )\r\n embed.add_field(\r\n name=\"User ID\",\r\n value=message.author.id,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Channel\",\r\n value=message.channel.mention,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Message\",\r\n value=f\"\\u200b{message.content}\",\r\n inline=False\r\n )\r\n\r\n logs = discord.utils.get(message.server.channels, id=_server.logs_channel)\r\n\r\n await self.bot.send_message(logs, embed=embed)\r\n \r\n async def on_message_edit(self, before, after):\r\n try:\r\n _channel = Channel(before.channel.id)\r\n _server = Server(before.server.id)\r\n\r\n if _channel.is_log_blocked:\r\n return\r\n\r\n if _server.logging_allowed is False:\r\n return\r\n \r\n if _server.logs_channel is None:\r\n return\r\n \r\n embed = discord.Embed(\r\n title=\"〽 | Message EDITED\",\r\n color=self.themes.MAIN_COL,\r\n timestamp=before.timestamp\r\n )\r\n embed.set_author(\r\n name=before.author,\r\n icon_url=before.author.avatar_url\r\n )\r\n embed.add_field(\r\n name=\"User ID\",\r\n value=before.author.id,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Channel\",\r\n value=before.channel.mention,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Message Before\",\r\n value=before.content,\r\n inline=False\r\n )\r\n embed.add_field(\r\n name=\"Message After\",\r\n value=after.content,\r\n inline=False\r\n )\r\n\r\n logs = discord.utils.get(before.server.channels, id=_server.logs_channel)\r\n\r\n await self.bot.send_message(logs, embed=embed)\r\n except:\r\n pass\r\n\r\ndef setup(bot):\r\n 
bot.add_cog(Logger(bot))","sub_path":"cogs/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"464928843","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\nr = requests.get('https://www.kbb.com/car-reviews/')\nsoup = BeautifulSoup(r.text, 'html5lib')\n\nmake_list = []\n\nfor link in soup.find_all('a'):\n try:\n if (link.get('class')[0] == 'js-browseby-make'): \n txt = ''\n for i in link.text:\n if (not(i == '\\n') and not(i == '\\t')):\n txt += i\n if not(txt in make_list):\n make_list.append(txt)\n except:\n continue\n\nprint('Car Makes ready!')\n\ndict_make_model = {}\n\nfor make in make_list:\n make = \"-\".join(make.split(\" \"))\n r = requests.get('https://www.kbb.com/'+make)\n soup = BeautifulSoup(r.text, 'html5lib')\n model_list = []\n for link in soup.find_all('h3'):\n txt = ''\n for i in link.text:\n if (not(i == '\\n') and not(i == '\\t')):\n txt += i\n if not(txt in model_list):\n model_list.append(txt)\n dict_make_model[make] = model_list[:-1]\n\nprint('Make Model Matched!')\n\nquit\n \nKBB_cons_ratings = pd.DataFrame(\n columns=['Make', 'Model', 'Year', 'Cons_Rating'])\n \nfor i in dict_make_model:\n for j in dict_make_model[i]:\n make = i\n model = j[5+len(make)+1:] \n this_car_link = 'https://www.kbb.com/'+ make +'/'+ model +'/'\n r = requests.get(this_car_link)\n soup = BeautifulSoup(r.text, 'html5lib')\n for sec in soup.find_all('section'):\n if sec.get('data-analytics') == 'overview':\n overview_section = sec\n this_car_years = []\n for lis in overview_section.find_all('li'):\n this_car_years.append(lis.text)\n print(make + ' ' + model + ' has ' + ' '.join(this_car_years) + ' year models' )\n for year in this_car_years:\n if len(year) == 4:\n try:\n r = requests.get(this_car_link+'/'+year)\n soup = BeautifulSoup(r.text, 'html5lib') \n texts = []\n for link in soup.find_all('div'):\n texts.append(link.text)\n locate_rating = texts.index(\"KBB Consumer Rating\") + 3\n KBB_cons_ratings = KBB_cons_ratings.append({'Make': make, 'Model': model, 'Year': year, 'Cons_Rating': texts[locate_rating] }, ignore_index=True)\n print(texts[locate_rating] + 'is the KBB Consumer Rating for ' + make + ' ' + model + ' ' + year)\n except ValueError:\n print(\"Rating not found for \"+ make + ' ' + model + ' ' + year)\n except:\n print(\"Something else went wrong for \"+ make + ' ' + model + ' ' + year)\n else:\n print('Stupid Scrapper!')\n\nprint('Beautiful Soup!')\n\nprint('Here is a taster')\n\nprint(KBB_cons_ratings.head())\n\nexport_csv = KBB_cons_ratings.to_csv (r'KBB_consumer_ratings.csv', index = None, header=True)\n\n\n\n\n","sub_path":"db/scrappers/consumer_ratings_scrapper.py","file_name":"consumer_ratings_scrapper.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"554544131","text":"import os, sys\nimport glob\nfrom multiprocessing import Process\nimport numpy as np\nfrom skimage.filters import scharr, sobel, roberts, prewitt\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray\nfrom scipy import ndimage as ndi\n\nimport matplotlib.pyplot as plt\nimport imageio\n\nimport PyQt5.QtCore as qtc\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QWidget, QFileDialog, QPushButton, QVBoxLayout\nfrom PyQt5.QtGui import QPixmap, QImage, QColor\n#from PyQt5.QtGui.QColor\n\n\nclass App(QMainWindow):\n\tdef __init__(self):\n\t\tsuper(App, self).__init__()\n\t\tself.title = 'PyQt5 image - pythonspot.com'\n\t\tself.left = 10\n\t\tself.top = 10\n\t\tself.width = 640\n\t\tself.height = 480\n\t\tself.label = QLabel(self)\n\t\tself.label.mousePressEvent = self.label_object\n\n\t\tself.central_widget = QWidget()\n\t\tself.setCentralWidget(self.central_widget)\n\t\tself.layout = QVBoxLayout(self.central_widget)\n\t\tself.layout.addWidget(self.label)\n\t\tself.label.move(0, 500)\n\n\t\tself.edge_type = \"scharr\"\n\t\tself.edge_folders = {}\n\t\tself.qim = 0\n\t\tself.label_im = 0\n\t\tself.initUI()\n\n\tdef initUI(self):\n\t\tself.setWindowTitle(self.title)\n\t\tself.setGeometry(self.left, self.top, self.width, self.height)\n\n\t\tnumImages = 10\n\t\tfirstNonPanelImg = 13\n\t\t# Create folder selector widget\n\t\tdlg = QFileDialog()\n\t\tdlg.setFileMode(QFileDialog.Directory)\n\t\t#filenames = QStringList()\n\t\tdir_names = []\n\t\tif dlg.exec_():\n\t\t\tdir_names = dlg.selectedFiles()\n\t\tprint(dir_names)\n\n\t\tbutton = QPushButton('Next Image', self)\n\t\t#button.move(100,70)\n\t\tbutton.clicked.connect(self.next_image_clicker)\n\t\tself.load_images(dir_names)\n\t\tself.show()\n\n\tdef load_images(self, dir_names):\n\t\tfor dir in dir_names:\n\t\t\tself.current_edges_folder = dir + \"-\" + self.edge_type + \"edges\"\n\t\t\tself.edge_folders[self.current_edges_folder] = []\n\n\t\t\tif self.current_edges_folder not in self.edge_folders:\n\t\t\t\tself.edge_folders.append(self.current_edges_folder)\n\t\t\tdirectory = os.fsencode(dir)\n\t\t\tfor file in reversed(sorted(os.listdir(directory))):\n\t\t\t\tprint(\"file:\" + str(file))\n\t\t\t\tself.current_file_name = os.fsdecode(file)\n\t\t\t\tim_type = self.current_file_name.split('.')[-1]\n\t\t\t\timg_path = f\"{dir}/{self.current_file_name}\"\n\t\t\t\tout_file_name = self.current_file_name.split('.')[0] + \"-edges.\" + im_type\n\t\t\t\tout_image_path = os.path.join(dir+\"-\" + self.edge_type + \"edges\", out_file_name)\n\t\t\t\tif not os.path.isdir(dir+\"-\" + self.edge_type + \"edges\"):\n\t\t\t\t\tos.mkdir(dir+\"-\" + self.edge_type + \"edges\")\n\t\t\t\tif not os.path.isfile(out_image_path):\n\t\t\t\t\tim_edges = self.edges(img_path)\n\t\t\t\t\timageio.imwrite(out_image_path, im_edges)\n\t\t\t\tif out_image_path not in self.edge_folders[self.current_edges_folder]:\n\t\t\t\t\tself.edge_folders[self.current_edges_folder].append(out_image_path)\n\t\t# for key in self.edge_folders.keys():\n\t\t# \tprint(f\"{key}: {self.edge_folders[key]}\\n\")\n\n\tdef edges(self, image_path):\n\t\t#edges = sobel(gray)*256\n\t\timg = imageio.imread(image_path)\n\t\tgray = rgb2gray(img)\n\t\t# * 255 so that it's not scaled between 0-1 for when it converts to uint8\n\t\tif self.edge_type == \"canny\":\n\t\t\tedges = canny(gray, sigma=6)\n\t\tif self.edge_type == \"sobel\":\n\t\t\tedges = sobel(gray)*256\n\t\tif self.edge_type == \"scharr\":\n\t\t\tedges = scharr(gray)*256\n\t\tif self.edge_type == 
\"roberts\":\n\t\t\tedges = roberts(gray)*256\n\t\tif self.edge_type == \"prewitt\":\n\t\t\tedges = prewitt(gray)*256\n\t\t#fill_objects = ndi.binary_fill_holes(edges)\n\t\treturn edges.astype('uint8')\n\n\t@qtc.pyqtSlot()\n\tdef next_image_clicker(self):\n\t\tif self.qim:#save labels\n\t\t\tout_dir = self.current_edges_folder.split('-')[-1] + \"-labels\"\n\t\t\tif not os.path.isdir(out_dir):\n\t\t\t\tos.mkdir(out_dir)\n\t\t\tfilename_split = self.current_file_name.split('.')\n\t\t\tfile_path = os.path.join('.', out_dir, filename_split[0] + \"-labels.\" + filename_split[1])\n\t\t\tprint(\"labelled data path:\" + file_path)\n\t\t\tself.label_im.save(file_path)\n\t\tself.show_next_img()\n\n\tdef show_next_img(self):\n\t\t# print(f\"current folder: {self.current_edges_folder}\\nfiles: {self.edge_folders[self.current_edges_folder]}\\n\")\n\t\topen_image_path = self.edge_folders[self.current_edges_folder].pop()\n\t\tprint(f\"current folder: {self.current_edges_folder}\\nfile: {open_image_path}\")\n\t\tself.qim = QImage(open_image_path)\n\t\tself.label_im = QImage(self.qim.width(), self.qim.height(), QImage.Format_RGB32)\n\t\tself.update_qpix()\n\n\tdef label_object(self , event):\n\t\tself.clicked = True\n\t\tx = event.pos().x()\n\t\ty = event.pos().y()\n\t\tprint(f\"{x}, {y}\")\n\t\tself.floodfill_queue(x, y)\n\n\tdef floodfill_queue(self, x, y):\n\t\tqim_max = self.qim.width()\n\t\twhile_tol = 15\n\t\tq = [(x, y)]\n\t\ttouched = []\n\t\twhile q and len(touched) < 10000:\n\t\t\tn = q.pop(0)\n\t\t\tx = n[0]\n\t\t\ty = n[1]\n\t\t\tfor new_point in [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]:\n\t\t\t\tif x > 0 and y > 0 and x best_score):\n best_predictions = combined_predictions\n best_index = i\n best_score = new_score\n if best_index > -1:\n return np.append(best_index_list, best_index), best_score\n else:\n return best_index_list, best_score\n\n\ndef combine_predictions_list(predictions_list, index_list=None):\n \"\"\"Combine predictions in predictions_list[index_list].\n\n By taking the mean of their get_combineable_predictions views.\n\n E.g. for regression it is the actual\n predictions, and for classification it is the probability array (which\n should be calibrated if we want the best performance). Called both for\n combining one submission on cv folds (a single model that is trained on\n different folds) and several models on a single fold.\n Called by\n _get_bagging_score : which combines bags of the same model, trained on\n different folds, on the heldout test set\n _get_cv_bagging_score : which combines cv-bags of the same model, trained\n on different folds, on the training set\n get_next_best_single_fold : which does one step of the greedy forward\n selection (of different models) on a single fold\n _get_combined_predictions_single_fold : which does the full loop of greedy\n forward selection (of different models), until improvement, on a single\n fold\n _get_combined_test_predictions_single_fold : which computes the combination\n (constructed on the cv valid set) on the holdout test set, on a single\n fold\n _get_combined_test_predictions : which combines the foldwise combined\n and foldwise best test predictions into a single megacombination\n\n Parameters\n ----------\n predictions_list : list of instances of Predictions\n Each element of the list is an instance of Predictions of a given model\n on the same data points.\n index_list : None | list of integers\n The subset of predictions to be combined. 
If None, the full set is\n combined.\n\n Returns\n -------\n combined_predictions : instance of Predictions\n A predictions instance containing the combined (averaged) predictions.\n \"\"\"\n Predictions = type(predictions_list[0])\n combined_predictions = Predictions.combine(predictions_list, index_list)\n return combined_predictions\n\n\ndef _get_score_cv_bags(event, score_type, predictions_list, ground_truths,\n test_is_list=None):\n \"\"\"\n Computes the bagged score of the predictions in predictions_list.\n\n Called by Submission.compute_valid_score_cv_bag and\n db_tools.compute_contributivity.\n\n Parameters\n ----------\n event : instance of Event\n Needed for the type of y_comb and\n predictions_list : list of instances of Predictions\n ground_truths : instance of Predictions\n test_is_list : list of integers\n Indices of points that should be bagged in each prediction. If None,\n the full prediction vectors will be bagged.\n Returns\n -------\n score_cv_bags : instance of Score ()\n \"\"\"\n if test_is_list is None: # we combine the full list\n test_is_list = [range(len(predictions.y_pred))\n for predictions in predictions_list]\n\n y_comb = np.array(\n [event.Predictions(n_samples=len(ground_truths.y_pred))\n for _ in predictions_list])\n score_cv_bags = []\n for i, test_is in enumerate(test_is_list):\n y_comb[i].set_valid_in_train(predictions_list[i], test_is)\n combined_predictions = combine_predictions_list(y_comb[:i + 1])\n valid_indexes = combined_predictions.valid_indexes\n score_cv_bags.append(score_type.score_function(\n ground_truths, combined_predictions, valid_indexes))\n # XXX maybe use masked arrays rather than passing valid_indexes\n return combined_predictions, score_cv_bags\n","sub_path":"ramp-database/rampdb/tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"26900414","text":"# Licensed under the BSD 3-Clause License\n# Copyright (C) 2021 GeospaceLab (geospacelab)\n# Author: Lei Cai, Space Physics and Astronomy, University of Oulu\n\n__author__ = \"Lei Cai\"\n__copyright__ = \"Copyright 2021, GeospaceLab\"\n__license__ = \"BSD-3-Clause License\"\n__email__ = \"lei.cai@oulu.fi\"\n__docformat__ = \"reStructureText\"\n\nfrom geospacelab.datahub import VariableModel as Var\nimport geospacelab.visualization.mpl.colormaps as cm\n\nimport numpy as np\n\ndatabase = 'ESA/EarthOnline'\n\ntimestamps = {\n 'SC_DATETIME': 'SC_DATETIME',\n}\n\n\ndefault_colormap = \"gist_ncar\"\n\ndefault_plot_config = {\n 'line': {\n 'linestyle': '-',\n 'linewidth': 1.5,\n 'marker': '',\n 'markersize': 3,\n },\n 'pcolormesh': {\n 'cmap': default_colormap,\n }\n}\n\nconfigured_variables = {}\nvisual = 'on'\n\ndepend_0 = {'UT': 'SC_DATETIME',\n 'GEO_LAT': 'SC_GEO_LAT', 'GEO_LON': 'SC_GEO_LON',\n 'AACGM_LAT': 'SC_AACGM_LAT', 'AACGM_LON': 'SC_AACGM_LON', 'AACGM_MLT': 'SC_AACGM_MLT'}\n# depend_c = {'SPECTRA': 'EMISSION_SPECTRA'}\n\n####################################################################################################################\nvar_name = 'rho_n'\nvar = Var(name=var_name, ndim=1, variable_type='scalar', visual=visual)\n# set variable attrs\nvar.fullname = 'Neutral mass density'\nvar.label = r'$\\rho_n$'\nvar.unit = 'kg/m-3'\nvar.unit_label = r'kg$\\cdot$m$^{-3}$'\nvar.group = r'$\\rho$'\n# var.error = var_name + '_err'\nvar.depends = {0: depend_0}\n# set plot attrs\nplot_config = var.visual.plot_config\nplot_config.config(**default_plot_config)\nplot_config.style = '1noE'\n# set axis attrs\naxis = var.visual.axis\naxis[1].data = \"@v.value\"\n# axis[1].lim = [np.nan, np.nan]\naxis[2].label = '@v.label'\naxis[1].unit = '@v.unit_label'\n\nconfigured_variables[var_name] = var\n\n\n####################################################################################################################\nvar_name = 'SC_GEO_LAT'\nvar = Var(name=var_name, ndim=1, variable_type='scalar', visual=visual)\n# set variable attrs\nvar.fullname = 'S/C geographic latitude'\nvar.label = r'GLAT'\nvar.unit = 'degree'\nvar.unit_label = r'$^\\circ$'\nvar.group = r'GEO'\n# var.error = var_name + '_err'\nvar.depends = {0: depend_0}\n# set plot attrs\nplot_config = var.visual.plot_config\nplot_config.config(**default_plot_config)\nplot_config.style = '1noE'\n# set axis attrs\naxis = var.visual.axis\naxis[1].data = \"@v.value\"\n# axis[1].lim = [-2000, 2000]\naxis[1].label = '@v.label'\naxis[1].unit = '@v.unit_label'\n\nconfigured_variables[var_name] = var\n\n####################################################################################################################\nvar_name = 'SC_GEO_LON'\nvar = Var(name=var_name, ndim=1, variable_type='scalar', visual=visual)\n# set variable attrs\nvar.fullname = 'S/C geographic latitude'\nvar.label = r'GLON'\nvar.unit = 'degree'\nvar.unit_label = r'$^\\circ$'\nvar.group = r'GEO'\n# var.error = var_name + '_err'\nvar.depends = {0: depend_0}\n# set plot attrs\nplot_config = var.visual.plot_config\nplot_config.config(**default_plot_config)\nplot_config.style = '1noE'\n# set axis attrs\naxis = var.visual.axis\naxis[1].data = \"@v.value\"\n# axis[1].lim = [-2000, 2000]\naxis[1].label = '@v.label'\naxis[1].unit = '@v.unit_label'\n\nconfigured_variables[var_name] = 
var\n","sub_path":"geospacelab/datahub/sources/tud/champ/dns_acc/variable_config.py","file_name":"variable_config.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"473944837","text":"# -*- coding: utf-8 -*-\n\"\"\"Fold LOFAR data. Use sub-bands; sub-channeling not yet implemented\"\"\"\nfrom __future__ import division, print_function\n\nimport numpy as np\n# use FFT from scipy, since unlike numpy it does not cast up to complex128\nfrom scipy.fftpack import fft, ifft, fftfreq\nimport astropy.units as u\n\nfrom scintellometry.folding.mpilofile import mpilofile\nfrom mpi4py import MPI\n\nfrom fromfile import fromfile\n\ndispersion_delay_constant = 4149. * u.s * u.MHz**2 * u.cm**3 / u.pc\n\ndef fold(file1, file2, dtype, fbottom, fwidth, nchan,\n nt, ntint, nskip, ngate, ntbin, ntw, dm, fref, phasepol,\n coherent=False, do_waterfall=True, do_foldspec=True, verbose=True,\n progress_interval=100, comm=None):\n \"\"\"Fold pre-channelized LOFAR data, possibly dedispersing it\n\n Parameters\n ----------\n file1, file2 : string\n names of the files holding real and imaginary subchannel timeseries\n dtype : numpy dtype\n way the data are stored in the file (normally '>f4')\n fbottom : float\n frequency of the lowest channel (frequency units)\n fwidth : float\n channel width (frequency units, normally 200*u.MHz/1024.)\n nchan : int\n number of frequency channels\n nt, ntint : int\n number nt of sets to use, each containing ntint samples;\n hence, total # of samples used is nt*ntint for each channel.\n nskip : int\n number of records (nskip*ntint*4*nchan bytes) to skip before reading\n ngate, ntbin : int\n number of phase and time bins to use for folded spectrum\n ntbin should be an integer fraction of nt\n ntw : int\n number of time samples to combine for waterfall (does not have to be\n integer fraction of nt)\n dm : float\n dispersion measure of pulsar, used to correct for ism delay\n (column number density)\n fref: float\n reference frequency for dispersion measure\n phasepol : callable\n function that returns the pulsar phase for time in seconds relative to\n the start of the file that is read (i.e., including nskip)\n coherent : bool\n Whether to do dispersion coherently within finer channels\n do_waterfall, do_foldspec : bool\n whether to construct waterfall, folded spectrum (default: True)\n verbose : bool\n whether to give some progress information (default: True)\n progress_interval : int\n Ping every progress_interval sets\n comm : MPI communicator (default: None\n \"\"\"\n if comm is not None:\n rank = comm.rank\n size = comm.size\n else:\n rank = 0\n size = 1\n def mpilofile(comm, file):\n return open(file)\n\n # initialize folded spectrum and waterfall\n if do_foldspec:\n foldspec = np.zeros((nchan, ngate, ntbin))\n icount = np.zeros((nchan, ngate, ntbin))\n else:\n foldspec = None\n icount = None\n if do_waterfall:\n nwsize = nt*ntint//ntw\n waterfall = np.zeros((nchan, nwsize))\n else:\n waterfall = None\n\n # # of items to read from file.\n itemsize = np.dtype(dtype).itemsize\n count = nchan*ntint\n if verbose and rank == 0:\n print('Reading from {}\\n and {}'.format(file1, file2))\n\n with mpilofile(comm, file1) as fh1, \\\n mpilofile(comm, file2) as fh2:\n if nskip > 0:\n if verbose and rank == 0:\n print('Skipping {0} bytes'.format(nskip))\n # if # MPI processes > 1 we seek in for-loop\n if size == 1:\n fh1.seek(nskip * count * itemsize)\n fh2.seek(nskip * count * itemsize)\n\n\n dtsample = (1./fwidth).to(u.s)\n tstart = dtsample * nskip * ntint\n\n # pre-calculate time delay due to dispersion in course channels\n freq = fbottom + fwidth*np.arange(nchan)\n dt = (dispersion_delay_constant * dm *\n (1./freq**2 - 
1./fref**2)).to(u.s).value\n\n if coherent:\n # pre-calculate required turns due to dispersion in fine channels\n fcoh = (freq[np.newaxis,:] +\n fftfreq(ntint, dtsample.value)[:,np.newaxis] * u.Hz)\n # fcoh[fine, channel]\n # (check via eq. 5.21 and following in\n # Lorimer & Kramer, Handbook of Pulsar Astrono\n dang = (dispersion_delay_constant * dm * fcoh *\n (1./freq - 1./fcoh)**2) * u.cycle\n dedisperse = np.exp(dang.to(u.rad).value * 1j\n ).conj().astype(np.complex64)\n\n for j in xrange(rank, nt, size):\n if verbose and j % progress_interval == 0:\n print('Doing {:6d}/{:6d}; time={:18.12f}'.format(\n j+1, nt, (tstart+dtsample*j*ntint).value))\n # time since start of file\n\n # just in case numbers were set wrong -- break if file ends\n # better keep at least the work done\n try:\n # data stored as series of floats in two files,\n # one for real and one for imaginary\n if size > 1:\n fh1.seek((nskip + j)*count*itemsize)\n fh2.seek((nskip + j)*count*itemsize)\n raw1 = fromfile(fh1, dtype, count*itemsize).reshape(-1,nchan)\n raw2 = fromfile(fh2, dtype, count*itemsize).reshape(-1,nchan)\n except(EOFError):\n break\n\n # int 8 test\n iraw = (raw1*128.).astype(np.int8)\n raw1 = iraw.astype(np.float32)/128.\n iraw = (raw2*128.).astype(np.int8)\n raw2 = iraw.astype(np.float32)/128.\n\n if coherent:\n chan = raw1 + 1j*raw2\n # vals[#int, #chan]; FT channels to finely spaced grid\n fine = fft(chan, axis=0, overwrite_x=True)\n # fine[#fine, #chan]; correct for dispersion w/i chan\n fine *= dedisperse\n # fine[#fine, #chan]; FT back to channel timeseries\n chan = ifft(fine, axis=0, overwrite_x=True)\n # vals[#int, #chan]\n power = chan.real**2 + chan.imag**2\n # power[#int, #chan]; timeit -> 0.6x shorter than abs(chan)**2\n else:\n power = raw1**2 + raw2**2\n # power[#int, #chan]\n\n # current sample positions in stream\n isr = j*ntint + np.arange(ntint)\n\n if do_waterfall:\n # loop over corresponding positions in waterfall\n for iw in xrange(isr[0]//ntw, isr[-1]//ntw + 1):\n if iw < nwsize: # add sum of corresponding samples\n waterfall[:,iw] += np.sum(power[isr//ntw == iw],\n axis=0)\n\n if do_foldspec:\n tsample = (tstart + isr*dtsample).value # times since start\n ibin = j*ntbin//nt\n for k in xrange(nchan):\n t = tsample - dt[k] # dedispersed times\n phase = phasepol(t) # corresponding PSR phases\n iphase = np.remainder(phase*ngate,\n ngate).astype(np.int)\n # sum and count samples by phase bin\n foldspec[k,:,ibin] += np.bincount(iphase, power[:,k], ngate)\n icount[k,:,ibin] += np.bincount(iphase, None, ngate)\n\n\n\n if verbose:\n print('read {0:6d} out of {1:6d}'.format(j+1, nt))\n\n if do_waterfall:\n nonzero = waterfall == 0.\n waterfall -= np.where(nonzero,\n np.sum(waterfall, 1, keepdims=True) /\n np.sum(nonzero, 1, keepdims=True), 0.)\n\n return foldspec, icount, waterfall\n","sub_path":"scintellometry/folding/fold_lofar.py","file_name":"fold_lofar.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
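A quick sanity check of the dispersion delay used in `fold` above (the numbers are illustrative: DM = 30 pc cm^-3, a 150 MHz channel referenced to 200 MHz):

```python
import astropy.units as u

dispersion_delay_constant = 4149. * u.s * u.MHz**2 * u.cm**3 / u.pc
dm = 30. * u.pc / u.cm**3
dt = (dispersion_delay_constant * dm *
      (1. / (150. * u.MHz)**2 - 1. / (200. * u.MHz)**2)).to(u.s)
print(dt)  # ~2.42 s: the lower-frequency channel lags the reference
```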
+{"seq_id":"135694227","text":"\nfrom PIL import Image, ImageTk\nfrom tkinter import Tk, TclError, Toplevel\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\n\nclass Interface(object):\n\n __metaclass__ = ABCMeta\n fen = None\n\n @abstractmethod\n def display_widgets(self, master, color_style_1=None,\n color_style_2=None, active_color=None):\n \n raise NotImplementedError\n\n @abstractmethod\n def display_menu(self, master):\n\n raise NotImplementedError\n\n def create_icon(self, image_dict):\n \n for key, value in image_dict.items():\n img = Image.open(value)\n image_dict[key] = ImageTk.PhotoImage(img)\n\n return image_dict\n\n def create_fen(self):\n if self.fen is not None:\n try:\n self.destroy_fen()\n except TclError:\n self.fen = None\n self.fen = Toplevel() # creating window\n\n return self.fen\n\n def destroy_fen(self):\n self.fen.destroy()\n self.fen = None\n \n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"225862134","text":"import sys\nsys.path.append(\"..\")\nsys.path.append(\"../../src/neuralNetwork\")\nsys.path.append(\"../../src/constrainedChasingEscapingEnv\")\nsys.path.append(\"../../src/algorithms\")\nsys.path.append(\"../../src\")\nimport os\nimport numpy as np\nimport pickle\n\nimport envNoPhysics as env\nimport policies\nimport wrapperFunctions\nimport mcts\nimport play\nfrom analyticGeometryFunctions import computeAngleBetweenVectors, computeVectorNorm\nfrom evaluationFunctions import GetSavePath\n\n\ndef main():\n # env\n sheepID = 0\n wolfID = 1\n posIndex = [0, 1]\n numOfAgent = 2\n actionSpace = [(0, 1), (1, 0), (-1, 0), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)]\n numActionSpace = len(actionSpace)\n xBoundary = [0, 180]\n yBoundary = [0, 180]\n killZoneRadius = 5\n\n getSheepPos = wrapperFunctions.GetAgentPosFromState(sheepID, posIndex)\n getWolfPos = wrapperFunctions.GetAgentPosFromState(wolfID, posIndex)\n checkBoundaryAndAdjust = env.StayInBoundaryByReflectVelocity(xBoundary, yBoundary)\n wolfDriectChasingPolicy = policies.HeatSeekingDiscreteDeterministicPolicy(actionSpace, getWolfPos, getSheepPos, computeAngleBetweenVectors)\n transition = env.TransiteForNoPhysics(checkBoundaryAndAdjust)\n sheepTransition = lambda state, action: transition(np.array(state), [np.array(action), wolfDriectChasingPolicy(state)])\n\n initPosition = np.array([[30, 30], [20, 20]])\n initNoise = [0, 0]\n reset = env.Reset(numOfAgent, initPosition, initNoise)\n isTerminal = env.IsTerminal(getWolfPos, getSheepPos, killZoneRadius, computeVectorNorm)\n\n # mcts policy\n cInit = 1\n cBase = 100\n calculateScore = mcts.CalculateScore(cInit, cBase)\n selectChild = mcts.SelectChild(calculateScore)\n\n mctsUniformActionPrior = lambda state: {action: 1 / len(actionSpace) for action in actionSpace}\n getActionPrior = mctsUniformActionPrior\n initializeChildren = mcts.InitializeChildren(actionSpace, sheepTransition, getActionPrior)\n expand = mcts.Expand(isTerminal, initializeChildren)\n\n maxRollOutSteps = 10\n rolloutPolicy = lambda state: actionSpace[np.random.choice(range(numActionSpace))]\n rewardFunction = lambda state, action: 1\n heuristic = lambda state: 0\n estimateValue = mcts.RollOut(rolloutPolicy, maxRollOutSteps, sheepTransition, rewardFunction, isTerminal, heuristic)\n\n numSimulations = 200\n mctsPolicy = mcts.MCTS(numSimulations, selectChild, expand, estimateValue, mcts.backup, mcts.selectGreedyAction)\n mctsPolicyDistOutput = mcts.MCTS(numSimulations, selectChild, expand, estimateValue, mcts.backup, mcts.establishSoftmaxActionDist)\n\n # sample trajectories\n maxRunningSteps = 30\n agentDist2Action = play.agentDistToGreedyAction\n worldDist2Action = lambda worldDist: play.worldDistToAction(agentDist2Action, worldDist)\n sampleTrajWithActionDist = play.SampleTrajectoryWithActionDist(maxRunningSteps, transition, isTerminal, reset, worldDist2Action)\n policyDistOutput = lambda state: [mctsPolicyDistOutput(state), wolfDriectChasingPolicy(state)]\n\n numTrajs = 2\n trajs = [sampleTrajWithActionDist(policyDistOutput) for _ in range(numTrajs)]\n print(\"Avg traj length = {}\".format(np.mean([len(traj) for traj in trajs])))\n\n dataDirectory = '../../data/compareValueDataStandardizationAndLossCoefs/trainingData/trajectories'\n if not os.path.exists(dataDirectory):\n os.makedirs(dataDirectory)\n extension = '.pickle'\n getSavePath = GetSavePath(dataDirectory, extension)\n varDict = {}\n varDict[\"initPos\"] = list(initPosition.flatten())\n varDict[\"rolloutSteps\"] = maxRollOutSteps\n 
varDict[\"numSimulations\"] = numSimulations\n varDict[\"maxRunningSteps\"] = maxRunningSteps\n varDict[\"numTrajs\"] = numTrajs\n varDict[\"cBase\"] = 100\n savePath = getSavePath(varDict)\n\n saveOn = True\n if saveOn:\n with open(savePath, \"wb\") as f:\n pickle.dump(trajs, f)\n print(\"Saved trajectories in {}\".format(savePath))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"exec/compareValueDataStandardizationAndLossCoefs/generateTrajectoriesForNN.py","file_name":"generateTrajectoriesForNN.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"113638530","text":"import urllib.request\nimport xml.etree.ElementTree as ET\nfrom ONUcheck.entities.individuals import XMLIndividual\n\n\ndef xml_security_sanctions():\n __url__ = \"https://scsanctions.un.org/resources/xml/en/consolidated.xml\"\n content, headers = urllib.request.urlretrieve(__url__)\n return content\n\ndef security_sanctions_indv():\n\n content = xml_security_sanctions()\n tree_obj = ET.parse(content)\n root_obj = tree_obj.getroot()\n\n list_indv = []\n\n # method to get the individuals only\n for child in root_obj:\n if child.tag == \"INDIVIDUALS\":\n for individual in child:\n indv = XMLIndividual(individual)\n list_indv.append(indv)\n return list_indv\n\n\n\ndef security_sanctions_ent():\n\n content = xml_security_sanctions()\n tree_obj = ET.parse(content)\n root_obj = tree_obj.getroot()\n\n list_ent = []\n\n # method to get the entities only\n for child in root_obj:\n if child.tag == \"ENTITIES\":\n for entity in child:\n list_ent.append(entity)\n return list_ent\n","sub_path":"ONUcheck/utils/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"360534337","text":"#!/usr/bin/env python3\n\"\"\"\nGlobusArchiver.py helps users archive data to the Campaign Store (and other Globus Endpoints)\n\"\"\"\n\nimport sys\n\nif sys.version_info[0] < 3:\n raise Exception(f\"Must be using Python 3.6 or later\")\nif sys.version_info[0] == 3 and sys.version_info[1] < 6:\n raise Exception(f\"Must be using Python 3.6 or later\")\n\n######################\n# PYTHON LIB IMPORTS\n#####################\nimport subprocess\nimport shlex\nimport os\nimport json\nimport webbrowser\nimport ssl\nimport threading\nimport glob\nimport copy\nimport smtplib\nimport email\nimport datetime\nimport socket\nimport random\nimport string\nimport shutil\n\n#####################\n# CONFIG MASTER STUFF\n#####################\nimport logging\n\n# manage externals\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'configmaster'))\ntry:\n from ConfigMaster import ConfigMaster\nexcept ImportError:\n print(f\"{os.path.basename(__file__)} needs ConfigMaster to run.\")\n print(f\"Please review README.md for details on how to run manage_externals\")\n exit(1)\n\n\n\n# GlobusArchiver.py version info\n'''\nVersion 1.3\n\nChangeLog\n1.3 - Added whenEmpty configuration options\n'''\n\nversion_info = (1, 3)\nversion = \".\".join(map(str, version_info))\n\ndefaultParams = \"\"\"\n\n######################################\n# GLOBUS CONFIGURATION\n######################################\n\n\n# Imports used in the configuration file\nimport os\nimport socket\nimport datetime\n\n\n#####################################\n## GENERAL CONFIGURATION\n#####################################\n\n# GlobusArchiver.py submits one task to Globus\n# This is used to identify the task on the Globus Web API\n# Through painful trial and error, I have determined this cannot have a period in it.\n\ntaskLabel = f\"GA-unnamed-%Y%m%d\"\n\n# I would recommend uncommenting the taskLabel definition below, but because of the way ConfigMaster currently works\n# I cannot have __file__ in the default params.\n\n# This uses the config file name as part of the label, but strips the extension and replaces '.' with '_'\n# taskLabel = f\"{(os.path.splitext(os.path.basename(__file__))[0]).replace('.','_')}-%Y%m%d\"\n\n############### TEMP DIR ##################\n\n# tempDir is used for:\n# - Staging Location for .tar Files\n# - Staging location if doStaging is True\n\n# Default, $TMPDIR if it is defined, otherwise $HOME if defined, otherwise '.'.\ntempDir = os.path.join(os.getenv(\"TMPDIR\",os.getenv(\"HOME\",\".\")), \"GlobusArchiver-tmp\")\n\n# You may want to keep the tmp area around for debugging\ncleanTemp = True\n\n############### EMAIL ##################\n\n# Deliver a report to these email addresses\n# Use a list of 3-tuples (\"name\", \"local-part\", \"domain\")\nemailAddresses = [(\"Paul Prestopnik\", \"prestop\", \"ucar.edu\")] \n\n# This is the email address that will be used in the \"from\" field\nfromEmail = emailAddresses[0]\n\n# Format of email subject line. Can refer to errors, archiveDate, configFile, and host\n# notated in curly braces.\nemailSubjectFormat = \"{errors} with GlobusArchiver on {host} - {configFile} - {archiveDate}\"\n\n# format of date timestamp in email subject. 
This format will be used to substitute\n# {archiveDate} in the emailSubjectFormat\nemailSubjectDateFormat = \"%Y/%m/%d\"\n\n\n#####################################\n## AUTHENTICATION \n#####################################\n\n# You can define the endpoint directly \n# This default value is the NCAR CampaignStore \n# the value was obtained by running:\n# $ globus endpoint search 'NCAR' --filter-owner-id 'ncar@globusid.org' | grep Campaign | cut -f1 -d' '\narchiveEndPoint = \"6b5ab960-7bbf-11e8-9450-0a6d4e044368\"\n\n# The refresh token is what lets you use globus without authenticating every time. We store it in a local file.\n# !!IMPORTANT!!!\n# You need to protect your Refresh Tokens. \n# They are an infinite lifetime credential to act as you.\n# Like passwords, they should only be stored in secure locations.\n# e.g. placed in a directory where only you have read/write access\nglobusTokenFile = os.path.join(os.path.expanduser(\"~\"),\".globus-ral\",\"refresh-tokens.json\")\n\n\n####################################\n## ARCHIVE RUN CONFIGURATION\n####################################\n\n######### Archive Date/Time #################\n#\n# This is used to set the date/time of the Archive.\n# The date/time can be substituted into all archive-item strings, by using\n# standard strftime formatting.\n\n# This value is added (so use a negative number to assign a date in the past) \n# to now() to find the archive date/time.\narchiveDayDelta=-2\n\n# If this is set, it overrides the archiveDayDelta. If you want to use\n# archiveDayDelta to set the Archive Date/Time, make sure this is \n# set to an empty string. This string must be parseable by one of the\n# format strings defined in archiveDateTimeFormats.\narchiveDateTimeString=\"\"\n\n# You can add additional strptime formats\narchiveDateTimeFormats=[\"%Y%m%d\",\"%Y%m%d%H\",\"%Y-%m-%dT%H:%M:%SZ\"]\n\n#####################################\n# ARCHIVE SUBMISSION CONFIGURATION\n#####################################\n\n# Set to False to process data but don't actually submit the tasks to Globus\nsubmitTasks = True\n\n# Number of seconds to wait to see if transfer completed\n# Report error if it doesn't complete after this time\n# Default is 21600 (6 hours)\ntransferStatusTimeout = 6*60*60\n\n# Globus is supposed to be fault tolerant, so we sometimes get errors, but it's ok and it recovers on its own.\n# Set this to the maximum number of errors you want to accept before giving up and quitting.\n# We poll once a minute, so this number of errors is also the number of minutes it will sit in a failure state before giving up.\n# Unfortunately we cannot distinguish between transient errors and permanent failures yet. \nmaxGlobusErrors = 60\n\n# syncLevel - specify when files are overwritten:\n#\n# \"exists\" - If the destination file is absent, do the transfer.\n# \"size\" - If destination file size does not match the source, do the transfer.\n# \"mtime\" - If source has a newer modified time than the destination, do the transfer.\n# \"checksum\" - If source and destination contents differ, as determined by a checksum of their contents, do the transfer.\nsyncLevel = \"checksum\"\n\n####################################\n## ARCHIVE ITEM CONFIGURATION\n####################################\n#\n# This is a dictionary of dictionaries. The following keys are supported in the inner dictionary:\n#\n# source - local (current) location of the data\n#\n# destination - remote location to put data (generally /gpfs/csfs1/ral/.. 
for the CampaignStore)\n#\n# doZip - optional, and defaults to False\n#\n# doStaging - optional, and defaults to False\n#\n# removeLinks - optional. Defaults to True. Only applies when staging data. \n#\n# tarFileName - optional and defaults to \"\". TAR is only done if tarFileName is a non-empty string.\n# If multiple archiveItems have the same tarFileName, they are combined into a single tar file.\n#\n# cdDirTar - This must match the beginning of the source, and is not included in the path within tar files.\n#\n# whenEmpty - If a source has zero files, at what level do we log this:\n# - \"WARN\", \"ERROR\", \"INFO\", \"DEBUG\", \"CRITICAL\" \n# - default - \"ERROR\"\n#\n# skipUnderscoreFiles - optional, and defaults to False\n#\n# expectedNumFiles - optional, only used in some circumstances\n#\n# expectedFileSize - optional, only used in some circumstances\n#\n# dataType - Not currently used.\n#\n# dataFormat - Not currently used.\n#\n# comment - Not currently used.\n\narchiveItems = {\n\"icing-cvs-data\":\n {\n \"source\": \"/d1/prestop/backup/test1\",\n \"destination\": \"/gpfs/csfs1/ral/nral0003\",\n \"doZip\": False,\n },\n\"icing-cvs-data2\":\n {\n \"source\": \"/d1/prestop/backup/test2\",\n \"destination\": \"/gpfs/csfs1/ral/nral0003\",\n \"doZip\": False,\n \"tarFileName\": \"test2.tar\",\n \"cdDirTar\": \"/d1/prestop/backup\",\n \"expectedNumFiles\": 3,\n \"expectedFileSize\": 1024,\n }\n}\n\"\"\"\n\n##################\n# GLOBUS IMPORTS\n##################\nimport globus_sdk\n\n########################################\n# Copied from https://github.com/globus/native-app-examples/blob/master/utils.py\ntry:\n import http.client as http_client\nexcept ImportError:\n import httplib as http_client\n\ntry:\n from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler\nexcept ImportError:\n from http.server import HTTPServer, BaseHTTPRequestHandler\n\ntry:\n import Queue\nexcept ImportError:\n import queue as Queue\n\ntry:\n from urlparse import urlparse, parse_qs\nexcept ImportError:\n from urllib.parse import urlparse, parse_qs\n\n\ndef enable_requests_logging():\n http_client.HTTPConnection.debuglevel = 4\n\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger('requests.packages.urllib3')\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True\n\n\ndef is_remote_session():\n return os.environ.get('SSH_TTY', os.environ.get('SSH_CONNECTION'))\n\n\nclass RedirectHandler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(b'You\\'re all set, you can close this window!')\n\n code = parse_qs(urlparse(self.path).query).get('code', [''])[0]\n self.server.return_code(code)\n\n def log_message(self, format, *args):\n return\n\n\nclass RedirectHTTPServer(HTTPServer, object):\n\n def __init__(self, listen, handler_class, https=False):\n super(RedirectHTTPServer, self).__init__(listen, handler_class)\n\n self._auth_code_queue = Queue.Queue()\n\n if https:\n self.socket = ssl.wrap_socket(\n self.socket, certfile='./ssl/server.pem', server_side=True)\n\n def return_code(self, code):\n self._auth_code_queue.put_nowait(code)\n\n def wait_for_code(self):\n return self._auth_code_queue.get(block=True)\n\n\ndef start_local_server(listen=('', 4443)):\n server = RedirectHTTPServer(listen, RedirectHandler)\n thread = threading.Thread(target=server.serve_forever)\n thread.daemon = True\n thread.start()\n\n return server\n\n\n#####################################\n# ConfigMaster 
\n#####################################\np = ConfigMaster()\np.setDefaultParams(defaultParams)\np.init(__doc__, allow_extra_parameters=True)\n\n########################################################\n# global constants\n########################################################\nCLIENT_ID = \"f70debeb-31cc-40c0-8d65-d747641428b4\"\nREDIRECT_URI = 'https://auth.globus.org/v2/web/auth-code'\nSCOPES = ('openid email profile '\n 'urn:globus:auth:scope:transfer.api.globus.org:all')\n\n###########################\n# Global for the email\n###########################\nemail_msg = email.message.EmailMessage()\nemail_critical = False\nemail_errors = 0\nemail_warnings = 0\n\n\n########################################################\n# Function definitions\n########################################################\n\ndef stringToLogFunction(s):\n s = s.upper()\n if s == \"WARN\" or s == \"WARNING\":\n return logging.warning\n if s == \"ERROR\":\n return logging.error\n if s == \"DEBUG\":\n return logging.debug\n if s == \"INFO\":\n return logging.info\n if s == \"CRITICAL\":\n return logging.critical\n\ndef safe_mkdirs(d):\n logging.info(f\"making dir: {d}\")\n if not os.path.exists(d):\n os.makedirs(d, 0o700, exist_ok=True)\n\n\ndef run_cmd(cmd, exception_on_error=False):\n '''\n runs a command with blocking\n\n returns a CompletedProcess instance \n - you can get to stdout with .stdout.decode('UTF-8').strip('\\n') \n '''\n logging.debug(f\"running command: {cmd}\")\n\n # I know you shouldn't use shell=True, but splitting up a piped cmd into\n # multiple separate commands is too much work right now.\n # shell=True is also required if using wildcards\n # TODO: https://stackoverflow.com/questions/13332268/how-to-use-subprocess-command-with-pipes\n # https://stackoverflow.com/questions/295459/how-do-i-use-subprocess-popen-to-connect-multiple-processes-by-pipes\n if '|' in cmd or ';' in cmd or '*' in cmd or '?' 
in cmd:\n cmd_out = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n encoding='utf-8')\n else:\n splitcmd = shlex.split(cmd)\n cmd_out = subprocess.run(splitcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n encoding='utf-8')\n\n if cmd_out.returncode != 0:\n log_and_email(f'Command returned non-zero exit status: {cmd_out.returncode}\\n\\tcmd: {cmd}.', logging.warning)\n log_and_email(f'\\tstderr: {cmd_out.stderr}', logging.warning)\n if exception_on_error:\n raise subprocess.CalledProcessError(cmd_out.returncode, cmd)\n\n return cmd_out\n\n\ndef parse_archive_date_time():\n # Set dateTime based on archiveDayDelta\n archive_date_time = datetime.datetime.now() + datetime.timedelta(days=int(p.opt[\"archiveDayDelta\"]))\n\n # If archiveDateTimeString is set, then try to use that to set dateTime\n if p.opt[\"archiveDateTimeString\"]:\n for format in p.opt[\"archiveDateTimeFormats\"]:\n logging.debug(f\"Checking {p.opt['archiveDateTimeString']} for format {format}\")\n try:\n archive_date_time = datetime.datetime.strptime(p.opt[\"archiveDateTimeString\"], format)\n except ValueError:\n continue\n\n return archive_date_time\n\n # if not matched, error and exit\n logging.error(f\"--archiveDateTimeString value ({p.opt['archiveDateTimeString']}) did not match any \"\n f\"--archiveDateTimeFormats items: {p.opt['archiveDateTimeFormats']}\")\n exit(1)\n\n return archive_date_time\n\n\ndef add_tar_groups_info():\n for item_key, item_info in p.opt[\"archiveItems\"].items():\n # for each tar'd item, first assume it is the last/only item in this tar file.\n if item_info.get(\"tarFileName\"):\n item_info[\"last_tar_in_group\"] = True\n item_info[\"tar_group_name\"] = \"\"\n else:\n continue\n\n # Now look at all other archive items and see if they are TARing to the same target\n past_this_item = False\n for item_key2, item_info2 in p.opt[\"archiveItems\"].items():\n if not item_info2.get(\"tarFileName\"):\n continue\n if item_key == item_key2:\n past_this_item = True\n item_info[\"tar_group_name\"] += item_key2\n continue\n if item_info[\"tarFileName\"] == item_info2[\"tarFileName\"]:\n item_info[\"tar_group_name\"] += item_key2\n if past_this_item and item_info[\"tarFileName\"] == item_info2[\"tarFileName\"]:\n item_info[\"last_tar_in_group\"] = False\n\n\n# I don't think we need the item_label anymore\n# def add_item_label():\n# for item, item_info in p.opt[\"archiveItems\"].items():\n# if item_info.get(\"itemLabel\"):\n# item_info[\"item_label\"] = item_info[\"itemLabel\"]\n# else:\n# item_info[\"item_label\"] = item + \"_%Y%m%d\"\n# substitute date/time strings and env variables in item info\n# TODO: do I need to do this?\n# item_info[\"item_label\"] = p.opt[\"archive_date_time\"].strftime(item_info[\"item_label\"])\n# item_info[\"item_label\"] = os.path.expandvars(item_info[\"item_label\"])\n\n\ndef randomword(length):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))\n\n\ndef handle_configuration():\n # TODO: do some error checking for user.\n # i.e. no duplicate keys in archiveItems, what else?\n\n archive_date_time = parse_archive_date_time()\n logging.info(f\"ARCHIVE DATE TIME: {archive_date_time}\")\n\n # I think we can do this. Let's use snake_case vs. CamelCase to distinguish between\n # values we are just storing in p.opt vs. actual config params.\n # NOTE/TODO: I wish I hadn't used snake vs. Camel for this. primarily there is no\n # distinction between the two for a single word. 
Instead I could have used a leading underscore to distinguish. \n p.opt[\"archive_date_time\"] = archive_date_time\n\n # add_item_label()\n add_tar_groups_info()\n\n # add random subdir to tmp dir\n p.opt[\"tempDir\"] = os.path.join(p.opt[\"tempDir\"], randomword(8))\n\n for item, item_info in p.opt[\"archiveItems\"].items():\n\n # strip trailing slash from source and destination and otherwise normalize\n item_info[\"source\"] = os.path.normpath(item_info[\"source\"])\n item_info[\"destination\"] = os.path.normpath(item_info[\"destination\"])\n if item_info.get(\"cdDirTar\"):\n item_info[\"cdDirTar\"] = os.path.normpath(item_info[\"cdDirTar\"])\n\n # TODO: make this pretty: https://stackoverflow.com/questions/3229419/how-to-pretty-print-nested-dictionaries\n logging.debug(\"After handle_configuration(), configuration looks like this:\")\n logging.debug(f\"{p.opt}\")\n\n\ndef load_tokens_from_file(filepath):\n \"\"\"Load a set of saved tokens.\"\"\"\n logging.info(f\"Attempting load of tokens from {filepath}\")\n with open(filepath, 'r') as f:\n tokens = json.load(f)\n\n return tokens\n\n\ndef save_tokens_to_file(filepath, tokens):\n \"\"\"Save a set of tokens for later use.\"\"\"\n safe_mkdirs(os.path.dirname(filepath))\n logging.info(f\"Attempting save of tokens to {filepath}\")\n with open(filepath, 'w') as f:\n json.dump(tokens, f)\n # TODO: make sure mode is set restrictively on this file\n\n\ndef update_tokens_file_on_refresh(token_response):\n \"\"\"\n Callback function passed into the RefreshTokenAuthorizer.\n Will be invoked any time a new access token is fetched.\n \"\"\"\n save_tokens_to_file(p.opt[\"globusTokenFile\"], token_response.by_resource_server)\n\n\ndef do_native_app_authentication(client_id, redirect_uri,\n requested_scopes=None):\n \"\"\"\n Does a Native App authentication flow and returns a\n dict of tokens keyed by service name.\n \"\"\"\n client = globus_sdk.NativeAppAuthClient(client_id=client_id)\n # pass refresh_tokens=True to request refresh tokens\n client.oauth2_start_flow(requested_scopes=requested_scopes,\n redirect_uri=redirect_uri,\n refresh_tokens=True)\n\n url = client.oauth2_get_authorize_url()\n\n print(f'\\n\\nAuthorization needed. Please visit this URL:\\n{url}\\n')\n\n if not is_remote_session():\n webbrowser.open(url, new=1)\n\n auth_code = input('Enter the auth code: ').strip()\n\n token_response = client.oauth2_exchange_code_for_tokens(auth_code)\n\n # return a set of tokens, organized by resource server name\n return token_response.by_resource_server\n\n\ndef get_transfer_client():\n tokens = None\n try:\n # if we already have tokens, load and use them\n tokens = load_tokens_from_file(p.opt[\"globusTokenFile\"])\n except:\n pass\n\n if not tokens:\n # if we need to get tokens, start the Native App authentication process\n tokens = do_native_app_authentication(CLIENT_ID, REDIRECT_URI, SCOPES)\n\n try:\n save_tokens_to_file(p.opt[\"globusTokenFile\"], tokens)\n except:\n pass\n\n transfer_tokens = tokens['transfer.api.globus.org']\n\n auth_client = globus_sdk.NativeAppAuthClient(client_id=CLIENT_ID)\n\n authorizer = globus_sdk.RefreshTokenAuthorizer(\n transfer_tokens['refresh_token'],\n auth_client,\n access_token=transfer_tokens['access_token'],\n expires_at=transfer_tokens['expires_at_seconds'],\n on_refresh=update_tokens_file_on_refresh)\n\n transfer = globus_sdk.TransferClient(authorizer=authorizer)\n\n myproxy_lifetime = 720 # in hours. 
What's the maximum?\n try:\n r = transfer.endpoint_autoactivate(p.opt[\"archiveEndPoint\"], if_expires_in=3600)\n while (r[\"code\"] == \"AutoActivationFailed\"):\n print(\"Endpoint requires manual activation, please use your UCAS name/password for this activation. \"\n \"You can activate via the command line or via web browser:\\n\"\n \"WEB BROWSER -- Open the following URL in a browser to activate the \"\n \"endpoint:\")\n print(f\"https://app.globus.org/file-manager?origin_id={p.opt['archiveEndPoint']}\")\n print(\"CMD LINE -- run this from your shell: \")\n print(\n f\"globus endpoint activate --myproxy --myproxy-lifetime {myproxy_lifetime} {p.opt['archiveEndPoint']}\")\n input(\"Press ENTER after activating the endpoint:\")\n r = transfer.endpoint_autoactivate(p.opt[\"archiveEndPoint\"], if_expires_in=3600)\n\n except globus_sdk.exc.GlobusAPIError as ex:\n print(\"endpoint_autoactivation failed.\")\n print(ex)\n if ex.http_status == 401:\n sys.exit('Refresh token has expired. '\n 'Please delete refresh-tokens.json and try again.')\n else:\n raise ex\n return transfer\n\n\ndef do_transfers(transfer):\n local_ep = globus_sdk.LocalGlobusConnectPersonal()\n local_ep_id = local_ep.endpoint_id\n\n p.opt[\"task_label\"] = p.opt[\"archive_date_time\"].strftime(p.opt[\"taskLabel\"])\n\n # p.opt[\"task_label\"] = p.opt[\"task_label\"].decode('utf-8')\n\n logging.info(f\"Creating TransferData object with label '{p.opt['task_label']}'\")\n # logging.info(f\"task_label - {type(p.opt['task_label'])}\")\n\n tdata = globus_sdk.TransferData(transfer, local_ep_id, p.opt[\"archiveEndPoint\"], label=p.opt[\"task_label\"], sync_level=p.opt[\"syncLevel\"])\n # tdata = globus_sdk.TransferData(transfer, local_ep_id, p.opt[\"archiveEndPoint\"])\n\n # keep track of any fatal errors in all items, skip the transfer if are found\n isOK = True\n\n logging.info(\"\\nBEGINNING PROCESSING OF archiveItems\")\n for item, item_info in p.opt[\"archiveItems\"].items():\n logging.info(f\"Starting on {item}\")\n\n # check that skip_underscores is only set if we are staging or TARing\n if item_info.get(\"skipUnderscoreFiles\"):\n if not item_info.get(\"doStaging\") and not item_info.get(\"tarFileName\"):\n log_and_email(f\"skipUnderscoreFiles is True, but not staging or TARing for {item}\", logging.fatal)\n isOK = False\n continue\n\n ii = copy.deepcopy(item_info)\n ii[\"key\"] = item\n logging.verbose(f\"Storing {item} as key\")\n # substitute date/time strings and env variables in item info\n # logging.verbose(f\"ii keys: {ii.keys()}\")\n for ii_key in (\"source\", \"destination\", \"tarFileName\", \"cdDirTar\"):\n if ii.get(ii_key):\n logging.verbose(f\"swapping {ii_key}: {ii[ii_key]}\")\n ii[ii_key] = p.opt[\"archive_date_time\"].strftime(ii[ii_key])\n ii[ii_key] = os.path.expandvars(ii[ii_key])\n logging.verbose(f\"after swap {ii_key}: {ii[ii_key]}\")\n\n # initialize number of files to 0\n ii['num_files'] = 0\n\n add_to_email(f\"\\nSOURCE: {ii['source']}\\n\")\n add_to_email(f\"DESTINATION: {ii['destination']}\\n\")\n\n if \"*\" in ii[\"source\"] or \"?\" in ii[\"source\"]: # Is there a '*' or '?' in the source?\n logging.verbose(f\"Found wildcard in source: {ii['source']}\")\n expanded_sources = glob.glob(ii['source'])\n ii[\"glob\"] = True\n\n if len(expanded_sources) == 0:\n log_and_email(f\"Source expands to zero targets: {ii['source']}. 
SKIPPING!\", stringToLogFunction(ii.get(\"whenEmpty\",\"ERROR\")))\n continue\n logging.info(f\"{ii['source']} expanded to {len(expanded_sources)} items\") \n\n else:\n ii[\"glob\"] = False\n\n if ii.get(\"glob\") == True:\n # can't handle both dirs and files in a glob\n file_glob = False\n dir_glob = False\n for es in expanded_sources:\n if os.path.isfile(es):\n file_glob = True\n if os.path.isdir(es):\n dir_glob = True\n if file_glob and dir_glob:\n log_and_email(\n f\"glob: {ii['source']} expands to files and dirs. Not allowed. Skipping this archive item.\",\n logging.error)\n continue\n\n if ii.get(\"glob\") == True and not ii.get(\"tarFileName\"):\n for es_ix, es in enumerate(expanded_sources):\n # skip files that start with underscore if set to skip them\n if ii.get(\"skipUnderscoreFiles\") and es.startswith('_'):\n continue\n\n ii[\"source\"] = es\n\n # if not last item\n if es_ix != len(expanded_sources) - 1:\n ii[\"last_glob\"] = False\n else:\n ii[\"last_glob\"] = True\n if not prepare_and_add_transfer(transfer, tdata, ii):\n continue\n\n else:\n if not ii[\"glob\"] and not os.path.exists(ii[\"source\"]):\n log_and_email(f\"{ii['source']} does not exist. Skipping this archive item.\", stringToLogFunction(ii.get(\"whenEmpty\",\"ERROR\")))\n continue\n\n # setting last glob to True for tarring with a glob so expected file size/number is checked\n ii[\"last_glob\"] = True\n if not prepare_and_add_transfer(transfer, tdata, ii):\n continue\n\n # submit all tasks for transfer\n if isOK and p.opt['submitTasks']:\n submit_transfer_task(transfer, tdata)\n\n\ndef prepare_and_add_transfer(transfer, tdata, item_info):\n logging.info(f\"\\nTRANSFER -- {item_info['source']}\")\n try:\n\n if prepare_transfer(item_info):\n # TODO: check_sizes(item_info) -- this is done during prepare, could be refactored to here?\n add_transfer_item(transfer, tdata, item_info)\n return True\n else:\n return False\n\n except Exception as e:\n log_and_email(f\"prepare_transfer raised exception {e}\", logging.error)\n\n\n# recursively creates parents to make path\ndef make_globus_dir(transfer, path):\n logging.debug(f\"Making path: {path} on endpoing via Globus\")\n dest_path = os.path.sep\n for element in path.split(os.path.sep):\n dest_path = os.path.join(dest_path, element)\n try:\n transfer.operation_ls(p.opt[\"archiveEndPoint\"], path=dest_path)\n except globus_sdk.exc.TransferAPIError as e:\n transfer.operation_mkdir(p.opt[\"archiveEndPoint\"], path=dest_path)\n\n\ndef prepare_transfer(ii):\n if not ii[\"source\"].startswith('/'):\n log_and_email(f\"Item source: {ii['source']} must be absolute. SKIPPING!\", logging.error)\n return False\n if not ii[\"destination\"].startswith('/'):\n log_and_email(f\"item destination: {ii['destination']} must be absolute. SKIPPING!\", logging.error)\n return False\n\n # error and skip if cdDirTar is not a subset of source\n if ii.get('cdDirTar') and ii['source'].find(ii['cdDirTar']) == -1:\n log_and_email(f\"source {ii['source']} must contain cdDirTar ({ii['cdDirTar']}. SKIPPING!\",\n logging.error)\n return False\n\n # Don't need this? transfer should automatically make dirs as needed.\n # try:\n # transfer.operation_ls(p.opt[\"archiveEndPoint\"], path=ii[\"destination\"])\n # except globus_sdk.exc.TransferAPIError as e:\n # log_and_email(f\"Destination path ({ii['destination']}) does not exist on archiveEndPoint. 
SKIPPING!\",\n # logging.error)\n # try:\n # transfer.operation_mkdir(p.opt[\"archiveEndPoint\"], path=ii[\"destination\"]])\n # except \n # \n # return False\n\n if ii.get(\"doStaging\"):\n logging.verbose(f\"Building staging dir from {p.opt['tempDir']} and {ii['key']}\")\n staging_dir = os.path.join(p.opt[\"tempDir\"], f\"Item-{ii['key']}-Staging\")\n logging.debug(f\"Using {staging_dir} for staging.\")\n cmd = f\"mkdir -p {staging_dir}\"\n run_cmd(cmd, exception_on_error=True)\n\n # handle simple case (no cdDirTar)\n if not ii.get('cdDirTar'):\n cmd = f\"cp -r \"\n\n # default removeLinks is True\n if ii.get(\"removeLinks\", True):\n cmd += \"-P \"\n\n cmd += f\"{ii['source']} {staging_dir}\"\n run_cmd(cmd, exception_on_error=True)\n lastDir = os.path.basename(ii['source'])\n ii['source'] = os.path.join(staging_dir, lastDir)\n else:\n # we've got a cdDirTar\n cmd = f\"cp -r --parents \"\n\n # default removeLinks is True\n if ii.get(\"removeLinks\", True):\n cmd += \"-P \"\n\n cmd += f\"{ii['source']} {staging_dir}\"\n\n run_cmd(cmd, exception_on_error=True)\n ii[\"cdDirTar\"] = os.path.join(staging_dir, ii[\"cdDirTar\"].lstrip(os.sep))\n ii['source'] = os.path.join(staging_dir, ii[\"source\"].lstrip(os.sep))\n logging.debug(f\"After staging, cdDirTar has been changed to {ii['cdDirTar']}\")\n\n logging.debug(f\"After staging, source has been changed to {ii['source']}\")\n\n # sometimes \"cp -P\" doesn't seem to be working to remove links, so let's remove them again, just in case\n if ii.get(\"removeLinks\", True):\n cmd = f\"find {ii['source']} -depth -type l -delete\"\n run_cmd(cmd, exception_on_error=True)\n\n if ii.get(\"doZip\"):\n\n # if source had a glob and is being TAR'd then it doesn't\n # get expanded. In that case, isdir and isfile will return\n # false, but we should instead expand the glob to check for files and dirs\n\n # if there is no glob, then this expands to the same thing\n expanded_sources = glob.glob(ii['source'])\n\n logging.debug(f\"original source: {ii['source']}\")\n #logging.debug(f\"expanded sources: {expanded_sources}\")\n logging.debug(f\"Glob expanded to {len(expanded_sources)} items. First item: {expanded_sources[0]}, last item: {expanded_sources[-1]}\") \n\n # already checked to only allow expansion to either files or dirs, so we are safe to just check first expansion.\n source_is_dir = os.path.isdir(expanded_sources[0])\n source_is_file = os.path.isfile(expanded_sources[0])\n\n cmd = \"yes n | gzip \" # need to pipe a 'n', because gzip is getting stuck asking \"already exists; do you wish to overwrite (y or n)? \"\n\n if source_is_dir:\n cmd += \"-r \"\n cmd += \"-S .gz \"; # force .gz suffix in case of differing gzip version\n cmd += ii['source'];\n logging.debug(f\"ZIPing file via cmd: {cmd}\")\n\n gzip_success = True\n # gzip returns an warning status if the file already exists (e.g. 
metars.txt and metars.txt.gz).\n # We don't want GlobusArchiver.py to fail if this happens, so only fail if it had an error (warning or success ok) (was getting inconsistent returncode, so checking stderr directly now)\n cmd_out = run_cmd(cmd)\n if cmd_out.returncode != 0 and \"unchanged\" in cmd_out.stderr:\n logging.debug(f\"Skipping gzip of {ii['source']} -- already gzipped.\")\n gzip_success = False\n \n elif cmd_out.returncode != 0 and \"Argument list too long\" in cmd_out.stderr:\n # this can fail if the cmd line is too long, in which case, let's expand the glob in python and call gzip on each item separately.\n logging.info(\"gzip failed - attempting globbed version\")\n for es in expanded_sources:\n\n cmd = \"yes n | gzip \" # need to pipe a 'n', because gzip is getting stuck asking \"already exists; do you wish to overwrite (y or n)? \"\n if source_is_dir:\n cmd += \"-r \"\n cmd += \"-S .gz \" # force .gz suffix in case of differing gzip version\n cmd += es\n logging.debug(f\"ZIPing file via cmd: {cmd}\")\n\n # gzip returns a warning status if the file already exists (e.g. metars.txt and metars.txt.gz).\n # We don't want GlobusArchiver.py to fail if this happens, so only fail if it had an error (warning or success ok)\n\n cmd_out = run_cmd(cmd)\n if cmd_out.returncode != 0 and \"unchanged\" not in cmd_out.stderr:\n logging.debug(f\"gzip failed for {es}. Continuing this archive item without gzip.\")\n gzip_success = False\n \n \n elif cmd_out.returncode != 0:\n logging.warning(\"gzip failed. Continuing this archive item without gzip.\")\n gzip_success = False\n\n \n\n # if source is a single file (or glob that expands to files) we need to add .gz to the end\n if source_is_file and gzip_success:\n ii['source'] += \".gz\"\n\n if ii.get(\"tarFileName\"):\n # check if input is empty directory and skip if so\n if os.path.isdir(ii['source']) and not os.listdir(ii['source']):\n log_and_email(f\"Source directory is empty: {ii['source']}. SKIPPING!\",\n logging.error)\n return False\n tar_dir = os.path.join(p.opt[\"tempDir\"], f\"Item-{ii['tar_group_name']}-Tar\")\n safe_mkdirs(tar_dir)\n tar_path = os.path.join(tar_dir, ii[\"tarFileName\"])\n\n # if cdDirTar is set, cd into that directory and create the tarball using the\n # relative path to source from cdDirTar. If source and cdDirTar are the same, use *\n if ii.get(\"cdDirTar\"):\n cmd = f\"cd {ii['cdDirTar']}; tar rf {tar_path} \"\n relative_path = ii['source'].replace(ii['cdDirTar'], '').lstrip(os.path.sep)\n if relative_path == '':\n relative_path = '*'\n cmd += relative_path\n else:\n cmd = f\"tar rf {tar_path} {ii['source']}\"\n\n if ii.get(\"skipUnderscoreFiles\"):\n cmd += \" --exclude \\\"_*\\\"\"\n cmd_out = run_cmd(cmd)\n\n # If this failed, it's probably because the glob expanded into too long of a cmd line, so\n # we need to handle each file individually. 
This is slower than letting tar handle multiple\n # files at once, so that is why we first attempt letting the shell expand the glob.\n if cmd_out.returncode != 0 and \"Argument list too long\" in cmd_out.stderr:\n logging.info(\"tar failed - attempting globbed version\")\n source = ii['source']\n\n if ii.get(\"cdDirTar\"):\n relative_path = ii['source'].replace(ii['cdDirTar'], '').lstrip(os.path.sep)\n if relative_path == '':\n relative_path = '*'\n os.chdir(ii.get(\"cdDirTar\"))\n source = relative_path\n\n # if there is no glob, then this expands to the same thing, but we are probably not here if there is no glob\n logging.debug(f\"attempting to glob: {source}\")\n expanded_sources = glob.glob(source)\n logging.debug(f\"Glob expanded to {len(expanded_sources)} items.\")\n try:\n logging.debug(f\"First item: {expanded_sources[0]}, last item: {expanded_sources[-1]}\")\n except IndexError:\n logging.warning(\"no items in expanded_sources\")\n for es in expanded_sources:\n\n # if cdDirTar is set, cd into that directory and create the tarball using the\n # relative path to source from cdDirTar. If source and cdDirTar are the same, use *\n if ii.get(\"cdDirTar\"):\n cmd = f\"cd {ii['cdDirTar']}; tar rf {tar_path} \"\n relative_path = es.replace(ii['cdDirTar'], '').lstrip(os.path.sep)\n if relative_path == '':\n relative_path = '*'\n cmd += relative_path\n else:\n cmd = f\"tar rf {tar_path} {es}\"\n\n if ii.get(\"skipUnderscoreFiles\"):\n cmd += \" --exclude \\\"_*\\\"\"\n\n cmd_out = run_cmd(cmd)\n if cmd_out.returncode != 0:\n return False\n\n # created the tar file, so now set the source to the tar file \n ii[\"source\"] = os.path.join(tar_dir, ii[\"tarFileName\"])\n\n cmd = f\"tar tf {ii['source']} | wc -l\"\n\n cmd_out = run_cmd(cmd)\n if cmd_out.returncode != 0:\n return False\n\n # logging.verbose(f\"got output: {cmd_out.stdout}\")\n ii[\"num_files\"] = int(cmd_out.stdout)\n else:\n # if source is a directory, list the number of files inside\n # otherwise just increment number of files\n if os.path.isdir(ii[\"source\"]):\n ii[\"num_files\"] = len(os.listdir(ii[\"source\"]))\n else:\n ii[\"num_files\"] += 1\n\n # if not ii[\"glob\"] or ii.get(\"tarFileName\"):\n # ii[\"file_size\"] = os.path.getsize(ii[\"source\"])\n\n if ii.get(\"expectedFileSize\") and (not ii[\"glob\"] or ii.get(\"last_glob\")):\n if ii.get(\"file_size\"):\n if ii[\"file_size\"] < ii[\"expectedFileSize\"]:\n log_and_email(\n f\"file_size < expectedFileSize: {ii['file_size']} < {ii['expectedFileSize']}\",\n logging.warning)\n else:\n log_and_email(\n f\"expectedFileSize given, but file_size not calculated\", logging.warning)\n\n if ii.get(\"expectedNumFiles\") and (not ii[\"glob\"] or ii.get(\"last_glob\")):\n if ii.get(\"num_files\"):\n if ii[\"num_files\"] < ii[\"expectedNumFiles\"]:\n log_and_email(\n f\"Item has {ii['num_files']} files but expects {ii['expectedNumFiles']} files!\",\n logging.warning)\n else:\n logging.verbose(f\"Number of files ({ii['num_files']}) is equal to or greater than \"\n f\"expectedNumFiles ({ii['expectedNumFiles']})\")\n else:\n # this should never happen\n log_and_email(\n f\"expectedNumFiles given, but num_files not calculated\", logging.warning)\n return True\n\n\ndef add_to_email(email_str):\n global email_msg\n email_msg.set_content(email_msg.get_content() + email_str)\n\n\ndef log_and_email(msg_str, logfunc):\n # uses global email_msg\n global email_critical\n global email_errors\n global email_warnings\n\n # add to error/warning counter to modify email subject\n if logfunc == logging.critical:\n 
email_critical = True\n elif logfunc == logging.error:\n email_errors = email_errors + 1\n elif logfunc == logging.warning:\n email_warnings = email_warnings + 1\n\n logfunc(msg_str)\n add_to_email(logfunc.__name__.upper() + \": \" + msg_str)\n\n\ndef add_transfer_item(transfer, tdata, ii):\n logging.debug(\"Entering add_transfer_item\")\n logging.debug(f\"Adding: {ii}\")\n logging.verbose(f\"tdata = {tdata}\")\n # get leaf dir from source, and add it to destination\n # if cdDir is set and not tarring data, set leaf\n # to source with cdDir stripped off to get any subdirectories\n # if ii.get(\"cdDir\") and not ii.get(\"tarFileName\"):\n # leaf = ii['source'].replace(ii['cdDir'], '').lstrip(os.path.sep)\n # else:\n # leaf = os.path.basename(ii['source'].rstrip(os.path.sep))\n\n # if we are not TARing, then we will send the leaf of the source up to the destination\n if not ii.get(\"tarFileName\"):\n leaf = os.path.basename(ii['source'].rstrip(os.path.sep))\n destination = os.path.join(ii['destination'], leaf)\n else:\n destination = os.path.join(ii['destination'], ii[\"tarFileName\"])\n # destination = ii['destination']\n logging.debug(f\"Using destination: {destination}\")\n\n # make_globus_dir(transfer, destination)\n\n # Check if destination_dir already exists, and skip if so\n # TODO: add support to overwrite?\n # try:\n # transfer.operation_ls(p.opt[\"archiveEndPoint\"], path=destination)\n # log_and_email(f\"Destination {destination} already exists on archiveEndPoint. SKIPPING!\", logging.error)\n # return\n # except globus_sdk.exc.TransferAPIError as e:\n # if e.code != u'ClientError.NotFound':\n # log_and_email(f\"Can't ls {p.opt['archiveEndPoint']} : {destination}\", logging.fatal)\n # logging.fatal(e)\n # return\n\n # create destination directory\n # try:\n # logging.info(f\"Creating destination directory {destination}\")\n # transfer.operation_mkdir(p.opt[\"archiveEndPoint\"], destination)\n # except globus_sdk.exc.TransferAPIError as e:\n # log_and_email(f\"Can't mkdir {p.opt['archiveEndPoint']} : {destination}\", logging.fatal)\n # logging.fatal(e)\n # return\n\n # TODO: set permissions for users to read dir\n # look at https://github.com/globus/automation-examples/blob/master/share_data.py\n\n # print(\"Looking at local end point\")\n # for entry in transfer.operation_ls(local_ep_id):\n # print(f\"Local file: {entry['name']}\")\n\n logging.debug(f\"source: {ii['source']} isdir: {os.path.isdir(ii['source'])} tfn: {ii.get('tarFileName')}\")\n if os.path.isdir(ii['source']): # and not ii.get(\"tarFileName\"):\n tdata.add_item(ii['source'], destination, recursive=True)\n else:\n tdata.add_item(ii['source'], destination)\n logging.debug(f\"Adding TransferData item: {ii['source']} -> {destination}\")\n\n\ndef check_task_for_success(transfer, task_id):\n logging.debug(\"Waiting for transfer to complete...\")\n\n timeoutFull = p.opt['transferStatusTimeout']\n\n # timeoutInterval - If there is an error, it will take this long before we give up.\n timeoutInterval = 1 * 60 # seconds\n\n # pollingInterval - Once the task has completed, it will take up to this long before we realize it.\n # - You also get several lines in your log at this interval while your task is in progress.\n pollingInterval = 1 * 60 # seconds\n\n # condition intervals\n pollingInterval = min(timeoutInterval, pollingInterval)\n timeoutInterval = min(timeoutInterval, timeoutFull)\n\n timeoutCounter = 0\n\n # wait for task to report that it completed or it timed out\n # if any event is still in progress, keep waiting\n 
# if no events are still in progress and there are errors\n # then cancel the transfer to stop it from retrying\n hasErrors = False\n\n numErrors = 0\n\n while numErrors < p.opt[\"maxGlobusErrors\"] and timeoutCounter < timeoutFull and not transfer.task_wait(task_id, timeout=timeoutInterval,\n polling_interval=pollingInterval):\n hasErrors = False\n\n # get any errors in the event list, but don't give up, because Globus usually recovers.\n\n # filter option is passed via query_params in sdk >= 2.X\n if int(globus_sdk.__version__[0]) >= 2:\n query_param_dict = {\"filter\": \"is_error:1\"}\n events = transfer.task_event_list(task_id=task_id, query_params=query_param_dict)\n else:\n events = transfer.task_event_list(task_id, filter=[\"is_error:1\"])\n\n for event in events:\n log_and_email(f\"Task Event indicates an error: {event['details']}.\\n\", logging.warning)\n numErrors += 1\n\n\n # check all events for in progress or error status\n # for event in transfer.task_event_list(task_id, filter=[\"is_error:1\"]):\n # print(f\"retrieved event: {event}\")\n # if event['is_error']:\n # hasErrors = True\n # if True in [event['is_error'] for event in transfer.task_event_list(task_id)]:\n # hasErrors = True\n\n timeoutCounter += timeoutInterval\n\n if numErrors >= p.opt[\"maxGlobusErrors\"]:\n transfer.cancel_task(task_id)\n log_and_email(f\"Transfer reached max errors ({numErrors}) and was cancelled.\", logging.critical)\n elif timeoutCounter >= timeoutFull:\n transfer.cancel_task(task_id)\n log_and_email(f\"Transfer timed out after {timeoutFull} seconds and was cancelled.\", logging.critical)\n else:\n log_and_email(f\"Transfer completed successfully.\", logging.info)\n\n\ndef submit_transfer_task(transfer, tdata):\n try:\n logging.info(f\"Submitting transfer task - {tdata}\")\n task = transfer.submit_transfer(tdata)\n except globus_sdk.exc.TransferAPIError as e:\n log_and_email(\"Transfer task submission failed\", logging.critical)\n logging.critical(e)\n return\n\n add_to_email(\"\\n\")\n log_and_email(f\"Task ID: {task['task_id']}\", logging.info)\n log_and_email(f\"This transfer can be monitored via the Web UI: https://app.globus.org/activity/{task['task_id']}\",\n logging.info)\n\n check_task_for_success(transfer, task['task_id'])\n\n\ndef prepare_email_msg():\n email_msg['From'] = email.headerregistry.Address(*p.opt[\"fromEmail\"])\n\n to = ()\n for em in p.opt[\"emailAddresses\"]:\n to += (email.headerregistry.Address(*em),)\n email_msg['To'] = to\n\n email_msg.set_content(f\"This is a msg from GlobusArchiver.py.\\n\")\n\n\ndef set_email_msg_subject():\n # set subject text based on user specifications\n err_str = ''\n\n if email_critical:\n err_str += 'FAILURE'\n elif email_errors == 0 and email_warnings == 0:\n err_str += 'NO PROBLEMS'\n elif email_errors > 0 and email_warnings > 0:\n err_str += f'{email_errors} ERRORS & {email_warnings} WARNINGS'\n elif email_errors:\n err_str += f'{email_errors} ERRORS'\n elif email_warnings:\n err_str += f'{email_warnings} WARNINGS'\n\n subject_format = {}\n subject_format['errors'] = err_str\n subject_format['archiveDate'] = p.opt[\"archive_date_time\"].strftime(p.opt['emailSubjectDateFormat'])\n subject_format['host'] = socket.gethostname()\n subject_format['configFile'] = os.path.basename(p.getConfigFilePath())\n\n subject = p.opt['emailSubjectFormat'].format(**subject_format)\n email_msg['Subject'] = subject\n\n\ndef send_email_msg():\n logging.info(f\"Sending email to {email_msg['To']}\")\n logging.debug(f\"BODY: {email_msg.get_body()}\")\n\n with 
smtplib.SMTP('localhost') as s:\n s.send_message(email_msg)\n\n\ndef main():\n logging.info(f\"Starting {os.path.basename(__file__)} v{version_info[0]}.{version_info[1]}\")\n if len(sys.argv) == 1:\n logging.info('You must supply command line arguments to run GlobusArchiver.py')\n p.parser.print_help()\n exit(0)\n\n # pp = pprint.PrettyPrinter()\n logging.info(f\"Read this configuration:\")\n for line in p.getParamsString().splitlines():\n # logging.info(pp.pformat(line))\n logging.info(f\"{line}\")\n\n handle_configuration()\n prepare_email_msg()\n\n logging.debug(f\"Using this configuration (after transformation):\")\n for line in p.getParamsString().splitlines():\n logging.debug(f\"\\t{line}\")\n\n transfer_client = get_transfer_client()\n do_transfers(transfer_client)\n\n set_email_msg_subject()\n send_email_msg()\n\n if p.opt[\"cleanTemp\"] and os.path.isdir(p.opt['tempDir']):\n logging.info(f\"removing temp directory tree : {p.opt['tempDir']}\")\n shutil.rmtree(p.opt[\"tempDir\"])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"GlobusArchiver.py","file_name":"GlobusArchiver.py","file_ext":"py","file_size_in_byte":47273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
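The heart of GlobusArchiver.py is a single submit-and-poll cycle spread across do_transfers(), submit_transfer_task(), and check_task_for_success(). A condensed sketch of that flow, using only the globus_sdk calls the script itself relies on (TransferData, submit_transfer, task_wait); the endpoint IDs and paths are placeholders, an authorized TransferClient is assumed (see get_transfer_client() above), and the positional TransferData signature matches the v1/v2-era SDK used here:

```python
import globus_sdk


def archive_directory(transfer, src_endpoint, dst_endpoint, src_path, dst_path):
    # Build one task; "checksum" sync means unchanged files are not re-copied.
    tdata = globus_sdk.TransferData(
        transfer, src_endpoint, dst_endpoint,
        label="GA-sketch",          # task labels must not contain periods
        sync_level="checksum")
    tdata.add_item(src_path, dst_path, recursive=True)  # whole-directory item

    task = transfer.submit_transfer(tdata)
    task_id = task["task_id"]

    # task_wait() returns False on timeout and True once the task finishes,
    # mirroring the once-a-minute polling loop in check_task_for_success().
    while not transfer.task_wait(task_id, timeout=60, polling_interval=60):
        print(f"task {task_id} still running...")
    return task_id
```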
+{"seq_id":"556154638","text":"import json\nimport os\nimport time\n\nimport pytest\n\nfrom dent_os_testbed.lib.interfaces.interface import Interface\nfrom dent_os_testbed.lib.os.service import Service\nfrom dent_os_testbed.utils.test_utils.tb_utils import (\n tb_device_check_health,\n tb_flap_links,\n tb_get_all_devices,\n tb_reset_ssh_connections,\n)\n\npytestmark = pytest.mark.suite_system_wide_testing\n\nTRIGGER_FLAP_LINK = \"FLAP_LINK\"\nTRIGGER_RESTART_SERVICES = \"RESTART_SERVICES\"\nTRIGGER_IFRELOAD = \"IFRELOAD\"\n\n\nasync def do_trigger(testbed, trigger_obj):\n trigger = trigger_obj[0]\n device = trigger_obj[1].pop(0)\n if trigger == TRIGGER_FLAP_LINK:\n device.applog.info(f\"Triggering Port Flap in {device.host_name}\")\n await tb_flap_links(device, \"swp\")\n elif trigger == TRIGGER_RESTART_SERVICES:\n services = [\n # \"frr.service\",\n \"IhmDentTcFlower.service\",\n # \"networking\",\n ]\n for s in services:\n input_data = [{device.host_name: [{\"name\": s}]}]\n out = await Service.show(\n input_data=input_data,\n )\n if out[0][device.host_name][\"rc\"]:\n device.applog.info(f\"{s} not running on {device.host_name}\")\n continue\n out = await Service.restart(\n input_data=input_data,\n )\n assert out[0][device.host_name][\"rc\"] == 0, f\"Failed to restart the service {s} {out}\"\n device.applog.info(\"zzZZZ(60s)\")\n time.sleep(60)\n out = await Service.show(\n input_data=input_data,\n )\n assert (\n out[0][device.host_name][\"rc\"] == 0\n ), f\" service didnt come up {s} {out} on {device.host_name}\"\n elif trigger == TRIGGER_IFRELOAD:\n out = await Interface.reload(input_data=[{device.host_name: [{\"options\": \"-a\"}]}])\n assert out[0][device.host_name][\"rc\"] == 0, f\"Failed to ifreload -a \"\n device.applog.info(out)\n else:\n device.applog.info(f\"unknown trigger {trigger} on {device.host_name}\")\n # put the device back to the end\n trigger_obj[1].append(device)\n trigger_obj[2] += 1\n\n\n@pytest.mark.asyncio\nasync def test_system_wide_restart_and_service_reloads(testbed):\n \"\"\"\n - on each device\n - do the following trigger\n - reboot\n - restaert networking\n -\n \"\"\"\n devices = []\n prev_dut_state = {}\n for dev in await tb_get_all_devices(testbed):\n if dev.os != \"dentos\":\n testbed.applog.info(f\"Skipping {dev.host_name} since its {dev.os}\")\n continue\n prev_dut_state[dev.host_name] = await tb_device_check_health(dev, None, False)\n devices.append(dev)\n\n # after traffic is stopped\n trigger_types = [\n # TRIGGER_FLAP_LINK,\n TRIGGER_RESTART_SERVICES,\n TRIGGER_IFRELOAD,\n ]\n triggers = []\n for trigger in trigger_types:\n t = [trigger, [], 1]\n for dev in devices:\n t[1].append(dev)\n triggers.append(t)\n\n count = 25\n while count:\n \"\"\"\n - For each triggers test the traffic is working or not.\n \"\"\"\n # check the system\n\n # analyze logs\n trigger = triggers.pop(0)\n await do_trigger(testbed, trigger)\n testbed.applog.info(f\"zzzZZZ Iteration {count} next trigger {triggers[0]}\")\n time.sleep(60)\n # check the system state\n await tb_reset_ssh_connections(devices)\n for device in devices:\n # disconnect and try now\n await tb_device_check_health(device, prev_dut_state[device.host_name], True)\n triggers.append(trigger)\n count -= 1\n","sub_path":"Amazon_Framework/DentOsTestbed/src/dent_os_testbed/test/test_suite/sanity/test_restart_networking.py","file_name":"test_restart_networking.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"394619652","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: \\\\ad.uit.no\\uit\\data\\esi000data\\dokumenter\\forskning\\papers\\paneltime\\paneltime\\paneltime\\gui\\gui_data_objects.py\n# Compiled at: 2020-01-14 06:02:57\n# Size of source mod 2**32: 7564 bytes\nimport tkinter as tk\nfrom tkinter import ttk\nfrom gui import gui_charts\nimport time\nfrom gui import gui_scrolltext\nNON_NUMERIC_TAG = '|~|'\nfont = 'Arial 9 '\ntags = dict()\ntags['dependent'] = {'fg':'#025714', 'bg':'#e6eaf0', 'font':font + 'bold', 'short':'Y'}\ntags['independent'] = {'fg':'#053480', 'bg':'#e6eaf0', 'font':font + 'bold', 'short':'X'}\ntags['time variable'] = {'fg':'#690580', 'bg':'#e6eaf0', 'font':font + 'bold', 'short':'T'}\ntags['id variable'] = {'fg':'#910101', 'bg':'#e6eaf0', 'font':font + 'bold', 'short':'ID'}\ntags['het.sc._factors'] = {'fg':'#029ea3', 'bg':'#e6eaf0', 'font':font + 'bold', 'short':'HF'}\nunselected = {'fg':'black', 'bg':'white', 'font':font, 'short':''}\ntags_list = [\n 'dependent', 'independent', 'time variable', 'id variable', 'het.sc._factors']\n\nclass data_objects(ttk.Treeview):\n\n def __init__(self, tabs, window):\n s = ttk.Style()\n s.configure('new.TFrame', background='white', font=font)\n self.tabs = tabs\n self.win = window\n self.main_frame = tk.Frame(tabs)\n self.canvas = tk.Canvas(self.main_frame)\n ttk.Treeview.__init__(self, (self.canvas), style='new.TFrame')\n self.data_frames = datasets()\n self.level__dicts = [dict(), dict(), dict()]\n yscrollbar = ttk.Scrollbar((self.canvas), orient='vertical', command=(self.yview))\n self.configure(yscrollcommand=(yscrollbar.set))\n xscrollbar = ttk.Scrollbar((self.canvas), orient='horizontal', command=(self.xview))\n self.configure(xscrollcommand=(xscrollbar.set))\n self.gridding(xscrollbar, yscrollbar)\n self.tree_construction()\n self.binding()\n self.tabs.add((self.main_frame), text='data frames')\n self.tabs.grid(row=0, column=0, sticky=(tk.NSEW))\n self.script = ''\n\n def binding(self):\n self.bind('', self.tree_double_click)\n self.bind('<>', self.tree_click)\n self.bind('', self.key_down)\n self.bind('', self.key_up)\n\n def tree_construction(self):\n self['columns'] = ('one', 'two')\n self.column('#0', stretch=(tk.YES))\n self.column('one', width=15, stretch=(tk.YES))\n self.column('two', width=75, stretch=(tk.YES))\n self.heading('#0', text='Name', anchor=(tk.W))\n self.heading('one', text='', anchor=(tk.W))\n self.heading('two', text='type', anchor=(tk.W))\n self.alt_time = time.perf_counter()\n for k in tags_list:\n tag_configure(self, k, tags[k])\n else:\n self.tree = dict()\n\n def gridding(self, xscrollbar, yscrollbar):\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n self.tabs.rowconfigure(0, weight=1)\n self.tabs.columnconfigure(0, weight=1)\n self.main_frame.rowconfigure(0, weight=1)\n self.main_frame.columnconfigure(0, weight=1)\n self.canvas.rowconfigure(0, weight=1)\n self.canvas.columnconfigure(0, weight=1)\n self.main_frame.grid(row=0, column=0, sticky=(tk.NSEW))\n self.canvas.grid(row=0, column=0, sticky=(tk.NSEW))\n xscrollbar.grid(row=1, column=0, sticky='ew')\n yscrollbar.grid(row=0, column=1, sticky='ns')\n self.grid(row=0, column=0, sticky=(tk.NSEW))\n\n def key_down(self, event):\n if event.keysym == 'Alt_L' or event.keysym == 'Alt_R':\n self.configure(cursor='target')\n self.alt_time = time.perf_counter()\n\n def key_up(self, event):\n if 
event.keysym == 'Alt_L' or event.keysym == 'Alt_R':\n self.configure(cursor='arrow')\n\n def tree_double_click(self, event):\n item = self.selection()[0]\n item = self.item(item)['text']\n self.win.main_tabs.insert_current_editor(item)\n\n def tree_click(self, event):\n item = self.selection()\n if len(item) == 0:\n return\n else:\n item = item[0]\n levels = item.split(';')\n if levels[1] == '':\n return\n if len(levels) == 3:\n self.var_defined(levels)\n elif len(levels) == 2:\n self.var_clicked(levels, item)\n self.script = self.get_script()\n self.win.insert_script()\n\n def var_clicked(self, levels, item):\n i, j = levels\n item_obj = self.item(item)\n short, vtype = item_obj['values']\n t = self.tag_configure(item)\n if item_obj['open']:\n if t['font'] != unselected['font']:\n tag_configure(self, item, unselected, ('', vtype))\n else:\n self.close_all()\n elif time.perf_counter() - self.alt_time < 0.1:\n if short == '':\n tag_configure(self, item, tags['independent'], ('X', vtype))\n else:\n tag_configure(self, item, unselected, ('', vtype))\n else:\n self.close_all()\n self.item(item, open=True)\n\n def var_defined(self, levels):\n parent_itm = ';'.join(levels[:-1])\n fname, j, k = levels\n short, vtype = self.item(parent_itm)['values']\n s = tags[k]['short']\n if s == 'Y' or s == 'T' or s == 'ID':\n for i in self.tree[fname]:\n short_i, vtype_i = self.item(i)['values']\n if s == short_i:\n tag_configure(self, i, unselected, ('', vtype_i))\n\n tag_configure(self, parent_itm, tags[k])\n self.item(parent_itm, values=(tags[k]['short'], vtype))\n self.item(parent_itm, open=False)\n\n def update_editor(self):\n tb = self.win.main_tabs.current_editor(True)\n n = len(tb.get('1.0', 'end-1c'))\n\n def get_script(self):\n item = self.selection()\n if len(item) == 0:\n return\n item = item[0]\n levels = item.split(';')\n X = []\n d = dict()\n fname = levels[0]\n d['Y'], d['X'], d['ID'], d['T'] = ('', '', '', '')\n for i in self.tree[fname]:\n fname, j = i.split(';')\n if self.item(i)['values'][0] == 'X':\n X.append(j)\n else:\n d[self.item(i)['values'][0]] = f\"{j}\"\n else:\n args = [\n f\"'{d['Y']}~{'+'.join(X)}'\\n\\t\", f\"df['{fname}']\"]\n for i in ('ID', 'T'):\n if d[i] != '':\n args.append(f\"{i}='{d[i]}'\")\n mod_str = f\"pt.execute({','.join(args)})\"\n return mod_str\n\n def close_all(self):\n for i in self.tree:\n for j in self.tree[i]:\n self.item(j, open=False)\n\n def get_selected_df(self):\n item = self.selection()\n if len(item) == 0:\n raise RuntimeError('No data frame dictionary is selected in the right pane, or data has not been imported')\n itm = item[0].split(';')[0]\n return self.data_frames.dict[itm]\n\n def add_df_to_tree(self, df, fname):\n try:\n self.insert('', 1, f\"{fname};\", text=fname)\n except tk.TclError:\n self.delete(f\"{fname};\")\n self.insert('', 1, f\"{fname};\", text=fname)\n else:\n self.add_node(df, fname)\n self.tabs.select(self.main_frame)\n self.item(f\"{fname};\", open=True)\n self.selection_add(f\"{fname};\")\n\n def add_node(self, df, fname):\n a = []\n self.tree[fname] = a\n for j in df:\n nptype = np_type(j, df)\n if nptype != 'na':\n self.insert(f\"{fname};\", 2, f\"{fname};{j}\", text=j, values=('', nptype), tags=(f\"{fname};{j}\",))\n a.append(f\"{fname};{j}\")\n for k in tags_list:\n self.insert(f\"{fname};{j}\", 3, f\"{fname};{j};{k}\", values=('', tags[k]['short']), text=k, tags=(k,))\n\n\ndef np_type(name, df):\n x = df[name]\n if NON_NUMERIC_TAG in name or name == 'ones':\n return 'na'\n non_num = name + NON_NUMERIC_TAG\n if non_num in 
df:\n x = df[non_num]\n nptype = 'na'\n t = str(type(x)).replace(' ', '')[7:][:-2]\n if t.split('.')[0] == 'numpy':\n nptype = str(x.dtype)\n return nptype\n\n\ndef tag_configure(tree, name, d, value=None):\n tree.tag_configure(name, foreground=(d['fg']))\n tree.tag_configure(name, background=(d['bg']))\n tree.tag_configure(name, font=(d['font']))\n if value is not None:\n tree.item(name, value=value)\n\n\nclass dataset:\n\n def __init__(self, data_frame, source, script):\n self.data_frame = data_frame\n self.source = source\n self.script = script\n\n\nclass datasets:\n\n def __init__(self):\n self.dict = dict()\n\n def add(self, name, data_frame, source, script):\n self.dict[name] = dataset(data_frame, source, script)","sub_path":"pycfiles/paneltime-1.1.12-cp37-cp37m-win_amd64/gui_data_objects.cpython-38.py","file_name":"gui_data_objects.cpython-38.py","file_ext":"py","file_size_in_byte":9108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
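The colour-coding in data_objects hinges on ttk.Treeview tags: every row is inserted with a role tag, and tag_configure() maps that tag to a foreground, background, and font, exactly as the tag_configure() helper above does. A minimal runnable sketch of that mechanism; the window contents and row names are hypothetical, and the roles and colours mirror the tags dict in the record:

```python
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=("type",))
tree.heading("#0", text="Name")
tree.heading("type", text="type")

# Role -> appearance, as in the `tags` dict of gui_data_objects.py.
roles = {
    "dependent": {"fg": "#025714", "bg": "#e6eaf0"},
    "independent": {"fg": "#053480", "bg": "#e6eaf0"},
}
for role, colors in roles.items():
    tree.tag_configure(role, foreground=colors["fg"],
                       background=colors["bg"], font="Arial 9 bold")

# Rows carry a tag naming their role; the tag controls how they render.
tree.insert("", "end", text="price", values=("float64",), tags=("dependent",))
tree.insert("", "end", text="volume", values=("float64",), tags=("independent",))
tree.pack(fill="both", expand=True)
root.mainloop()
```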
+{"seq_id":"525102933","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*- \n\n'''\ntemp sample sim user responce data\nbased on a slot_prob_config\n'''\n\n\nimport os, sys, codecs\nimport argparse, json\nfrom random import random\n\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description='sample sim user response data.')\n\n\tparser.add_argument('sim_user_data',\n\t\t\t\t\t\thelp='The sim user asr data.')\n\tparser.add_argument('slot_prob_config',\n\t\t\t\t\t\thelp='The slot prob config file.')\n\tparser.add_argument('sample_num', type=int,\n\t\t\t\t\t\thelp='Sample numbers for each song.')\n\tparser.add_argument('output',\n\t\t\t\t\t\thelp='Output, json format file')\n\targs = parser.parse_args()\n\n\tinput=codecs.open(args.sim_user_data, 'r', 'utf-8')\n\tsim_user_data = json.load(input)\n\tinput.close()\n\n\tinput=file(args.slot_prob_config)\n\tslot_prob_dict = json.load(input)\n\tinput.close()\n\n\tsim_user_res_data = {}\n\n\tfor song in sim_user_data:\n\t\ts_id = song['song1sid']\n\t\tif s_id not in sim_user_res_data:\n\t\t\tsim_user_res_data[s_id] = {}\n\t\tfor i in range(args.sample_num):\n\t\t\tunknown_slots_vec = []\n\t\t\tfor attr in song['song3attributes']:\n\t\t\t\tslot = attr['attr1name']\n\t\t\t\tif random() > slot_prob_dict[slot]:\n\t\t\t\t\tunknown_slots_vec.append(slot)\n\t\t\tunknown_slots_key = str(set(unknown_slots_vec))\n\t\t\tif unknown_slots_key not in sim_user_res_data[s_id]:\n\t\t\t\tsim_user_res_data[s_id][unknown_slots_key] = 1\n\t\t\telse:\n\t\t\t\tsim_user_res_data[s_id][unknown_slots_key] += 1\n\n\toutput=codecs.open(args.output, 'w', 'utf-8')\n\tjson.dump(sim_user_res_data, output, indent=4, ensure_ascii=False)\n\toutput.close()\n\n\t\n\nif __name__ == '__main__':\n\tmain()","sub_path":"src/TempSampleSimUser.py","file_name":"TempSampleSimUser.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"596364499","text":"import binascii\nfrom io import BytesIO\nfrom .serializers import *\nfrom pycoin.block import Block\nfrom .exceptions import NodeDisconnectException, InvalidMessageChecksum\nimport os\nimport codecs\n\nclass ProtocolBuffer (object):\n\tdef __init__ (self):\n\t\tself.buffer = BytesIO()\n\t\tself.header_size = MessageHeaderSerializer.calcsize()\n\t\t#self.socket = socket\n\n\tdef write (self, data):\n\t\tself.buffer.write(data)\n\n\tdef receive_message (self):\n\t\t\"\"\"This method will attempt to extract a header and message.\n\t\tIt will return a tuple of (header, message) and set whichever\n\t\tcan be set so far (None otherwise).\n\t\t\"\"\"\n\t\t# Calculate the size of the buffer\n\t\tself.buffer.seek(0, os.SEEK_END)\n\t\tbuffer_size = self.buffer.tell()\n\n\t\t# Check if a complete header is present\n\t\tif buffer_size < self.header_size:\n\t\t\treturn (None, None)\n\n\t\t# Go to the beginning of the buffer\n\t\tself.buffer.seek(0)\n\n\t\tmessage_model = None\n\t\tmessage_header_serial = MessageHeaderSerializer()\n\t\tmessage_header = message_header_serial.deserialize(self.buffer)\n\t\ttotal_length = self.header_size + message_header.length\n\n\t\t# Incomplete message\n\t\tif buffer_size < total_length:\n\t\t\tself.buffer.seek(0, os.SEEK_END)\n\t\t\treturn (message_header, None)\n\n\t\tpayload = self.buffer.read(message_header.length)\n\t\t#print (codecs.encode (payload, 'hex'))\n\t\tremaining = self.buffer.read()\n\t\tself.buffer = BytesIO()\n\t\tself.buffer.write(remaining)\n\t\tpayload_checksum = MessageHeaderSerializer.calc_checksum(payload)\n\n\t\t# Check if the checksum is valid\n\t\tif payload_checksum != message_header.checksum:\n\t\t\tmsg = \"Bad checksum for command %s\" % message_header.command\n\t\t\traise InvalidMessageChecksum(msg)\n\n\t\tif message_header.command in MESSAGE_MAPPING:\n\t\t\t#print (message_header.command)\n\t\t\tif message_header.command == 'block':\n\t\t\t\tmessage_model = Block.parse(BytesIO(payload))\n\t\t\t\t#print (message_model.id ())\n\t\t\telse:\n\t\t\t\tdeserializer = MESSAGE_MAPPING[message_header.command]()\n\t\t\t\tmessage_model = deserializer.deserialize(BytesIO(payload))\n\n\t\treturn (message_header, message_model)\n\nclass ChainBasicClient (object):\n\t\"\"\"The base class for a Bitcoin network client, this class\n\timplements utility functions to create your own class.\n\n\t:param socket: a socket that supports the makefile()\n\t\t\t\t method.\n\t\"\"\"\n\tdef __init__(self, socket, chain = 'BTC'):\n\t\tif not networks.isSupported (chain):\n\t\t\t raise networks.UnsupportedChainException ()\n\n\t\tself.chain = chain.upper ()\n\t\tself.socket = socket\n\t\tself.buffer = ProtocolBuffer()\n\t\tself.run = True\n\n\tdef stop (self):\n\t\tself.run = False\n\t\ttime.sleep (5)\n\t\tif self.socket != None:\n\t\t\tself.socket.close ()\n\n\tdef update_socket (self, socket):\n\t\tself.socket = socket\n\n\tdef close_stream(self):\n\t\t\"\"\"This method will close the socket stream.\"\"\"\n\t\tself.socket.close()\n\n\tdef handle_message_header(self, message_header, payload):\n\t\t\"\"\"This method will be called for every message before the\n\t\tmessage payload deserialization.\n\n\t\t:param message_header: The message header\n\t\t:param payload: The payload of the message\n\t\t\"\"\"\n\t\tpass\n\n\tdef handle_send_message(self, message_header, message):\n\t\t\"\"\"This method will be called for every sent message.\n\n\t\t:param message_header: The header of the message\n\t\t:param message: The message to be 
sent\n\t\t\"\"\"\n\t\tpass\n\n\n\tdef send_tx (self, message):\n\t\tbin_data = BytesIO()\n\t\tmessage_header = MessageHeader(self.chain)\n\t\tmessage_header_serial = MessageHeaderSerializer()\n\n\t\tbin_message = binascii.unhexlify (message)\n\t\tpayload_checksum = MessageHeaderSerializer.calc_checksum(bin_message)\n\t\tmessage_header.checksum = payload_checksum\n\t\tmessage_header.length = len(bin_message)\n\t\tmessage_header.command = 'tx'\n\n\t\tbin_data.write(message_header_serial.serialize(message_header))\n\t\tbin_data.write(bin_message)\n\t\tself.socket.sendall(bin_data.getvalue())\n\t\tself.handle_send_message(message_header, message)\n\n\n\tdef send_message(self, message):\n\t\t\"\"\"This method will serialize the message using the\n\t\tappropriate serializer based on the message command\n\t\tand then it will send it to the socket stream.\n\n\t\t:param message: The message object to send\n\t\t\"\"\"\n\t\tbin_data = BytesIO()\n\t\tmessage_header = MessageHeader(self.chain)\n\t\tmessage_header_serial = MessageHeaderSerializer()\n\n\t\tserializer = MESSAGE_MAPPING[message.command]()\n\t\tbin_message = serializer.serialize(message)\n\t\tpayload_checksum = \\\n\t\t\tMessageHeaderSerializer.calc_checksum(bin_message)\n\t\tmessage_header.checksum = payload_checksum\n\t\tmessage_header.length = len(bin_message)\n\t\tmessage_header.command = message.command\n\n\t\tbin_data.write(message_header_serial.serialize(message_header))\n\t\tbin_data.write(bin_message)\n\n\t\tself.socket.sendall(bin_data.getvalue())\n\t\tself.handle_send_message(message_header, message)\n\n\n\tdef loop(self):\n\t\t\"\"\"This is the main method of the client, it will enter\n\t\tin a receive/send loop.\"\"\"\n\n\t\twhile self.run:\n\t\t\tdata = self.socket.recv(1024*8)\n\n\t\t\tif len(data) <= 0:\n\t\t\t\traise NodeDisconnectException(\"Node disconnected\")\n\n\t\t\tself.buffer.write(data)\n\t\t\tmessage_header, message = self.buffer.receive_message()\n\n\t\t\tif message_header is not None:\n\t\t\t\tself.handle_message_header(message_header, data)\n\n\t\t\tif not message:\n\t\t\t\tcontinue\n\n\t\t\thandle_func_name = \"handle_\" + message_header.command\n\t\t\thandle_func = getattr(self, handle_func_name, None)\n\t\t\tif handle_func:\n\t\t\t\thandle_func(message_header, message)\n\n\nclass ChainClient(ChainBasicClient):\n\t\"\"\"This class implements all the protocol rules needed\n\tfor a client to stay up in the network. It will handle\n\tthe handshake rules as well answer the ping messages.\"\"\"\n\n\tdef handshake(self):\n\t\t\"\"\"This method will implement the handshake of the\n\t\tBitcoin protocol. 
It will send the Version message.\"\"\"\n\t\tversion = Version()\n\t\tself.send_message(version)\n\n\tdef handle_version(self, message_header, message):\n\t\t\"\"\"This method will handle the Version message and\n\t\twill send a VerAck message when it receives the\n\t\tVersion message.\n\n\t\t:param message_header: The Version message header\n\t\t:param message: The Version message\n\t\t\"\"\"\n\t\tverack = VerAck()\n\t\tself.send_message(verack)\n\n\tdef handle_ping (self, message_header, message):\n\t\t\"\"\"This method will handle the Ping message and then\n\t\twill answer every Ping message with a Pong message\n\t\tusing the nonce received.\n\n\t\t:param message_header: The header of the Ping message\n\t\t:param message: The Ping message\n\t\t\"\"\"\n\t\tpong = Pong()\n\t\tpong.nonce = message.nonce\n\t\tself.send_message(pong)\n","sub_path":"bitpeer/clients.py","file_name":"clients.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
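A minimal sketch of driving the client above. The peer address is a placeholder, and it assumes `bitpeer.clients` / `bitpeer.exceptions` are importable as laid out in this record's `sub_path`:

```python
# Sketch: connect a ChainClient to a peer, handshake, and process messages.
# Host and port are placeholders; NodeDisconnectException ends the session.
import socket

from bitpeer.clients import ChainClient
from bitpeer.exceptions import NodeDisconnectException

sock = socket.create_connection(("node.example.org", 8333), timeout=30)
client = ChainClient(sock, chain="BTC")
try:
    client.handshake()  # send our Version; handle_version answers with VerAck
    client.loop()       # dispatch incoming messages to handle_<command> methods
except NodeDisconnectException:
    client.close_stream()
```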
+{"seq_id":"59616369","text":"import string\n\n\ndef word_count(s):\n # Your code here\n alpha = string.ascii_lowercase\n if len(s) == 0:\n return {}\n words = s.split()\n for i in range(len(words)):\n words[i] = words[i].lower()\n new_words = []\n\n for i in range(len(words)):\n new_word = \"\"\n for x in range(len(words[i])):\n curr_letter = words[i][x]\n if curr_letter in alpha or curr_letter == \"'\":\n new_word += curr_letter\n\n if new_word != \"\":\n new_words.append(new_word)\n\n my_dict = {}\n\n for word in new_words:\n if word in my_dict:\n my_dict[word] += 1\n else:\n my_dict[word] = 1\n\n return my_dict\n\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count(\"Hello hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count(\n 'This is a test of the emergency broadcast network. This is only a test.'))\n print(word_count('a a\\ra\\na\\ta \\t\\r\\n'))\n","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"450922805","text":"# Standard Python libraries\nfrom enum import Enum\nfrom threading import Thread\nfrom time import time\nfrom uuid import uuid4\nfrom logging import getLogger\n\nfrom jackhammer.job import JobState\n\nlogger = getLogger(\"jackhammer\")\n\nclass Worker(Thread):\n \"\"\"\n Worker corresponding to a remote machine.\n\n Thread should remain alive at least as long as a connection to the\n remote machine is possible or the shutdown_flag is set.\n\n Will attempt to run several jobs over its lifetime, using the callbacks\n to get and return jobs.\n\n The thread will measure the duration the machine was available and capture\n any exceptions.\n \"\"\"\n\n def __init__(self, job, cycle_job, provider, shutdown_flag):\n Thread.__init__(self)\n self.name = \"worker-\" + str(uuid4())[:16]\n self.exception = None\n self.duration = None\n self.startTime = None\n\n # Args\n self.job = job\n self.cycle_job = cycle_job\n self.provider = provider\n self.shutdown_flag = shutdown_flag\n\n def run(self):\n \"\"\"\n Thread entry point.\n The worker loop, which opens a machine connection\n and runs through a series of jobs.\n \"\"\"\n logger.info(\"Worker Launch: %s\", self)\n try:\n self.worker_loop()\n except Exception as e:\n logger.warning(\"Worker Failure: %s %s\", self, str(e))\n self.exception = e\n\n logger.info(\"Worker Shutdown: %s\", self)\n self.duration = (time() - self.startTime) if self.startTime else None\n\n def worker_loop(self):\n \"\"\"\n The worker loop, which requests and executes jobs until\n the remote machine crashes.\n \"\"\"\n with self.provider.create_client(self.name) as client:\n self.startTime = time()\n while self.job and self.conn_check(client):\n self.job.execute(client, self.shutdown_flag)\n if self.job.state == JobState.Disconnection:\n break\n self.job = self.cycle_job(self.name, self.job)\n\n def conn_check(self, client):\n \"\"\"\n Check the machine is still available by opening a connection\n and running a known command.\n \"\"\"\n try:\n stdin, stdout, stderr = client.exec_command(\"echo test\", timeout=10)\n l = stdout.readlines()\n return l == [\"test\\n\"]\n except Exception as e:\n return False\n\n def __repr__(self):\n return self.name\n","sub_path":"jackhammer/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"555680541","text":"\nBORDER_SYMBOLS = {\n\t# 0 1 2 3 4 5 6 7\n\t'line': ('─', '│', '┌', '┐', '└', '┘', '┬', '┴'),\n\t'double': ('═', '║', '╔', '╗', '╚', '╝', '╦', '╩'),\n\t'grate': ['#' for i in range(0,8)],\n\t'block': ['█' for i in range(0,8)],\n\t'empty': [' ' for i in range(0,8)],\n}\n\ndef calcCenterPosition(screen, width, height = 1):\n\treturn (screen.width-width)//2,(screen.height-height)//2\n\t\nclass Rectangle():\n\t\n\tdef __init__(self, screen, x, y, width, height):\n\t\tself.screen = screen\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.innerX = x\n\t\tself.innerY = y\n\t\tself.innerWidth = width\n\t\tself.innerHeight = height\n\t\tself.fgColor = (255,255,255)\n\t\tself.bgColor = (0,0,0)\n\t\t\n\tdef setColors(self, fgColor = False, bgColor = False):\n\t\tif fgColor:\n\t\t\tself.fgColor = fgColor\n\t\tif bgColor:\n\t\t\tself.bgColor = bgColor\n\t\treturn self\n\t\t\n\tdef draw(self):\n\t\tself.clearBox()\n\t\t\n\tdef clearBox(self):\n\t\tself.screen.draw_rect(self.x, self.y, self.width, self.height, ' ', self.fgColor, self.bgColor)\n\n\tdef move(self, x, y):\n\t\tself.x += x\n\t\tself.innerX += x\n\t\tself.y += y\n\t\tself.innerY += y\n\t\t\nclass BorderedRectangle(Rectangle):\n\t\n\tdef __init__(self, screen, x, y, width, height):\n\t\tsuper(BorderedRectangle, self).__init__(screen, x, y, width, height)\n\t\tself.borderColor = (200,200,200)\n\t\tself.borderBgColor = (0,0,0)\n\t\tself.borderType = 'line'\n\t\tself.innerX = x+1\n\t\tself.innerY = y+1\n\t\tself.innerWidth = width-2\n\t\tself.innerHeight = height-2\n\t\n\tdef setBorderColors(self, borderColor = False, borderBgColor = False):\n\t\tif borderColor:\n\t\t\tself.borderColor = borderColor\n\t\tif borderBgColor:\n\t\t\tself.borderBgColor = borderBgColor\n\t\treturn self\n\t\t\n\tdef setBorderType(self, borderType):\n\t\tself.borderType = borderType\n\t\treturn self\n\t\t\n\tdef draw(self):\n\t\tsuper(BorderedRectangle, self).draw()\n\t\tself.drawBorder()\n\t\t\n\t@property\n\tdef symbols(self):\n\t\treturn BORDER_SYMBOLS[self.borderType]\n\t\n\tdef drawBorder(self):\n\t\t\n\t\tfor x in range(self.x+1, self.x+self.width-1):\n\t\t\tself.screen.draw_char(x, self.y, self.symbols[0], self.borderColor, self.borderBgColor)\n\t\t\tself.screen.draw_char(x, self.y+self.height-1, self.symbols[0], self.borderColor, self.borderBgColor)\n\t\t\t\n\t\tfor y in range(self.y+1, self.y+self.height-1):\n\t\t\tself.screen.draw_char(self.x, y, self.symbols[1], self.borderColor, self.borderBgColor)\n\t\t\tself.screen.draw_char(self.x+self.width-1, y, self.symbols[1], self.borderColor, self.borderBgColor)\n\t\t\t\n\t\tself.screen.draw_char(self.x, self.y, self.symbols[2], self.borderColor, self.borderBgColor)\n\t\tself.screen.draw_char(self.x+self.width-1, self.y, self.symbols[3], self.borderColor, self.borderBgColor)\n\t\tself.screen.draw_char(self.x, self.y+self.height-1, self.symbols[4], self.borderColor, self.borderBgColor)\n\t\tself.screen.draw_char(self.x+self.width-1, self.y+self.height-1, self.symbols[5], self.borderColor, self.borderBgColor)\n\t\nclass FullWindowBorderedRectangle(BorderedRectangle):\n\t\n\tdef __init__(self, screen):\n\t\tsuper(FullWindowBorderedRectangle, self).__init__(screen, 0, 0, screen.width, screen.height)\n\t\t\n\nclass RightPanelBorderedRectangle(BorderedRectangle):\n\t\n\tdef drawBorder(self):\n\t\tsuper(RightPanelBorderedRectangle, self).drawBorder()\n\t\tself.screen.draw_char(self.x, self.y, self.symbols[6], self.borderColor, 
self.borderBgColor)\n\t\tself.screen.draw_char(self.x, self.y+self.height-1, self.symbols[7], self.borderColor, self.borderBgColor)\n\nclass FullScreenRightPanelBorderedRectangle(RightPanelBorderedRectangle):\n\n\tdef __init__(self, screen):\n\t\twidth = screen.rightPanelWidth\n\t\tsuper(FullScreenRightPanelBorderedRectangle, self).__init__(screen, screen.width-width, 0, width, screen.height)\n\n\nclass BigFullScreenRightPanelBorderedRectangle(RightPanelBorderedRectangle):\n\t\n\tdef __init__(self, screen):\n\t\twidth = screen.width - screen.rightPanelWidth\n\t\tsuper(BigFullScreenRightPanelBorderedRectangle, self).__init__(screen, screen.rightPanelWidth, 0, width, screen.height)\n\t\t\t\t\n\t\t\t\t\n\nclass Box():\n\t\t\n\tdef __init__(self, screen, rectangle,\n\t\t\t\ttitle = False, titleColor = (200,200,200), titleBgColor = (0,0,0)):\n\t\t\t\t\n\t\tassert isinstance(rectangle, Rectangle)\n\t\tself.screen = screen\n\t\tself.rectangle = rectangle\n\t\tself.title = title\n\t\tself.titleColor = titleColor\n\t\tself.titleBgColor = titleBgColor\n\t\tself.linesPrinted = 0\n\t\t\n\tdef draw(self):\n\t\tself.rectangle.draw()\n\t\tself.drawTitle()\n\t\t\n\tdef drawTitle(self):\n\t\tif self.title:\n\t\t\tself.screen.draw_str(self.rectangle.x+2, self.rectangle.y, '['+self.title+']', self.titleColor, self.titleBgColor)\n\t\t\t\n\tdef printLine(self, str, fgColor = None, bgColor = None):\n\t\tif self.linesPrinted >= self.rectangle.innerHeight:\n\t\t\treturn\n\t\tif len(str) > self.rectangle.innerWidth:\n\t\t\tstr = str[:self.rectangle.innerWidth-3] + '...'\n\t\telse:\n\t\t\tfor i in range(len(str), self.rectangle.innerWidth):\n\t\t\t\tstr += ' '\n\t\t\t\t\n\t\tif fgColor == None:\n\t\t\tfgColor = self.rectangle.fgColor\n\t\tif bgColor == None:\n\t\t\tbgColor = self.rectangle.bgColor\n\t\t\n\t\tself.screen.draw_str(self.rectangle.innerX, self.rectangle.innerY+self.linesPrinted, str, fgColor, bgColor)\n\t\tself.linesPrinted += 1\n\t\t\nclass LineBar(Box):\n\t\n\tdef __init__(self, screen, rectangle,\n\t\t\t\tname, value, min, max,\n\t\t\t\ttextWidth = None, valueDisplayFunction = str,\n\t\t\t\tactiveColor = (200,200,200)):\n\t\tsuper(LineBar, self).__init__(screen, rectangle)\n\t\tself.name = name\n\t\tself.value = value\n\t\tself.min = min\n\t\tself.max = max\n\t\tself.activeColor = activeColor\n\t\tself.textWidth = textWidth\n\t\tself.valueDisplayFunction = valueDisplayFunction\n\n\tdef draw(self, vAlign=0):\n\t\tself.rectangle.move(0, vAlign)\n\t\tsuper(LineBar, self).draw()\n\t\tstrValue = self.valueDisplayFunction(self.value)\n\t\ttext = self.name + ': <' + strValue + '>'\n\t\tif self.textWidth != None:\n\t\t\twhile len(text) < self.textWidth:\n\t\t\t\ttext += ' '\n\t\tbar_width = int(float(self.value-self.min) / (self.max-self.min) * (self.rectangle.width-len(text)))\n\t\tif bar_width+len(text) >= self.rectangle.width:\n\t\t\tbar_width -= 1\n\n\n\t\tself.screen.draw_str(self.rectangle.x, self.rectangle.y, text, self.rectangle.fgColor, self.rectangle.bgColor)\n\t\tself.screen.draw_rect(self.rectangle.x+len(text)+1, self.rectangle.y, bar_width, 1, ' ', None, self.activeColor)\n\n\nclass InputList():\n\tdef __init__(self, screen, rectangle,\n\t items, selected=0, scroll=0):\n\t\tself.screen = screen\n\t\tself.rectangle = rectangle\n\t\tself.items = items\n\t\tself.selected = selected\n\t\tself.scroll = scroll\n\t\tself.itemsLength = len(items)\n\t\tif self.selected > self.itemsLength - 1:\n\t\t\tself.selected = self.itemsLength - 1\n\n\tdef scrollTo(self, selected):\n\t\tif selected < 
self.scroll:\n\t\t\tself.scroll = selected\n\t\telif selected > self.scroll + self.rectangle.innerHeight - 1:\n\t\t\tself.scroll = selected - self.rectangle.innerHeight + 1\n\n\tdef draw(self):\n\t\tself.rectangle.draw()\n\t\tif self.rectangle.innerHeight >= self.itemsLength:\n\t\t\tfor item in self.items:\n\t\t\t\titem.draw()\n\t\telse:\n\t\t\tfor i in range(0, self.rectangle.innerHeight):\n\t\t\t\tself.items[i + self.scroll].draw(vAlign=-1 * self.scroll)\n\n\t\t\tself.screen.draw_rect(self.rectangle.innerX + self.rectangle.innerWidth - 1, self.rectangle.innerY, 1,\n\t\t\t self.rectangle.innerHeight, '║', (55, 55, 55))\n\t\t\tscrollHeight = self.rectangle.innerHeight // 5\n\t\t\tif scrollHeight > 5:\n\t\t\t\tscrollHeight = 5\n\t\t\telif scrollHeight < 1:\n\t\t\t\tscrollHeight = 1\n\t\t\tavailableHeight = self.rectangle.innerHeight - scrollHeight\n\t\t\tscrollPos = int(availableHeight * self.scroll / (self.itemsLength - self.rectangle.innerHeight))\n\t\t\tself.screen.draw_rect(self.rectangle.innerX + self.rectangle.innerWidth - 1,\n\t\t\t self.rectangle.innerY + scrollPos, 1, scrollHeight, '█', (155, 155, 155))\n\n\nclass LineBarList(InputList):\n\n\tdef __init__(self, screen, rectangle,\n\t names, values, min, max,\n\t selected=0, scroll=0, textWidth=None, valueDisplayFunction=str,\n\t activeColor=(200, 200, 200)):\n\n\t\tlinebars = []\n\t\tassert (len(names) == len(values))\n\n\t\tfor i in range(0, len(names)):\n\t\t\tcurRect = Rectangle(screen, rectangle.innerX, rectangle.innerY + i, rectangle.innerWidth, 1)\n\t\t\tif (i == selected):\n\t\t\t\tcurRect.setColors(bgColor=(0, 155, 255))\n\t\t\tlinebars.append(\n\t\t\t\tLineBar(screen, curRect, names[i], values[i], min, max, textWidth, valueDisplayFunction, activeColor))\n\n\t\tsuper(LineBarList, self).__init__(screen, rectangle, linebars, selected, scroll)\n\n\nclass CheckList(InputList):\n\tdef __init__(self, screen, rectangle,\n\t names, values,\n\t selected=0, scroll=0,\n\t selectedColor=(0, 155, 255), checkedColor=(255, 155, 0)):\n\n\t\tinputs = []\n\t\tassert (len(names) == len(values))\n\n\t\tfor i in range(0, len(names)):\n\t\t\tcurRect = Rectangle(screen, rectangle.innerX, rectangle.innerY + i, rectangle.innerWidth, 1)\n\t\t\tif i == selected:\n\t\t\t\tcurRect.setColors(bgColor=selectedColor)\n\t\t\tif values[i]:\n\t\t\t\tcurRect.setColors(fgColor=checkedColor)\n\t\t\tinputs.append(CheckInput(screen, curRect, names[i], values[i]))\n\n\t\tsuper(CheckList, self).__init__(screen, rectangle, inputs, selected, scroll)\n\n\tdef passive(self):\n\t\tself.items[self.selected].rectangle.setColors(bgColor=(0, 0, 0))\n\nclass TextInput(Box):\n\n\tdef __init__(self, screen, rectangle, name, value = '', fieldFgColor = (255,255,255), fieldBgColor = (55,55,55)):\n\t\tsuper(TextInput, self).__init__(screen, rectangle)\n\t\tself.name = name\n\t\tself.value = value\n\t\tself.fieldFgColor = fieldFgColor\n\t\tself.fieldBgColor = fieldBgColor\n\n\tdef draw(self):\n\t\tsuper(TextInput, self).draw()\n\t\ttext = self.name + ': '\n\t\tself.screen.draw_str(self.rectangle.x, self.rectangle.y, text, self.rectangle.fgColor, self.rectangle.bgColor)\n\t\tself.screen.draw_rect(self.rectangle.x + len(text), self.rectangle.y, self.rectangle.width - len(text), 1, ' ', self.fieldFgColor, self.fieldBgColor)\n\t\tself.screen.draw_str(self.rectangle.x + len(text), self.rectangle.y, self.value, self.fieldFgColor, self.fieldBgColor)\n\n\nclass CheckInput(Box):\n\tdef __init__(self, screen, rectangle, name, value=False):\n\t\tsuper(CheckInput, self).__init__(screen, 
rectangle)\n\t\tself.name = name\n\t\tself.value = value\n\n\tdef draw(self):\n\t\tsuper(CheckInput, self).draw()\n\t\tvalueName = ('☐', '☑')\n\t\ttext = valueName[self.value] + ' ' + self.name\n\t\tself.screen.draw_str(self.rectangle.x, self.rectangle.y, text, self.rectangle.fgColor, self.rectangle.bgColor)\n\n\nclass GenderInput(CheckInput):\n\n\tdef __init__(self, screen, rectangle, name, value = False):\n\t\tsuper(GenderInput, self).__init__(screen, rectangle, name, value)\n\n\tdef draw(self):\n\t\tsuper(GenderInput, self).draw()\n\t\tvalueName = ('Female', 'Male')\n\t\ttext = self.name + ': <' + valueName[self.value] + '>'\n\t\tself.screen.draw_str(self.rectangle.x, self.rectangle.y, text, self.rectangle.fgColor, self.rectangle.bgColor)\n\n\nclass Tabs():\n\t\n\tdef __init__(self, screen, tabs, active,\n\t\tfgColor = (155,155,155), bgColor = (0,0,0),\n\t\tactiveFgColor = (255,255,255), borderColor = (255,255,255), borderType = 'line'):\n\t\tself.screen = screen\n\t\tself.tabs = tabs\n\t\tself.active = active\n\t\tself.fgColor = fgColor\n\t\tself.bgColor = bgColor\n\t\tself.activeFgColor = activeFgColor\n\t\tself.borderColor = borderColor\n\t\tself.borderType = borderType\n\n\t@property\n\tdef symbols(self):\n\t\treturn BORDER_SYMBOLS[self.borderType]\n\n\tdef draw(self):\n\t\twidth = self.screen.width-2\n\t\tsumTabsLen = 0\n\t\tfor name in self.tabs:\n\t\t\tsumTabsLen += len(name)\n\t\tlenPerOne = width/sumTabsLen\n\t\ttabsWidth = [round(len(name)*lenPerOne) for name in self.tabs]\n\t\tsumTabsWidth = 0\n\t\tfor cwidth in tabsWidth:\n\t\t\tsumTabsWidth += cwidth\n\t\tfor i in range(0, sumTabsWidth - width):\n\t\t\ttabsWidth[i] -= 1\n\t\tx = 1\n\t\tfor i in range(0,len(self.tabs)):\n\t\t\tcolor = (self.fgColor, self.activeFgColor)[i == self.active]\n\n\t\t\ttabWidth = tabsWidth[i]\n\t\t\tself.screen.draw_rect(x, 1, tabWidth, 1, ' ', None, self.bgColor)\n\t\t\tself.screen.draw_char(x-1, 1, self.symbols[1], self.borderColor, self.bgColor)\n\t\t\tif i == self.active:\n\t\t\t\tself.screen.draw_char(x-1, 2, self.symbols[5], self.borderColor, self.bgColor)\n\t\t\telse:\n\t\t\t\tself.screen.draw_rect(x, 2, tabWidth, 1, self.symbols[0], self.borderColor, self.bgColor)\n\t\t\t\tif i > 0:\n\t\t\t\t\tif self.active == i-1:\n\t\t\t\t\t\tself.screen.draw_char(x-1, 2, self.symbols[4], self.borderColor, self.bgColor)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.screen.draw_char(x-1, 2, self.symbols[7], self.borderColor, self.bgColor)\n\n\t\t\toffset = (tabWidth-len(self.tabs[i]))//2\n\t\t\tself.screen.draw_str(x+offset, 1, self.tabs[i], color, self.bgColor)\n\n\t\t\tx += tabWidth\n\t\t\n\n\nclass Camera():\n\n\tdef __init__(self, width, height, x, y, z):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.z = z\n\t\t\n\tdef setSize(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\n\tdef moveTo(self, dx = 0, dy = 0, dz = 0):\n\t\tself.x += dx\n\t\tself.y += dy\n\t\tself.z += dz\n\t\t\n\tdef getBounds(self):\n\t\treturn ((self.x, self.y), (self.x + self.width, self.y + self.height))\n\n","sub_path":"src/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"552112607","text":"import datetime\n\nfrom django.test import TestCase\n\nfrom mks.models import Knesset, Party, Member, Membership, MemberAltname\nfrom mks.tests.base import ten_days_ago, two_days_ago\n\n\nclass TestMember(TestCase):\n def setUp(self):\n super(TestMember, self).setUp()\n\n self.previous_knesset = Knesset.objects.create(number=1,\n start_date=ten_days_ago,\n end_date=two_days_ago)\n self.current_knesset = Knesset.objects.create(number=2,\n start_date=two_days_ago)\n self.previous_party = self.given_party_exists_in_knesset('a_party', self.previous_knesset)\n self.current_party = self.given_party_exists_in_knesset('a_party', self.current_knesset)\n self.member = self.given_member_exists_in_knesset('member_1', self.previous_party)\n self.member = self.given_member_exists_in_knesset('member_1', self.current_party)\n\n def tearDown(self):\n super(TestMember, self).tearDown()\n\n def test_party_at_calculates_correct_party_by_date_when_no_end_date_for_two_periods(self):\n five_days_ago = datetime.datetime.today() - datetime.timedelta(days=5)\n party_at = self.member.party_at(five_days_ago.date())\n self.assertEqual(party_at, self.previous_party)\n\n today = datetime.datetime.today()\n party_at = self.member.party_at(today.date())\n self.assertEqual(party_at, self.current_party)\n\n def test_party_at_calculates_correct_party_by_date_when_given_end_date(self):\n self.given_member_exists_in_knesset(self.member.name, self.previous_party, end_date=two_days_ago.date())\n five_days_ago = datetime.datetime.today() - datetime.timedelta(days=5)\n party_at = self.member.party_at(five_days_ago.date())\n self.assertEqual(party_at, self.previous_party)\n\n today = datetime.datetime.today()\n party_at = self.member.party_at(today.date())\n self.assertEqual(party_at, self.current_party)\n\n def testNames(self):\n m = Member(name='test member')\n self.assertEqual(m.names, ['test member'])\n m.save()\n MemberAltname(member=m, name='test2').save()\n self.assertEqual(m.names, ['test member', 'test2'])\n\n def given_party_exists_in_knesset(self, party_name, knesset):\n party, create = Party.objects.get_or_create(name='{0}_{1}'.format(party_name, knesset.number),\n knesset=knesset,\n start_date=knesset.start_date,\n end_date=knesset.end_date)\n return party\n\n def given_member_exists_in_knesset(self, member_name, party, end_date=None):\n member, create = Member.objects.get_or_create(name=member_name, start_date=ten_days_ago.date())\n membership, create = Membership.objects.get_or_create(member=member, party=party,\n start_date=party.knesset.start_date)\n if end_date:\n membership.end_date = end_date\n membership.save()\n return member\n\n\n","sub_path":"mks/tests/test_member.py","file_name":"test_member.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"81646527","text":"import threading\nimport socket\nimport sys\n\n# need to make sure that the port number is given as an argument\nif len(sys.argv) != 2:\n print(\"[RS]: ERROR: Need to include a listen port argument.\")\n exit()\n\nRSPortNum = int(sys.argv[1])\nif RSPortNum <= 1023:\n print(\"ERROR: Need to make sure that the port numbers are > 1023\")\n exit()\n\n# function used to insert words into the data table\ndef insertIntoTable(count,word,table):\n for i in range(count):\n for j in range(3):\n if table[i][j] == \".\":\n table[i][j] = word\n return\n\n\n# store the URLs and IPs from PROJI-DNSRS.txt\nDNSTable = []\ncount = 0\n\n# get the number of lines in the DNS list\ntry:\n file = open(\"PROJI-DNSRS.txt\", \"r\")\n for line in file:\n count = count + 1\nexcept IOError:\n print(\"[RS]: ERROR opening file: PROJI-DNSRS.txt\")\n exit()\n\n# create the table and initialize it\nfor i in range(count):\n DNSTable.append([])\n for j in range(3):\n DNSTable[i].append(\".\")\n\n# separate the lines into words and store each word into a list\ndataList = list()\ntry:\n file = open(\"PROJI-DNSRS.txt\",\"r\")\n for line in file:\n for word in line.replace(\"\\r\", \"\").replace(\"\\n\", \"\").split():\n dataList.append(word)\n\nexcept IOError:\n print(\"[RS]: ERROR opening file: PROJI-DNSRS.txt\")\n exit()\nfile.close()\n\n# populate DNS Table with the list of words\nfor word in dataList:\n insertIntoTable(count, word, DNSTable)\n\n# create the socket for the rs server\ntry:\n ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"[RS]: Server socket created\")\nexcept socket.error as err:\n print('[RS]: socket open error: {}\\n'.format(err))\n exit()\n\n# bind the socket to the port to listen for the client\nserver_binding = ('', int(sys.argv[1]))\nss.bind(server_binding)\nss.listen(1)\nhost = socket.gethostname()\nprint(\"[RS]: Server host name is {}\".format(host))\nlocalhost_ip = (socket.gethostbyname(host))\nprint(\"[RS]: Server IP address is {}\".format(localhost_ip))\n\n\nfound = False\n# get list of host names to check for\nwhile True:\n\n csockid, addr = ss.accept()\n print (\"[RS]: Got a connection request from a client at {}\".format(addr))\n\n found = False\n data_from_client = csockid.recv(500)\n print(\"[RS]: Connection received. Looking up : {}\".format(data_from_client.decode('utf-8')) + \" ...\")\n\n # this is the code from the client that tells this server that there are no more host names to look up\n if data_from_client == \"DONE\":\n msg = \"Look up done. Cancelling Connection ... \"\n csockid.send(msg.encode('utf-8'))\n break\n # look through the table and see if the RS server has the IP address for the host name\n for word in range(count-1):\n hostToCheck = DNSTable[word][0].lower()\n if data_from_client == hostToCheck:\n msg = DNSTable[word][0] + \" \" + DNSTable[word][1] + \" \" + DNSTable[word][2]\n csockid.send(msg.encode('utf-8'))\n found = True\n\n # message (host name of the TS server) to the client to look to the TS server to find the IP\n if not found:\n msg = DNSTable[count-1][0] + \" \" + DNSTable[count-1][1] + \" \" + DNSTable[count-1][2]\n csockid.send(msg.encode('utf-8'))\n\n# Close the server socket\nss.close()\nexit()\n\nif __name__ == \"__main__\":\n RS = threading.Thread(name='RSserver')\n RS.start()\n\n","sub_path":"rs.py","file_name":"rs.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"469012792","text":"import random\nfrom random import randint\nfrom app.cliGame import arsenal\n\nclass Inventory:\n \n spells = [\"Mage Armor\", \"Cloak of Shadow\", \"Fireball\", \"Levitation\"];\n hands = arsenal.Weapon(\"HANDS\",1,0,0);\n staff = arsenal.Weapon(\"STAFF\",1,2,1);\n dagger = arsenal.Weapon(\"DAGGER\",2,0,1);\n axe = arsenal.Weapon(\"AXE\",3,1,3);\n spear = arsenal.Weapon(\"SPEAR\",3,2,3);\n bow = arsenal.Weapon(\"BOW\",2,3,4);\n sword = arsenal.Weapon(\"SWORD\",4,1,6);\n noArmor = arsenal.Armor(\"NONE\",0,0);\n leather = arsenal.Armor(\"LEATHER\",1,3);\n chain = arsenal.Armor(\"CHAIN\",2,6);\n plate = arsenal.Armor(\"PLATE\",3,9);\n torch = arsenal.Gear(\"TORCH\",1,\"Each trip into the wild uses one torch\");\n ration = arsenal.Gear(\"RATION\",1,\"Rations can be used to restore health in camp\");\n shield = arsenal.Gear(\"SHIELD\",5,\"A shield grants extra armor\");\n helmet = arsenal.Gear(\"HELMET\",3,\"A helmet grants extra armor\");\n cloak = arsenal.Gear(\"CLOAK\",3,\"A cloak makes you better at sneaking\");\n \n def __init__(self):\n self.gold = 0;\n self.torches = 4;\n self.rations = 2;\n self.weapon = Inventory.hands;\n self.baseDamage = 0;\n self.weaponDamage = 1;\n self.weaponRange = 0;\n self.baseArmor = 0;\n self.armor = Inventory.noArmor;\n self.helmet = False;\n self.shield = False;\n self.armorRating = 0;\n self.cloak = False;\n self.coreStealth = 3;\n self.stealthiness = self.coreStealth;\n self.speedBonus = 0;\n self.knownSpell = \"NONE\";\n self.spellText = \"\";\n self.rationText = \"eats a ration and recovers health.\";\n \n def checkItemEffects(self):\n self.weaponDamage = self.weapon.wDamage;\n self.weaponRange = self.weapon.wRange;\n self.stealthiness = (self.coreStealth - self.armor.aRating);\n self.armorRating = (self.baseArmor + self.armor.aRating);\n if self.helmet == True:\n self.armorRating += 1;\n self.stealthiness -= 1;\n if self.shield == True:\n self.armorRating += 2;\n self.stealthiness -=2;\n if self.cloak == True:\n self.stealthiness +=2;\n\n def jobItems(self, job):\n if job == \"WIZARD\":\n self.weapon = Inventory.staff;\n self.knownSpell = Inventory.spells[random.randint(0,(len(Inventory.spells)-1))];\n elif job == \"THIEF\":\n self.weapon = Inventory.dagger;\n self.coreStealth += 1;\n else:\n self.weapon = Inventory.axe;\n self.helmet = True;\n self.checkItemEffects();\n \n def burnTorch(self):\n self.torches -= 1;\n \n def eatRation(self):\n if self.rations > 0:\n self.rationText = \"eats a ration and recovers health.\"\n self.rations -= 1;\n else:\n self.rationText = \"is out of rations!\"\n \n def buyTorch(self):\n self.gold -= Inventory.torch.price;\n self.torches += 1;\n \n def buyRation(self):\n self.gold -= Inventory.ration.price;\n self.rations += 1;\n \n def buyGear(self, item):\n self.gold -= item.price;\n if item == Inventory.shield:\n self.shield = True;\n elif item == Inventory.helmet:\n self.shield = True;\n elif item == Inventory.cloak:\n self.cloak = True;\n \n def buyWeapon(self, item):\n self.gold -= item.price;\n self.weapon = item;\n \n def buyArmor(self, item):\n self.gold -= item.price;\n self.armor = item;\n \n def useSpell(self):\n if self.knownSpell == \"Mage Armor\":\n self.spellText = (\"Your armor becomes more effective!\");\n self.baseArmor +=2;\n elif self.knownSpell == \"Cloak of Shadows\":\n self.spellText = (\"You are much better at sneaking!\");\n self.coreStealth +=3;\n elif self.knownSpell == \"Fireball\":\n self.spellText = (\"You do substantial extra damage!\");\n self.baseDamage += 
3;\n elif self.knownSpell == \"Levitation\":\n self.spellText = (\"You are much better at fleeing!\")\n self.speedBonus += 4;\n self.knownSpell = \"NONE\";\n \n def wizLevel(self,hero,room):\n if hero.level == 1 and room.level > 4:\n hero.level = 2;\n self.knownSpell = Inventory.spells[random.randint(0,(len(Inventory.spells)-1))];\n elif hero.level == 2 and room.level >7:\n hero.level = 3;\n self.knownSpell = Inventory.spells[random.randint(0,(len(Inventory.spells)-1))];\n \n def profit(self, value):\n self.gold += value;","sub_path":"app/cliGame/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
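A short usage sketch for the class above, assuming the package layout given in the record's sub_path; the numbers follow from the weapon and armor tables defined in the class:

```python
# Sketch: create a character's inventory and buy some equipment.
from app.cliGame.inventory import Inventory

inv = Inventory()
inv.jobItems("WIZARD")           # staff plus a random starting spell
inv.gold = 20
inv.buyArmor(Inventory.leather)  # gold 20 -> 17 (leather costs 3)
inv.checkItemEffects()           # armorRating 1, stealthiness 3 - 1 = 2
print(inv.gold, inv.armorRating, inv.stealthiness, inv.knownSpell)
```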
+{"seq_id":"547292885","text":"from keras import models\nimport numpy as np\nimport pandas as pd\n\n\ndef create_embeddings_index(filename):\n embeddings_index = dict()\n f = open( filename )\n for line in f:\n values = line.split()\n word = values[0]\n coefs = asarray(values[1:], dtype= 'float32' )\n embeddings_index[word] = coefs\n f.close()\n return embeddings_index\n\ndef create_embedding_matrix(tokenizer, embeddings_index):\n vocab_size=len(tokenizer.word_index)+1\n embedding_matrix = zeros((vocab_size, 100))\n for word, i in tokenizer.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix\n\n\n\n\ndef preprocess(data, tokenizer_filename, emb_filename, vocab_size= 33674):\n\n \"\"\"\n data is a pandas series object\n \"\"\"\n \n # if (type(data)!='pandas.core.series.Series'):\n # print(\"Please enter a series object\")\n\n \n from keras.preprocessing.text import Tokenizer\n from numpy import asarray\n from numpy import zeros\n from keras.preprocessing.text import Tokenizer\n from keras.preprocessing.sequence import pad_sequences\n\n # loading\n with open(tokenizer_filename, 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n \n # fit the tokenizer on the documents\n data=data.apply(str)\n \n embedding_index=create_embeddings_index(emb_filename)\n embedding_matrix=create_embedding_matrix(tokenizer, embeddings_index)\n \n max_len=50 #change accordingly\n test_sequences = tokenizer.texts_to_sequences(data)\n test_padded = pad_sequences(test_sequences, maxlen = max_len, padding = 'post' )\n \n return test_padded\n\nmodel=models.load_model('model/spam2.h5')\n\n\ndef predict(data):\n\n preprocessed_data=preprocess(pd.Series(data), \"tokenizer.pickle\", \"embeddings.txt\")\n \n prediction = model.predict_classes(preprocessed_data).flatten()\n \n # You may want to further format the prediction to make it more\n # human readable\n return prediction","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"270615637","text":"import tkinter as tk\nimport numpy as np\nimport pandas as pd\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport re\nimport threading\n\ndf=pd.DataFrame({'健保碼':['','','','',''],'名稱':['','','','',''],'A廠品項':['','','','',''],'A品項價格':['','','','',''],'E廠品項':['','','','',''],'E品項價格':['','','','',''],'業務1價':['','','','',''],'業務2價':['','','','',''],'業務3價':['','','','','']})\nA_dict={}\nE_dict={}\n\n#登入A、E網站============================================\nchrome_options=Options()\nchrome_options.add_argument('--headless')\ndriver = webdriver.Chrome(chrome_options=chrome_options)\n# driver = webdriver.Chrome()\n\ndriver.get('http://ec.k-e.com.tw/LoginView.aspx')\nhandles = driver.window_handles\ndriver.switch_to.window(handles[0])\nhandle1 = driver.current_window_handle\nalert = driver.switch_to_alert()\nalert.accept()\naccount_input = driver.find_element_by_id('cphContent_txtAccount') # cphContent_txtPassword\naccount_input.send_keys('0401609') # ysp0613 #cphContent_btnLogin\npass_input = driver.find_element_by_id('cphContent_txtPassword')\npass_input.send_keys('ysp0613')\nlogin_btn = driver.find_element_by_id('cphContent_btnLogin') # lkbRecipeCatalog\nlogin_btn.click()\nAll_pro = driver.find_element_by_id('lkbAllCatalog')\nAll_pro.click()\n\ndriver.execute_script(\"window.open('about:blank', 'tab2');\")\ndriver.switch_to_window('tab2')\ndriver.get('http://www.chahwa.com.tw/index.php')\ndriver.find_element_by_xpath('//*[@id=\"index\"]/nav/ul/li[4]').click()\ntime.sleep(1)\ndriver.find_element_by_xpath('//*[@id=\"primptmsg\"]/div/div[1]/form/ul/li[1]/span/input').send_keys('fkg20131101')\ndriver.find_element_by_xpath('/html/body/header/div[2]/div/div/div[1]/form/ul/li[2]/span/input').send_keys('fkg9988')\ndriver.find_element_by_id('urlogin_a').click()\ntime.sleep(1.5)\ndriver.find_element_by_class_name('order').click()\n\n\n#以ID搜尋========================================================\ndef ID_srch(ID,row):#row:0-4\n #切換到A廠==================================================\n driver.switch_to.window(handles[0])\n srch = driver.find_element_by_id('cphContent_ucCT_txtKeyword')\n srch.send_keys(ID)\n srch_btn = driver.find_element_by_id('cphContent_ucCT_btnSearch')\n srch_btn.click()\n time.sleep(0.5)\n srch = driver.find_element_by_id('cphContent_ucCT_txtKeyword')\n srch.clear()\n A_item=driver.find_elements_by_xpath('//*[@id=\"cphContent_ucCT_gv\"]/tbody/tr/td[4]')\n A_price=driver.find_elements_by_xpath('//*[@id=\"cphContent_ucCT_gv\"]/tbody/tr/td[6]')\n A_status=driver.find_elements_by_xpath('//*[@id=\"cphContent_ucCT_gv\"]/tbody/tr/td[2]')\n #初始化項目陣列==============================================\n items=[]\n prices=[]\n status=[]\n if len(A_item)>1:\n for i in range(1,len(A_item)):\n itemName=A_item[i].get_attribute('innerHTML')\n itemName=re.sub(' .*','',itemName)\n items.append(itemName)\n price=A_price[i].get_attribute('innerHTML')\n price=re.sub('\\.00','',price)\n prices.append(price)\n status.append(A_status[i].get_attribute('innerHTML'))\n item={}\n item['price']=prices[i-1]\n item['status']=status[i-1]\n A_dict[items[i-1]]=item\n A_optchange(A_dict,items,row)\n i=0\n items=[]\n prices=[]\n status=[]\n #切換到E廠==================================================\n driver.switch_to_window('tab2')\n srch = driver.find_element_by_xpath('//*[@id=\"orderForm\"]/ul/li[2]/span/input')\n srch.send_keys(ID)\n srch_btn = driver.find_element_by_xpath('//*[@id=\"orderForm\"]/ul/li[4]/a')\n srch_btn.click()\n 
time.sleep(0.7)\n    E_item=driver.find_elements_by_css_selector('.item .name a')\n    E_price=driver.find_elements_by_css_selector('.item .sell_price span')\n    E_status=driver.find_elements_by_css_selector('.item .status')\n    if len(E_item)>0:\n        for i in range(0,len(E_item)):\n            itemName=E_item[i].get_attribute('innerHTML')\n            items.append(itemName)\n            prices.append(E_price[i].get_attribute('innerHTML'))\n            status.append(E_status[i].get_attribute('innerHTML'))\n            item={}\n            item['price']=prices[i]\n            item['status']=status[i]\n            E_dict[items[i]]=item\n        #E_dict['實驗項目']=E_dict.pop(items[0])\n        E_optchange(E_dict,items,row)\n\n# Search by name ==========================================================\ndef NAME_srch(NAME,row):#row:0-4\n    # Switch to vendor A ==================================================\n    driver.switch_to.window(handles[0])\n    srch = driver.find_element_by_id('cphContent_ucCT_txtKeyword')\n    srch.send_keys(NAME)\n    srch_btn = driver.find_element_by_id('cphContent_ucCT_btnSearch')\n    srch_btn.click()\n    srch = driver.find_element_by_id('cphContent_ucCT_txtKeyword')\n    srch.clear()\n    time.sleep(0.3)\n    A_item=driver.find_elements_by_xpath('//*[@id=\"cphContent_ucCT_gv\"]/tbody/tr/td[4]')\n    A_price=driver.find_elements_by_xpath('//*[@id=\"cphContent_ucCT_gv\"]/tbody/tr/td[6]')\n    A_status=driver.find_elements_by_xpath('//*[@id=\"cphContent_ucCT_gv\"]/tbody/tr/td[2]')\n    # Initialize the item arrays ==============================================\n    items=[]\n    prices=[]\n    status=[]\n    if len(A_item)>1:\n        for i in range(1,len(A_item)):\n            itemName=A_item[i].get_attribute('innerHTML')\n            itemName=re.sub(' .*','',itemName)\n            items.append(itemName)\n            price=A_price[i].get_attribute('innerHTML')\n            price=re.sub('\\.00','',price)\n            prices.append(price)\n            status.append(A_status[i].get_attribute('innerHTML'))\n            item={}\n            item['price']=prices[i-1]\n            item['status']=status[i-1]\n            A_dict[items[i-1]]=item\n        A_optchange(A_dict,items,row)\n    i=0\n    items=[]\n    prices=[]\n    status=[]\n    # Switch to vendor E ==================================================\n    driver.switch_to_window('tab2')\n    srch = driver.find_element_by_xpath('//*[@id=\"orderForm\"]/ul/li[1]/span/input')\n    srch.send_keys(NAME)\n    srch_btn = driver.find_element_by_xpath('//*[@id=\"orderForm\"]/ul/li[4]/a')\n    srch_btn.click()\n    time.sleep(0.5)\n    E_item=driver.find_elements_by_css_selector('.item .name a')\n    E_price=driver.find_elements_by_css_selector('.item .sell_price span')\n    E_status=driver.find_elements_by_css_selector('.item .status')\n    if len(E_item)>0:\n        for i in range(0,len(E_item)):\n            itemName=E_item[i].get_attribute('innerHTML')\n            items.append(itemName)\n            prices.append(E_price[i].get_attribute('innerHTML'))\n            status.append(E_status[i].get_attribute('innerHTML'))\n            item={}\n            item['price']=prices[i]\n            item['status']=status[i]\n            E_dict[items[i]]=item\n        #E_dict['實驗項目']=E_dict.pop(items[0])\n        E_optchange(E_dict,items,row)\n\ndef A_optchange(A_dict,itemList,row):\n    if row==0:\n        menu = A_N1[\"menu\"]\n        menu.delete(0, \"end\")\n        A_li1=[]\n        dict_string=list(A_dict.keys())\n        for string in itemList:\n            A_li1.append(string)\n            varA1.set(A_li1[0])\n            menu.add_command(label=string, command=tk._setit(varA1, string))\n        item=A_dict[varA1.get()]\n        A_P1.config(text=item['price'])\n    elif row==1:\n        menu = A_N2[\"menu\"]\n        menu.delete(0, \"end\")\n        A_li2=[]\n        dict_string=list(A_dict.keys())\n        for string in itemList:\n            A_li2.append(string)\n            varA2.set(A_li2[0])\n            menu.add_command(label=string, command=tk._setit(varA2, string))\n        item=A_dict[varA2.get()]\n        A_P2.config(text=item['price'])\n    elif row==2:\n        menu = A_N3[\"menu\"]\n        
menu.delete(0, \"end\")\n        A_li3=[]\n        dict_string=list(A_dict.keys())\n        for string in itemList:\n            A_li3.append(string)\n            varA3.set(A_li3[0])\n            menu.add_command(label=string, command=tk._setit(varA3, string))\n        item=A_dict[varA3.get()]\n        A_P3.config(text=item['price'])\n    elif row==3:\n        menu = A_N4[\"menu\"]\n        menu.delete(0, \"end\")\n        A_li4=[]\n        dict_string=list(A_dict.keys())\n        for string in itemList:\n            A_li4.append(string)\n            varA4.set(A_li4[0])\n            menu.add_command(label=string, command=tk._setit(varA4, string))\n        item=A_dict[varA4.get()]\n        A_P4.config(text=item['price'])\n    elif row==4:\n        menu = A_N5[\"menu\"]\n        menu.delete(0, \"end\")\n        A_li5=[]\n        dict_string=list(A_dict.keys())\n        for string in itemList:\n            A_li5.append(string)\n            varA5.set(A_li5[0])\n            menu.add_command(label=string, command=tk._setit(varA5, string))\n        item=A_dict[varA5.get()]\n        A_P5.config(text=item['price'])\n\ndef E_optchange(E_dict,itemList,row):\n    if row==0:\n        menu = E_N1[\"menu\"]\n        menu.delete(0, \"end\")\n        E_li1=[]\n        for string in itemList:\n            E_li1.append(string)\n            varE1.set(E_li1[0])\n            menu.add_command(label=string, command=tk._setit(varE1, string))\n        item=E_dict[varE1.get()]\n        E_P1.config(text=item['price'])\n    elif row==1:\n        menu = E_N2[\"menu\"]\n        menu.delete(0, \"end\")\n        E_li2=[]\n        for string in itemList:\n            E_li2.append(string)\n            varE2.set(E_li2[0])\n            menu.add_command(label=string, command=tk._setit(varE2, string))\n        item=E_dict[varE2.get()]\n        E_P2.config(text=item['price'])\n\n    elif row==2:\n        menu = E_N3[\"menu\"]\n        menu.delete(0, \"end\")\n        E_li3=[]\n        for string in itemList:\n            E_li3.append(string)\n            varE3.set(E_li3[0])\n            menu.add_command(label=string, command=tk._setit(varE3, string))\n        item=E_dict[varE3.get()]\n        E_P3.config(text=item['price'])\n\n    elif row==3:\n        menu = E_N4[\"menu\"]\n        menu.delete(0, \"end\")\n        E_li4=[]\n        for string in itemList:\n            E_li4.append(string)\n            varE4.set(E_li4[0])\n            menu.add_command(label=string, command=tk._setit(varE4, string))\n        item=E_dict[varE4.get()]\n        E_P4.config(text=item['price'])\n\n    elif row==4:\n        menu = E_N5[\"menu\"]\n        menu.delete(0, \"end\")\n        E_li5=[]\n        for string in itemList:\n            E_li5.append(string)\n            varE5.set(E_li5[0])\n            menu.add_command(label=string, command=tk._setit(varE5, string))\n        item=E_dict[varE5.get()]\n        E_P5.config(text=item['price'])\n\ndef Dataprocess():\n    df['健保碼'][0]=enID_1.get()\n    df['健保碼'][1]=enID_2.get()\n    df['健保碼'][2]=enID_3.get()\n    df['健保碼'][3]=enID_4.get()\n    df['健保碼'][4]=enID_5.get()\n\n    df['名稱'][0]=enNAME_1.get()\n    df['名稱'][1]=enNAME_2.get()\n    df['名稱'][2]=enNAME_3.get()\n    df['名稱'][3]=enNAME_4.get()\n    df['名稱'][4]=enNAME_5.get()\n\n    # Reset the widgets before a new lookup ===============================\n    varA1.set('')\n    varA2.set('')\n    varA3.set('')\n    varA4.set('')\n    varA5.set('')\n\n    varE1.set('')\n    varE2.set('')\n    varE3.set('')\n    varE4.set('')\n    varE5.set('')\n\n    A_N1[\"menu\"].delete(0, \"end\")\n    A_N2[\"menu\"].delete(0, \"end\")\n    A_N3[\"menu\"].delete(0, \"end\")\n    A_N4[\"menu\"].delete(0, \"end\")\n    A_N5[\"menu\"].delete(0, \"end\")\n\n    E_N1[\"menu\"].delete(0, \"end\")\n    E_N2[\"menu\"].delete(0, \"end\")\n    E_N3[\"menu\"].delete(0, \"end\")\n    E_N4[\"menu\"].delete(0, \"end\")\n    E_N5[\"menu\"].delete(0, \"end\")\n\n    A_P1.config(text='0')\n    A_P2.config(text='0')\n    A_P3.config(text='0')\n    A_P4.config(text='0')\n    A_P5.config(text='0')\n\n    E_P1.config(text='0')\n    E_P2.config(text='0')\n    E_P3.config(text='0')\n    E_P4.config(text='0')\n    E_P5.config(text='0')\n\n    A_S1.config(text='-')\n    A_S2.config(text='-')\n    A_S3.config(text='-')\n    A_S4.config(text='-')\n    A_S5.config(text='-')\n\n    
E_S1.config(text='-')\n    E_S2.config(text='-')\n    E_S3.config(text='-')\n    E_S4.config(text='-')\n    E_S5.config(text='-')\n\n    # Search by NHI code first, then fall back to the name\n    for i in range(0,5):#0-4\n        if df['健保碼'][i]!='':\n            ID_srch(df['健保碼'][i],i)\n        elif df['名稱'][i]!='':\n            NAME_srch(df['名稱'][i],i)\n        else:\n            continue\ndef A_update(A_P,A_S,varA):\n    item=A_dict[varA.get()]\n    price=item['price']\n    status=item['status']\n    A_P.config(text=price)\n    A_S.config(text=status)\n\ndef E_update(E_P,E_S,varE):\n    item=E_dict[varE.get()]\n    price=item['price']\n    status=item['status']\n    E_P.config(text=price)\n    E_S.config(text=status)\n# Build the window widgets ==================================\n \n\n# Countdown timer that periodically refreshes both sites ================================================\ndef timeout():\n    timer=1200\n    while True:\n        if timer>0:\n            time.sleep(1)\n            timer-=1\n            countdown.config(text='A、E網站將於'+str(timer)+'秒後重新整理')\n        else:\n            countdown.config(text='重新整理中,請稍後...')\n            timer=1200\n            driver.switch_to.window(handles[0])\n            driver.refresh()\n            driver.switch_to_window('tab2')\n            driver.refresh()\n\n\nwin=tk.Tk()\nwin.title('查價程式')\nwin.geometry('1500x300')\n\nlbID=tk.Label(win,text='健保碼',width=12).grid(row=0,column=0)\nlbNAME=tk.Label(win,text='名稱',width=12).grid(row=0,column=1)\nlbA_N=tk.Label(win,text='A廠品項',width=20).grid(row=0,column=2)\nlbA_P=tk.Label(win,text='A品項價格',width=10).grid(row=0,column=3)\nlbA_S=tk.Label(win,text='A品項庫存',width=10).grid(row=0,column=4)\nlbE_N=tk.Label(win,text='E廠品項',width=20).grid(row=0,column=5)\nlbE_P=tk.Label(win,text='E品項價格',width=10).grid(row=0,column=6)\nlbE_S=tk.Label(win,text='E品項庫存',width=10).grid(row=0,column=7)\nlbS1=tk.Label(win,text='業務1價',width=7).grid(row=0,column=8)\nlbS2=tk.Label(win,text='業務2價',width=7).grid(row=0,column=9)\nlbS3=tk.Label(win,text='業務3價',width=7).grid(row=0,column=10)\n\n\n\n# NHI-code entry fields =====================================\nID1=tk.StringVar(win)\nID2=tk.StringVar(win)\nID3=tk.StringVar(win)\nID4=tk.StringVar(win)\nID5=tk.StringVar(win)\n\nenID_1=tk.Entry(win,width=12,textvariable=ID1)\nenID_2=tk.Entry(win,width=12,textvariable=ID2)\nenID_3=tk.Entry(win,width=12,textvariable=ID3)\nenID_4=tk.Entry(win,width=12,textvariable=ID4)\nenID_5=tk.Entry(win,width=12,textvariable=ID5)\n\nenID_1.grid(column=0,row=1)\nenID_2.grid(column=0,row=2)\nenID_3.grid(column=0,row=3)\nenID_4.grid(column=0,row=4)\nenID_5.grid(column=0,row=5)\n\n# Name entry fields ========================================\nenNAME_1=tk.Entry(win,width=12)\nenNAME_2=tk.Entry(win,width=12)\nenNAME_3=tk.Entry(win,width=12)\nenNAME_4=tk.Entry(win,width=12)\nenNAME_5=tk.Entry(win,width=12)\n\nenNAME_1.grid(column=1,row=1)\nenNAME_2.grid(column=1,row=2)\nenNAME_3.grid(column=1,row=3)\nenNAME_4.grid(column=1,row=4)\nenNAME_5.grid(column=1,row=5)\n\n# Vendor A item dropdowns ======================================\nA_li1=['']\nA_li2=['']\nA_li3=['']\nA_li4=['']\nA_li5=['']\n\nvarA1=tk.StringVar(win)\nvarA2=tk.StringVar(win)\nvarA3=tk.StringVar(win)\nvarA4=tk.StringVar(win)\nvarA5=tk.StringVar(win)\n\nA_N1=tk.OptionMenu(win,varA1,*A_li1)\nA_N2=tk.OptionMenu(win,varA2,*A_li2)\nA_N3=tk.OptionMenu(win,varA3,*A_li3)\nA_N4=tk.OptionMenu(win,varA4,*A_li4)\nA_N5=tk.OptionMenu(win,varA5,*A_li5)\n\nA_N1.grid(column=2,row=1)\nA_N2.grid(column=2,row=2)\nA_N3.grid(column=2,row=3)\nA_N4.grid(column=2,row=4)\nA_N5.grid(column=2,row=5)\n\n# Vendor A price labels ======================================\nA_P1=tk.Label(win,text='0')\nA_P2=tk.Label(win,text='0')\nA_P3=tk.Label(win,text='0')\nA_P4=tk.Label(win,text='0')\nA_P5=tk.Label(win,text='0')\n\nA_P1.grid(column=3,row=1)\nA_P2.grid(column=3,row=2)\nA_P3.grid(column=3,row=3)
\nA_P4.grid(column=3,row=4)\nA_P5.grid(column=3,row=5)\n\n# Vendor A stock labels ===========================================\nA_S1=tk.Label(win,text='-')\nA_S2=tk.Label(win,text='-')\nA_S3=tk.Label(win,text='-')\nA_S4=tk.Label(win,text='-')\nA_S5=tk.Label(win,text='-')\n\nA_S1.grid(column=4,row=1)\nA_S2.grid(column=4,row=2)\nA_S3.grid(column=4,row=3)\nA_S4.grid(column=4,row=4)\nA_S5.grid(column=4,row=5)\n\n# Vendor E stock labels =============================================\nE_S1=tk.Label(win,text='-')\nE_S2=tk.Label(win,text='-')\nE_S3=tk.Label(win,text='-')\nE_S4=tk.Label(win,text='-')\nE_S5=tk.Label(win,text='-')\n\nE_S1.grid(column=7,row=1)\nE_S2.grid(column=7,row=2)\nE_S3.grid(column=7,row=3)\nE_S4.grid(column=7,row=4)\nE_S5.grid(column=7,row=5)\n \n# Vendor E item dropdowns ============================================\nE_li1=['']\nE_li2=['']\nE_li3=['']\nE_li4=['']\nE_li5=['']\n\nvarE1=tk.StringVar(win)\nvarE2=tk.StringVar(win)\nvarE3=tk.StringVar(win)\nvarE4=tk.StringVar(win)\nvarE5=tk.StringVar(win)\n\nE_N1=tk.OptionMenu(win,varE1,*E_li1)\nE_N2=tk.OptionMenu(win,varE2,*E_li2)\nE_N3=tk.OptionMenu(win,varE3,*E_li3)\nE_N4=tk.OptionMenu(win,varE4,*E_li4)\nE_N5=tk.OptionMenu(win,varE5,*E_li5)\n\nE_N1.grid(column=5,row=1)\nE_N2.grid(column=5,row=2)\nE_N3.grid(column=5,row=3)\nE_N4.grid(column=5,row=4)\nE_N5.grid(column=5,row=5)\n\n# Vendor E price labels ==========================================\nE_P1=tk.Label(win,text='0')\nE_P2=tk.Label(win,text='0')\nE_P3=tk.Label(win,text='0')\nE_P4=tk.Label(win,text='0')\nE_P5=tk.Label(win,text='0')\n\nE_P1.grid(column=6,row=1)\nE_P2.grid(column=6,row=2)\nE_P3.grid(column=6,row=3)\nE_P4.grid(column=6,row=4)\nE_P5.grid(column=6,row=5)\n\n# React to A/E dropdown selection changes ======================================\nvarA1.trace('w',lambda *args: A_update(A_P1,A_S1,varA1))\nvarA2.trace('w',lambda *args: A_update(A_P2,A_S2,varA2))\nvarA3.trace('w',lambda *args: A_update(A_P3,A_S3,varA3))\nvarA4.trace('w',lambda *args: A_update(A_P4,A_S4,varA4))\nvarA5.trace('w',lambda *args: A_update(A_P5,A_S5,varA5))\n\nvarE1.trace('w',lambda *args:E_update(E_P1,E_S1,varE1))\nvarE2.trace('w',lambda *args:E_update(E_P2,E_S2,varE2))\nvarE3.trace('w',lambda *args:E_update(E_P3,E_S3,varE3))\nvarE4.trace('w',lambda *args:E_update(E_P4,E_S4,varE4))\nvarE5.trace('w',lambda *args:E_update(E_P5,E_S5,varE5))\n\n# Sales-rep quote entries ===============================================\nS1_1=tk.Entry(win,width=7).grid(column=8,row=1)\nS1_2=tk.Entry(win,width=7).grid(column=8,row=2)\nS1_3=tk.Entry(win,width=7).grid(column=8,row=3)\nS1_4=tk.Entry(win,width=7).grid(column=8,row=4)\nS1_5=tk.Entry(win,width=7).grid(column=8,row=5)\n\nS2_1=tk.Entry(win,width=7).grid(column=9,row=1)\nS2_2=tk.Entry(win,width=7).grid(column=9,row=2)\nS2_3=tk.Entry(win,width=7).grid(column=9,row=3)\nS2_4=tk.Entry(win,width=7).grid(column=9,row=4)\nS2_5=tk.Entry(win,width=7).grid(column=9,row=5)\n\nS3_1=tk.Entry(win,width=7).grid(column=10,row=1)\nS3_2=tk.Entry(win,width=7).grid(column=10,row=2)\nS3_3=tk.Entry(win,width=7).grid(column=10,row=3)\nS3_4=tk.Entry(win,width=7).grid(column=10,row=4)\nS3_5=tk.Entry(win,width=7).grid(column=10,row=5)\n\ncountdown=tk.Label(win,text='')\ncountdown.grid(column=6,row=6,columnspan=2)\n\nbtn=tk.Button(win,width=10,text='查詢',command=Dataprocess)\nbtn.grid(column=9,row=6,columnspan=2)\n\n\n\n# Thread setup ===================================\ntList=[]\n\nt2=threading.Thread(target=timeout)\nt2.daemon=True  # let the refresh thread exit together with the window\ntList.append(t2)\n\nfor t in tList:\n    
t.start()\nwin.mainloop()","sub_path":"main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":19279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"275649492","text":"import sys\n\n\ndef send_request(data: str, finish: bool = False) -> int:\n sys.stdout.write(f'{data}\\n')\n sys.stdout.flush()\n if finish:\n return 0\n server_answer = sys.stdin.readline()\n return int(server_answer)\n\n'''\nhttps://contest.yandex.ru/yacup/contest/19036/problems/D/\n\nВ Поиске Яндекса реализована так называемая политика «зелёного транка»: любой код, попадающий в репозиторий, с некоторыми оговорками гарантированно не ломает сборку и тесты.\nТесты, впрочем, бывают крайне сложными, и запускать их все на каждый коммит оказывается нецелесообразно. Так что для особенно сложных случаев реализована следующая процедура: тесты запускаются с некоторой регулярностью, а проверяется сразу набор коммитов. Таким образом, в течение некоторого времени в транк может попасть \nn\n непроверенных коммитов, среди которых как минимум один содержит ошибку.\nВ такой ситуации тестирующая система должна обнаружить номер \nm\n первого коммита, сломавшего тесты. Этот номер обладает следующим свойством: все коммиты с номерами, меньшими \nm\n, успешно проходят тесты, а коммиты с номерами, большими либо равными \nm\n, тесты не проходят. В данной задаче гарантируется, что коммит с указанными свойствами обязательно существует и является единственным.\nВ целях экономии ресурсов тестирующая система может проверять только один коммит за раз. Вам требуется написать программу, которая будет определять номер \nm\n.\nЭта задача немного необычна — в ней вам предстоит реализовать интерактивное взаимодействие с тестирующей системой. Это означает, что вы можете делать запросы и получать ответы в онлайн-режиме. Обратите внимание, что ввод/вывод в этой задаче — стандартный (то есть с экрана на экран). После вывода очередного запроса обязательно используйте функции очистки потока, чтобы часть вашего вывода не осталась в каком-нибудь буфере. Например, на С++ надо использовать функцию fflush(stdout), на Java вызов System.out.flush(), на Pascal flush(output) и stdout.flush() для языка Python.\nВы можете делать запросы к тестирующей системе. Каждый запрос — это вывод целого числа, принадлежащего диапазону от \n1\n до \nn\n. В ответ тестирующая система вернёт один из двух результатов:\n\nстрока «1» (без кавычек), если коммит с соответствующим номером успешно проходит все тесты;\nстрока «0» (без кавычек), если коммит с соответствующим номером не проходит тесты.\nЕсли ваша программа в точности знает номер \nm\n, она должна вывести строку вида «! m», после чего завершить свою работу.\nВашей программе разрешается сделать не более \n2\n5\n запросов.\n \n'''\ndef main():\n n = 20\n n = sys.stdin.readline().strip()\n start = 1\n end = int(n)\n # mid = (start + end) // 2\n # server_response = send_request(str(start))\n # if server_response == '1' # Fails on the first commit\n # send_request(f'! 1', finish=True)\n # else:\n # server_response = None\n server_response = None\n\n while True:\n\n mid = (start + end) // 2\n # print(f'start={start}, end={end}, mid={mid}')\n if start == end and start == mid and end == mid:\n send_request(f'! {mid}', finish=True)\n return\n\n server_response = send_request(str(mid))\n\n if server_response == 1: # commit succesful\n start = mid + 1\n elif server_response == 0: # commit fails\n end = mid\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"yandex_cup/fail_commit_bin_search.py","file_name":"fail_commit_bin_search.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"607648483","text":"\nfrom PIL import Image\nfrom random import randint\nfrom colour import Color\nimport time\nimport sys\nimport words\n\n\ndef createSplash(color,size,og_file):\n\n\t# img = Image.open('img/splatters.jpg')\n\timg = Image.open(og_file)\n\timg = img.convert(\"RGBA\")\n\tpixdata = img.load()\n\n\tfor y in xrange(img.size[1]):\n\t for x in xrange(img.size[0]):\n\t if pixdata[x, y][3] <= 255 and pixdata[x, y][3] > 0:\n\t pixdata[x, y] = color\n\t\n\timg = img.resize(size, Image.ANTIALIAS)\n\treturn img\n\n# def splashesLayer(color,size):\n# \timg = Image.open('img/splatters.jpg')\n# \timg = img.convert(\"RGBA\")\n# \tpixdata = img.load()\n\n# \tfor y in xrange(img.size[1]):\n# \t for x in xrange(img.size[0]):\n# \t if pixdata[x, y][3] <= 255 and pixdata[x, y][3] > 0:\n# \t pixdata[x, y] = color\n# \timg = img.resize(size, Image.ANTIALIAS)\n# \treturn img\n\n\ndef splashLayer(emotion_list,frequency,weight_dimensions,background,colors):\n\tfor emotion in reversed(emotion_list):\n\t\t#Create a splash of the emotions color to the size/amount of its weight.\n\t\temotion_weight = frequency[emotion]['weight']\n\t\tsize = weight_dimensions[emotion_weight]\n\t\t# print size\n\t\tcolor = Color(colors[emotion]).rgb\n\t\tr = int(255 * color[0])\n\t\tg = int(255 * color[1])\n\t\tb = int(255 * color[2])\n\t\trgb = (r,g,b)\n\n\t\tfiles = ['img/splatter.png','img/splatter2.png','img/splatter3.png']\n\n\t\t# for og_file in files:\n\t\t# print randint(0,len(files))\n\t\tog_file = files[randint(0,0)]\n\t\timg = createSplash(rgb,size,og_file)\n\t\trotate = 0\n\t\tif randint(0,1):\n\t\t\trotate = 180\n\t\timg = img.rotate(rotate)\n\t\toff_x = (randint(-(size[0]/2) ,background.size[0]-(size[0]/2)))\n\t\toff_y = (randint(-(size[1]/2) ,background.size[1]-(size[1]/2)))\n\t\tbackground.paste(img, ( off_x , off_y),mask=img)\n\n\treturn background\n\ndef main():\n\n\ttop_hit, frequency, emotion_list = words.map_words()\n\n\tcolors = {\n\t\t\"bored\" : \"#F4CAF4\",\n\t\t\"distracted\" : \"#DFC9F0\",\n\t\t\"disbelief\" : \"#D978D4\",\n\t\t\"distate\" : \"#A473CB\",\n\t\t\"disgusted\" : \"#7539AE\",\n\t\t\"disdain\" : \"#4F1E7E\",\n\t\t\"apathetic\" : \"#A01F60\",\n\t\t\"irate\" : \"#EA2207\",\n\t\t\"angry\" : \"#FF2807\",\n\t\t\"loathing\" : \"#401665\",\n\t\t\"bitter\" : \"#7C2102\",\n\t\t\"enraged\" : \"#7C2102\",\n\t\t\"contemptuous\" : \"#931632\",\n\t\t\"irritated\" : \"#F15A82\",\n\t\t\"cranky\" : \"#EEA7BC\",\n\t\t\"aggravated\" : \"#FEA7A5\",\n\t\t\"upset\" : \"#FF997E\",\n\t\t\"frustrated\" : \"#FF7D33\",\n\t\t\"hysterical\" : \"#000000\",\n\t\t\"frantic\" : \"#7B4B06\",\n\t\t\"worried\" : \"#E79918\",\n\t\t\"anxious\" : \"#FFA032\",\n\t\t\"nervous\" : \"#FFC582\",\n\t\t\"confused\" : \"#FFDFAE\",\n\t\t\"concerned\" : \"#FFC55A\",\n\t\t\"frantic\" : \"#7B5107\",\n\t\t\"terrified\" : \"#A48D16\",\n\t\t\"awed\" : \"#CFB01E\",\n\t\t\"astonished\" :\"#CEC61F\",\n\t\t\"afraid\" : \"#FFE13A\",\n\t\t\"startled\" : \"#FFD55D\",\n\t\t\"surprised\" : \"#FFDF83\",\n\t\t\"apprehensive\" : \"#FFEC84\",\n\t\t\"unsure\" : \"#FFFDD4\",\n\t\t\"interested\" : \"#E1EC7C\",\n\t\t\"intrigued\" : \"#CBE139\",\n\t\t\"mesmerized\" : \"#C3DD24\",\n\t\t\"amazed\" : \"#ACC61E\",\n\t\t\"fixated\" : \"#9AB01A\",\n\t\t\"obsessed\" : \"#285609\",\n\t\t\"exuberant\" : \"#31710E\",\n\t\t\"thrilled\" : \"#3C8E13\",\n\t\t\"excited\" : \"#42A017\",\n\t\t\"enthusiastic\" : \"#45B41A\",\n\t\t\"giddy\" : \"#7EC74B\",\n\t\t\"jolly\" : \"#9EDBA1\",\n\t\t\"happy\" : \"#27B42E\",\n\t\t\"satisfied\" : 
\"#6AD170\",\n\t\t\"overjoyed\" : \"#00B268\",\n\t\t\"content\" : \"#C7F1E2\",\n\t\t\"calm\" : \"#C9EFF3\",\n\t\t\"sad\" : \"#0378BB\",\n\t\t\"grief\" : \"#0072A4\",\n\t\t\"depressed\" : \"#004273\",\n\t\t\"despair\" : \"#181050\",\n\t\t\"distraught\" : \"#002B96\",\n\t\t\"dissapointed\" : \"#91A0E3\",\n\t\t\"hurt\" : \"#C2D6EF\"\n}\n\n\n\tmax_weight = frequency[top_hit]['weight']\n\tweight_list = reversed(range(1,max_weight+1))\n\n\tweight_dimensions = {}\n\n\tfor weight in weight_list:\n\n\t\tif int(weight) == max_weight:\n\t\t\tweight_dimensions[weight] = (2000,1400)\n\t\t\tweight_dimensions[weight] = (1400,960)\n\t\telse:\n\t\t\tx = int(weight_dimensions[weight+1][0] * .75)\n\t\t\ty = int(weight_dimensions[weight+1][1] * .75)\n\t\t\tif x < 400:\n\t\t\t\tx = 442\n\t\t\tif y < 300:\n\t\t\t\ty = 303\n\t\t\tweight_dimensions[weight] = (x,y)\n\n\tbackground = Image.new('RGBA', (2000,1000), (0, 0, 0, 255))\n\tbackground2 = Image.new('RGBA', (2000,1500), (0, 0, 0, 0))\n\tbackground = splashLayer(emotion_list,frequency,weight_dimensions,background,colors)\n\tbackground = splashLayer(emotion_list,frequency,weight_dimensions,background,colors)\n\t\n\t#background.show()\n\tbackground.save(\"img/tweet.png\")\n\n\nif __name__ == '__main__': \n main()\n","sub_path":"image_v01.py","file_name":"image_v01.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"621161544","text":"import adsk.core, adsk.fusion, traceback\n\ndef run(context):\n ui = None\n try: \n app = adsk.core.Application.get()\n ui = app.userInterface\n design = app.activeProduct\n rootComp = design.rootComponent\n\n # Create a new sketch on the xy plane.\n sketches = rootComp.sketches;\n xzPlane = rootComp.xZConstructionPlane\n sketch = sketches.add(xzPlane)\n circles = sketch.sketchCurves.sketchCircles\n circle1 = circles.addByCenterRadius(adsk.core.Point3D.create(0, 0, 0), 2)\n circle2 = circles.addByCenterRadius(adsk.core.Point3D.create(8, 3, 0), 3)\n circle3 = circles.addByCenterRadius(circle2.centerSketchPoint, 4)\n except:\n if ui:\n ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))","sub_path":"create_circle_test.py","file_name":"create_circle_test.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"326645912","text":"from errors.error import *\n\n__all__ = [\"RunInsideGitRepoError\"]\n\n\nclass RunInsideGitRepoError(Error):\n \"\"\"Exception raised when autopush is run inside a Git repository\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, expression, message):\n self.expression = expression\n self.message = message\n","sub_path":"errors/run_inside_git_repo_error.py","file_name":"run_inside_git_repo_error.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"381248548","text":"import numpy as np\nimport tensorflow as tf\n\nclass ExperienceBuffer(object):\n \"\"\"Used for creating a buffer of experiences to train the agent\"\"\"\n\n def __init__(self, buffer_size, batch_size, state_dims=1, action_dims=1, history_length=1):\n\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.state_dims = state_dims\n\n self.actions = np.empty([buffer_size, action_dims], dtype=np.float32)\n self.states = np.empty([buffer_size, state_dims]) # T\n self.rewards = np.empty([buffer_size], dtype=np.float32)\n self.history_length = history_length\n # The next state for a given state will be the next index\n self.terminals = np.zeros(buffer_size) # Stores whether the observation has ended\n\n self.current = 0\n self.count = 0\n\n def add(self, action, reward, state, terminal):\n\n self.actions[self.current] = action\n self.rewards[self.current] = reward\n self.states[self.current] = state\n self.terminals[self.current] = terminal\n self.count = max(self.count, self.current + 1)\n self.current = (self.current + 1) % self.buffer_size\n\n def add_batch(self, action, reward, state, terminal):\n\n size = len(action)\n self.actions[self.current:self.current + size] = action\n self.rewards[self.current:self.current + size] = reward.squeeze()\n self.states[self.current:self.current + size] = state\n self.terminals[self.current:self.current + size] = terminal\n self.count = max(self.count, self.current + size)\n self.current = (self.current + size) % self.buffer_size\n\n def sample(self):\n\n # Generate a random index\n prestates = np.empty([self.batch_size, self.state_dims])\n poststates = np.empty([self.batch_size, self.state_dims])\n\n indexes = []\n while len(indexes) < self.batch_size:\n # Keep drawing candidate indices\n while True:\n index = np.random.randint(self.history_length, self.count - 1) # This is going to be an exausting\n # Determine if enough prior observations\n if index >= self.current > index - self.history_length:\n continue\n # Check if terminal component in sequence\n if self.terminals[(index - self.history_length):index].any():\n continue\n break\n\n # Fill pre and post states\n prestates[len(indexes), ...] = self.retrieve(index - 1)\n poststates[len(indexes), ...] = self.retrieve(index)\n indexes.append(index)\n\n # Get data to return in batch\n rv_rewards = self.rewards[indexes]\n rv_actions = self.actions[indexes]\n rv_terminals = self.terminals[indexes]\n\n rv_rewards = (rv_rewards - rv_rewards.mean()) / rv_rewards.std()\n return rv_actions, rv_rewards, prestates, poststates, rv_terminals\n\n def retrieve(self, index):\n\n index = index % self.count\n if index >= self.history_length - 1:\n return self.states[(index - (self.history_length - 1)):(index + 1), ...]\n else:\n indexes = [(index - i) % self.count for i in reversed(range(self.history_length))]\n return self.states[indexes, ...]","sub_path":"continuous-DDPG/buffers.py","file_name":"buffers.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"583389201","text":"from metadata import metadata as mda\n\n\ndef test_get_key_value():\n\n line = \"title: Titre\"\n result = mda.get_key_value(line)\n assert result == {'title': \"Titre\"}\n\n line = \"title : Titre\"\n result = mda.get_key_value(line)\n assert result == {'title': \"Titre\"}\n\n line = \"line without key-value structure\"\n result = mda.get_key_value(line)\n assert result is None\n\n\ndef test_check_keys():\n\n metadata = {\"title\": \"Titre\", \"intra\": \"#note\", \"where\": \"#usa\"}\n rules = {\"metadata\": {\"keys\": [\"title\", \"intra\", \"where\"],\n \"values\": {\"title\": {}, \"intra\": {}, \"where\": {}}}}\n assert mda.check_keys(metadata, rules) == ([], metadata)\n\n metadata = {\"title\": \"Titre\", \"intra\": \"#note\", \"where\": \"#usa\"}\n rules = {\"metadata\": {\"keys\": [\"title\", \"tags\"],\n \"values\": {\"title\": {}, \"tags\": {}}}}\n assert mda.check_keys(metadata, rules) ==\\\n ([\"Additional key(s): intra, where\"], metadata)\n\n metadata = {\"title\": \"Titre\", \"intra\": \"#note\", \"where\": \"#usa\"}\n rules = {\"metadata\": {\"keys\": [\"title\", \"tags\"],\n \"values\": {\"title\": {}, \"tags\": {\"mandatory\": True}}}}\n assert mda.check_keys(metadata, rules)[0] == \\\n [\"Additional key(s): intra, where\"]\n assert mda.check_keys(metadata, rules)[1] == \\\n {\"title\": \"Titre\", \"tags\": \"#NA-tags\", \"intra\": \"#note\", \"where\": \"#usa\"}\n\n\ndef test_check_values():\n\n metadata = {\"tags\": \"#tag2, #tag3\"}\n rules = {\"metadata\": {\"keys\": [\"tags\"],\n \"values\": {\"tags\": {\"multiple_values\": [\"#tag1\", \"#tag2\"]}}}}\n assert mda.check_values(metadata, rules)[0] == ['Key: tags - Unknown value(s): #tag3']\n assert mda.check_values(metadata, rules)[1] == {'tags': ['#tag2', '#tag3']}\n\n metadata = {\"tags\": \"#tag2, #tag3\", \"intra\": \"#src, #website\"}\n rules = {\"metadata\": {\n \"keys\": [\"tags\", \"intra\"],\n \"values\": {\n \"tags\": {\"multiple_values\": [\"#tag1\", \"#tag2\"]},\n \"intra\": {\"multiple_values\": [\"#src\", \"#book\"]}\n }}}\n assert mda.check_values(metadata, rules)[0] == \\\n ['Key: tags - Unknown value(s): #tag3', 'Key: intra - Unknown value(s): #website']\n assert mda.check_values(metadata, rules)[1] == metadata\n\n\ndef test_join():\n\n dico = {\"key1\": \"value1\", \"key2\": \"value2\"}\n check = \"key1: value1\\nkey2: value2\\n\"\n assert check == mda.join(dico)\n\n dico = {\"key1\": [\"#tag1\", \"#tag2\"], \"key2\": \"value2\"}\n check = \"key1: #tag1, #tag2\\nkey2: value2\\n\"\n assert check == mda.join(dico)\n\n dico = {\"longkey\": \"value1\", \"key\": \"value2\"}\n check = \"longkey: value1\\nkey : value2\\n\"\n assert check == mda.join(dico)\n\n dico = {}\n check = \"\"\n assert check == mda.join(dico)\n","sub_path":"tests/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"132302078","text":"# coding:utf-8\r\n#create by wqh in 2017.5.3 shidaiwang model_id for str\r\nfrom scrapy.spiders import CrawlSpider\r\nfrom mobile.items import GlobalItem\r\nimport scrapy\r\nfrom urllib.parse import urljoin\r\n\r\n\r\nclass ShiDaiWangSpider(CrawlSpider):\r\n #\r\n # 汽车时代网\r\n #\r\n #读取字母的model_id 和 model_url\r\n #\r\n\r\n name = 'shidaiwangStrID'\r\n allowed_domains = ['autotimes.com.cn']\r\n start_urls = ['http://car.autotimes.com.cn/price/ab9/']\r\n\r\n def parse(self, response):\r\n for brandlist in response.css('div.left_tree > ul.in_top > li.leftMain > ul'):\r\n brand_name = brandlist.css('a::text').extract_first()\r\n brand_url = urljoin('http://car.autotimes.com.cn', brandlist.css('a::attr(href)').extract_first())\r\n brand_id = brandlist.css('a::attr(href)').re_first(r'price/([a,b]+[0-9]+)')\r\n makerlist = brandlist.css('li.leftfac > a')\r\n for maker in makerlist:\r\n maker_name = maker.css('::text').extract_first()\r\n maker_url = urljoin('http://car.autotimes.com.cn', maker.css('::attr(href)').extract_first())\r\n maker_id = maker.css('::attr(href)').re_first(r'price/([a,f]+[0-9]+)')\r\n model = brandlist.css('li.leftfac > a+ ul')[0]\r\n for modellist in model.css('li'):\r\n model_name = modellist.css('a::text').extract_first()\r\n model_url = urljoin('http://car.autotimes.com.cn', modellist.css('a::attr(href)').extract_first())\r\n #model_id = modellist.css('a::attr(href)').re_first(r'price/([a,s]+[0-9]+)')[2:]\r\n meta = {'website': 'shidaiwang1',\r\n 'brand_name': brand_name, 'brand_url': brand_url, 'brand_id': brand_id,\r\n 'maker_name': maker_name, 'maker_url': maker_url, 'maker_id': maker_id,\r\n 'model_name': model_name#, 'model_url': model_url\r\n }\r\n yield scrapy.Request(model_url, callback=self.parse_style, meta=meta)\r\n\r\n\r\n def parse_style(self, response):\r\n meta = response.meta\r\n styles = response.css('div.quop_pplb_14.quop_pplb_26')\r\n meta['model_id']=str(response.css('.quop_pplb_4 a::attr(href)').extract_first()).split('/')[-2]\r\n meta['model_url']=response.css('.quop_pplb_4 a::attr(href)').extract_first()\r\n\r\n\r\n for stylelist in styles:\r\n style_name = str(stylelist.css('div.quop_pplb_15.quop_pplb_17::text').extract_first()).strip()\r\n style_id = str(stylelist.css('span:nth-child(1) .quop_pllink_4::attr(href)').extract_first()).split('/')[-1].replace('.html','')\r\n style_url = stylelist.css('span:nth-child(1) .quop_pllink_4::attr(href)').extract_first()\r\n\r\n meta['style_name'] = style_name\r\n meta['style_id'] = style_id\r\n meta['style_url'] = style_url\r\n if style_name != 'None':\r\n official_price = stylelist.css(\r\n 'div.quop_pplb_15.quop_pplb_17+ div.quop_pplb_16.quop_pplb_18::text').re_first(\r\n r'(.*)万')\r\n meta['official_price'] = official_price\r\n\r\n # 不需要 暂未空\r\n meta['price_level'] = ''\r\n meta['car_class'] = ''\r\n item = GlobalItem()\r\n for key in item.fields.keys():\r\n if key in meta.keys():\r\n item[key] = meta[key]\r\n\r\n yield item\r\n","sub_path":"Engineer/mobile/spiders/autotimes_str.py","file_name":"autotimes_str.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"635166121","text":"import cv2\nimport screeninfo\nimport numpy as np\nimport copy\nimport datetime\n\n\n'''\n\n1.为什么要用Image3 \n因为如果\n\tdef addimage(self,image4):\n\t\tself.image1 = cv2.add(image4,self.image1)\n\t\tself.image1_copy = copy.copy(self.image1)\n\timage1会不断地叠加,循环中第二次运行 addimage的时候,输入的self.image1就不是纯黑的图了\n\t而是混合了一帧相机的图,所以要用 image3 作为初始的图.\n\t\n2.为什么要show一下 image3 在show 一下 image1\n第一点: 先show image3 在 show image1 这样image1 会覆盖image3 看到的还是image1\n第二点: cv2.circle(self.image3, (x, y), 4,color1, -1)要在循环中不断刷新image3才能保证\n画出的东西显示出来, 虽然self.image1 = cv2.add(image4,self.image3) image1里面混了image3\nimage1不断地在刷新,但是仍然看不到做的标记 所��每次show image1 之前需要先show 一下image3.\n\n3.image1 的作用只是显示相机的照片image4 画线 擦线绑定的都是image3 \n\n4.相机拍到的是Image4,和相机混合,在一屏储存标记的是image3,image2是在二屏最开始画线的图,image1是混合了image3和image4的图,\nimage5是二屏全屏显示的那个黑色图,将想要的东西赋值给image5的一部分就可以了.self.image6是调同轴用的 把二屏框内全黑的部分变成相机拍到\n的图 这样容易看相机投影仪视场是不是重合\n\n'''\n\n\nclass Window:\n\tdef __init__(self, windowname1, windowname2, width, height):\n\t\tself.windowname1, self.windowname2 = windowname1, windowname2\n\t\tself.image1 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image2 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image3 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image4 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image5 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image6 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image7 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image8 = np.zeros([height, width, 3], np.uint8)\n\t\tself.image2_copy = np.zeros([height, width, 3], np.uint8)\n\t\tself.image3_copy = np.zeros([height, width, 3], np.uint8)\n\t\tself.mode = True\n\t\tself.drawing = False\n\t\tself.number = 0\n\t\tself.list1 = []\n\t\tself.list2 = []\n\t\tself.list3 = []\n\t\tself.list4 = []\n\t\tself.bbox = ()\n\n\tdef createwindow(self, width, height):\n\t\tcv2.namedWindow(self.windowname1, cv2.WINDOW_NORMAL)\n\t\tcv2.resizeWindow(self.windowname1, width, height)\n\t\tcv2.namedWindow(self.windowname2, cv2.WINDOW_NORMAL)\n\t\tcv2.resizeWindow(self.windowname2, width, height)\n\n\tdef movewindow(self, id):\n\t\tscreen_id = id\n\t\tscreen = screeninfo.get_monitors()[screen_id]\n\t\twidth, height = screen.width, screen.height\n\t\tprint('width,height are ', width, height)\n\t\tcv2.moveWindow(self.windowname2, screen.x, screen.y)\n\t\tself.image5 = np.zeros([height, width, 3], np.uint8)\n\n\tdef nobiaotilan(self):\n\t\tcv2.setWindowProperty(self.windowname2, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n\tdef addimage(self, frame):\n\t\tself.image6 = frame\n\t\t# 由于投影仪左右反转 所以这里把放在二屏的相机的图左右翻转一下\n\t\t# self.image6 = cv2.flip(self.image6, 1)\n\t\tself.image1 = cv2.add(frame, self.image3)\n\n\tdef changeimage(self, width, height, x1, y1, z):\n\t\tself.image4 = cv2.resize(self.image2, (width, height))\n\t\t# 调同轴,需要投影相机拍到的画面,调用下面这句话,注释上面这句话\n\t\t# self.image4 = cv2.resize(self.image6, (width, height))\n\n\t\tself.image5[x1:x1+height, y1:y1+width] = self.image4\n\t\t# 加框\n\t\tself.image5[x1-1-z:x1-1, y1-1:y1+1+width] = [255, 255, 255]\n\t\tself.image5[x1+height+1:x1+height+1+z, y1-1:y1+1+width] = [255, 255, 255]\n\t\tself.image5[x1-1-z:x1+height+1+z, y1-1-z:y1-1] = [255, 255, 255]\n\t\tself.image5[x1-1-z:x1+height+1+z, y1+width+1:y1+width+1+z] = [255, 255, 255]\n\n\tdef showimage(self, frame):\n\t\tself.image1 = cv2.add(frame, self.image3)\n\t\t# cv2.imshow(self.windowname1, self.image3)\n\t\tcv2.imshow(self.windowname1, self.image1)\n\t\tcv2.imshow(self.windowname2, self.image5)\n\n\tdef bindingwi(self, 
color1, color2, thickness, width, height):\n\t\tdef drawline(event, x, y, flags, param):\n\t\t\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\t\t\tself.drawing = True\n\t\t\t\t# 由于投影仪自身有反转, 坐标系对应的是opencv的坐标系, x对应width y对应height 如果俩个图画一样的点self.list1和list2\n\t\t\t\t# 坐标一样就行,左右翻转需要改成width-x,上下翻转height-y\n\t\t\t\tself.list1.append((x, y))\n\t\t\t\tself.list2.append((x, y))\n\n\t\t\t\tself.start = datetime.datetime.now()\n\t\t\telif event == cv2.EVENT_MOUSEMOVE:\n\t\t\t\tif self.drawing:\n\t\t\t\t\tif self.mode:\n\t\t\t\t\t\tself.list1.append((x, y))\n\t\t\t\t\t\tself.list2.append((x, y))\n\t\t\t\t\t\tcv2.line(self.image3, self.list1[self.number], self.list1[self.number+1], color1, thickness=thickness)\n\t\t\t\t\t\tcv2.line(self.image2, self.list2[self.number], self.list2[self.number+1], color2, thickness=thickness)\n\t\t\t\t\t\t# 画点 不和画线的坐标一样 画点对应的是opencv的坐标系 x对应width y对应height\n\t\t\t\t\t\t# cv2.circle(self.image3, (x, y), 4,color1, -1)\n\t\t\t\t\t\t# cv2.circle(self.image2, (width-x, y), 4, color2, -1)\n\t\t\t\t\t\tself.end = datetime.datetime.now()\n\t\t\t\t\t\tself.number = self.number+1\n\t\t\t\t\t\t_time = str(self.end-self.start)\n\t\t\t\t\t\ta, b, c = _time.split(':')\n\t\t\t\t\t\tprint(c)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.image3[y - 25:y + 25, x - 25:x + 25, :] = self.image3_copy[y - 25:y + 25, x - 25:x + 25, :]\n\t\t\t\t\t\tself.image2[y - 25:y + 25, x - 25:x + 25, :] = self.image2_copy[y - 25:y + 25, x - 25:x + 25, :]\n\t\t\telif event == cv2.EVENT_LBUTTONUP:\n\t\t\t\tself.drawing = False\n\t\t\t\tfor (x, y) in self.list1:\n\t\t\t\t\tself.list3.append(x)\n\t\t\t\t\tself.list4.append(y)\n\t\t\t\tself.list1 = []\n\t\t\t\tself.list2 = []\n\t\t\t\tself.number = 0\n\n\t\tcv2.setMouseCallback(self.windowname1, drawline)\n\n\tdef movebiaoji(self):\n\t\tif self.list3:\n\t\t\tself.list3.sort()\n\t\t\tself.list4.sort()\n\t\t\t# print(self.list3[0], self.list3[-1])\n\t\t\t# bbox = (x,y,w,h)\n\t\t\tself.bbox = (int(self.list3[0]-3), int(self.list4[0]-3), int(self.list3[-1]-self.list3[0]+3), int(self.list4[-1]-self.list4[0]+3))\n\t\t\tself.image8 = copy.copy(self.image3[int(self.list4[0]-3):int(self.list4[-1]), int(self.list3[0]-3):int(self.list3[-1])])\n\t\t\t# cv2.imshow('1', self.image8)\n\t\t\t# cv2.waitKey(0)\n\t\t\tprint(self.bbox)\n\t\t\tself.list3 = []\n\t\t\tself.list4 = []\n\n\tdef dong(self, x, y, w, h):\n\t\tself.image3[:, :] = [0, 0, 0]\n\t\tdst = cv2.resize(self.image8, (w, h))\n\t\tself.image3[y:y+h, x:x+w] = dst\n\n\n","sub_path":"new-program-2/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"578041701","text":"import json\nimport logging\n\nimport discord\nfrom d4dj_utils.master.asset_manager import AssetManager\n\nfrom miyu_bot.bot.bot import D4DJBot\nfrom miyu_bot.bot.master_asset_manager import MasterFilterManager\n\nlogging.basicConfig(level=logging.INFO)\n\nwith open('config.json') as f:\n bot_token = json.load(f)['token']\n\nasset_manager = AssetManager('assets')\nbot = D4DJBot(asset_manager, MasterFilterManager(asset_manager), command_prefix='!', case_insensitive=True,\n activity=discord.Game(name='https://discord.gg/TThMwrAZTR'))\n\nbot.load_extension('miyu_bot.commands.cogs.card')\nbot.load_extension('miyu_bot.commands.cogs.event')\nbot.load_extension('miyu_bot.commands.cogs.music')\nbot.load_extension('miyu_bot.commands.cogs.utility')\n\n\n@bot.event\nasync def on_ready():\n logging.getLogger(__name__).info(f'Current server count: {len(bot.guilds)}')\n\n\nbot.run(bot_token)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"566408033","text":"import sys\nclass node: \n\n def __init__(self, info): \n self.info = info \n self.next = None \n\n\nclass Stack: \n\n def __init__(self): \n self.top = None\n \n \n def isEmpty(self):\n if self.top is None:\n return True\n return False\n \n def push(self,data):\n self.temp=node(data)\n if self.temp is None:\n print(\"Stack overflow\")\n return\n self.temp.next=self.top\n self.top=self.temp\n \n def pop(self):\n if self.isEmpty():\n print(\"Stack Underflow\")\n sys.exit(0)\n d=self.top.info\n self.top=self.top.next \n return d\n \n def peek(self):\n if self.isEmpty():\n print(\"Stack Underflow\")\n sys.exit(0)\n d=self.top.info\n return d\n def display(self):\n if self.isEmpty():\n print(\"Stack Underflow\")\n sys.exit(0)\n self.p=self.top\n while self.p is not None:\n print(self.p.info)\n self.p=self.p.next\n \ndef match(a,b):\n if(a=='[' and b==']'):\n return 1\n if(a=='{' and b=='}'):\n return 1\n if(a=='(' and b==')'):\n return 1\n return 0\ndef checkParantheses(s):\n stack=Stack()\n for i in range(len(s)):\n if s[i]=='(' or s[i]=='[' or s[i]=='{':\n stack.push(s[i])\n if s[i]==')' or s[i]==']' or s[i]=='}':\n if stack.isEmpty():\n print(\"Right parentheses are more than left parentheses\")\n return 0\n else:\n temp=stack.pop()\n if match(temp,s[i]) != 1:\n print(\"Mismatched parentheses are : {} {}\".format(temp,s[i]))\n return 0\n \n if stack.isEmpty() is True:\n print(\"Balanced Parentheses\")\n return 1\n else:\n print(\"Left parentheses more than right parentheses\")\n return 0\n\n \nif __name__=='__main__':\n s=\"((a+b)+(c+d))\"\n valid=checkParantheses(s)\n if valid==1:\n print(\"Valid Expression\")\n else:\n print(\"Invalid Expression\")\n","sub_path":"Applied Course/4.Problem Solving/4.Problems on Stacks and Queues/4.Check if parenthesis are balanced or not.py","file_name":"4.Check if parenthesis are balanced or not.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"24"}
+{"seq_id":"445079840","text":"import flask\nfrom flask import request\nimport tempfile\nimport io\n\nimport main_w as main\n\n\nclass A:\n basic_filter = False\n save_greyscale = False\n radius_div = main.RAD_DIV\n sigma = None\n save_gaussian = False\n\n\nclass App(flask.Flask):\n def __init__(self, name):\n super().__init__(name)\n\n self.route('/blurple/index.css')(self.css)\n self.route('/')(self.index)\n self.route('/blurple', methods=['POST', 'GET'])(self.blurple)\n self.route('/blurple/', methods=['POST', 'GET'])(self.blurple)\n\n def index(self):\n with open('index.html') as f:\n dat = f.read()\n\n return dat\n\n def css(self):\n return flask.send_file('index.css')\n \n def blurple(self):\n if request.method == 'POST':\n if 'file' not in request.files:\n with open('result.html') as f:\n dat = f.read()\n return dat.replace('{{body}}', '