diff --git "a/1516.jsonl" "b/1516.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1516.jsonl"
@@ -0,0 +1,642 @@
+{"seq_id":"186845629","text":"import numpy\nz=11\ndef p(k):\n\tpol=0;\n\tx=\"x\";\n\tfor i in range(k-1,1,-1):\n\t\tpol=pol+i*x**i\n\treturn x\ndef encoding(m):\n\tk=len(m)\n\tp(k)\nencoding(\"edo\")\n\t","sub_path":"uploads/actn.py","file_name":"actn.py","file_ext":"py","file_size_in_byte":148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"256930196","text":"import sys\nimport string\n\nif __name__ == \"__main__\":\n\n    temp = []\n    for line in sys.stdin:\n        line = line.strip()\n\n        line2 = line.split()\n        temp.append(line2)\n    temp = sorted(temp, key = lambda x: (x[0], int(x[2])))\n\n    for item in temp:\n        print(item[0]+\"\\t\"+item[1]+\"\\t\"+item[2])\n\n","sub_path":"01-hadoop-50/q07-10/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"139334288","text":"\ndef binary_search(s_set, ele):\n\tlow = 0\n\thigh = len(s_set) - 1\n\t# low and high set the boundaries for what section of the list you'll search.\n\twhile low <= high:\n\t\t# While the lowest index of the list is less than or equal to the highest index.\n\t\t# If a choice was found, it would be a single element of a list, meaning a list whose length is 1,\n\t\t# and if the length of the list is one, then the highest and lowest possible index are the same number,\n\t\t# meaning low and high have to be equal when a choice is found.\n\t\tmid = (low + high) // 2\n\t\t# While no choice has been found, the above line of code checks the middle element of the list.\n\t\t# The question is, how does adding the low(est index of the list) to the high(est index of the list) result,\n\t\t# in a value we can assume is equivalent to the middle index of the list?\n\n\t\tguess = s_set[mid]\n\t\tif guess == ele:\n\t\t\treturn mid\n\t\tif guess > ele:\n\t\t\thigh = mid - 1\n\t\telse:\n\t\t\tlow = mid + 1\n\treturn None\n\na_list = [1, 2, 3, 5, 7, 9]\n\n\nprint(binary_search(a_list, 9))","sub_path":"sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"480674723","text":"from rest_framework.serializers import ModelSerializer\nfrom pss_project.api.models.database.MicrobenchmarkResult import MicrobenchmarkResult\n\n\nclass MicrobenchmarkResultSerializer(ModelSerializer):\n    class Meta:\n        model = MicrobenchmarkResult\n        fields = ('time', 'jenkins_job_id', 'git_branch', 'git_commit_id',\n                  'db_version', 'environment', 'benchmark_suite', 'benchmark_name',\n                  'threads', 'min_runtime', 'wal_device', 'metrics')\n","sub_path":"performance-storage-service/pss_project/api/serializers/database/MicrobenchmarkResultSerializer.py","file_name":"MicrobenchmarkResultSerializer.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"339994156","text":"\n\n# class header\nclass _SUBURBAN():\n\tdef __init__(self,): \n\t\tself.name = \"SUBURBAN\"\n\t\tself.definitions = [u'relating to a suburb: ', u'used to suggest that something is boring and has no excitement: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = 
{}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_suburban.py","file_name":"_suburban.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"622539661","text":"#!/usr/bin/env python2\n\nfrom sys import argv\nfrom random import random as rand\n\ndef intersect(pos1, pos2, width, height):\n dx=abs(pos1[0]-pos2[0])\n if dx > width/2:\n dx = width - dx\n\n dy=abs(pos1[1]-pos2[1])\n if dy > height/2:\n dy = height - dy\n\n return dx < 1 and dy < 1\n\nprint(\"#aktiv flaeche dichte\")\nmaxtries=int(argv[1])\nfor simrun in range(0, 100):\n width=10.0+rand()*200\n height=10.0+rand()*200\n\n num=0\n triesleft=maxtries\n \n squares=[]\n\n while triesleft > 0:\n pos = [width*rand(), height*rand()]\n for pos2 in squares:\n if intersect(pos, pos2, width, height):\n triesleft -= 1\n num -= 1\n break;\n num += 1\n squares.append(pos)\n \n print(\"%d\\t%f\\t%f\"%(num, width*height, num/(width*height)))\n","sub_path":"data_en/rsa/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"175095253","text":"#!/usr/bin/python\n\"\"\" turtle-example-suite:\n\n tdemo_radioactive.py\n\nA simple drawing, suitable as a beginner's\nprogramming example.\nTherefore the animation is set to slow\nby the command speed(1). So you can easily\nfollow each and every action of the turtle.\n\nBe patient!\n\"\"\"\n\nfrom turtle import *\n\ndef square(length):\n for i in range(4):\n forward(length)\n left(90)\n\ndef sector(radius, angle):\n forward(radius)\n left(90)\n circle(radius, angle)\n left(90)\n forward(radius)\n left(180-angle)\n\ndef move(x, y):\n up()\n forward(x)\n left(90)\n forward(y)\n right(90)\n down()\n\ndef radioactive(radius1, radius2, side,\n angle=60, outlinecol=\"black\", fillcol=\"yellow\"):\n color(outlinecol)\n move(-(side/2) , -(side/2))\n \n begin_fill()\n square(side)\n color(fillcol)\n end_fill()\n move((side/2), (side/2))\n color(outlinecol)\n right(90 + angle/2)\n\n for i in range(3):\n begin_fill()\n sector(radius1,angle)\n left(120)\n #left((360 - 3 * angle)/3 + 60)\n color(outlinecol)\n end_fill()\n\n up()\n forward(radius2)\n left(90)\n down()\n\n color(fillcol)\n begin_fill()\n circle(radius2)\n color(outlinecol)\n end_fill()\n\n up()\n left(90)\n forward(radius2)\n width(1)\n\ndef main():\n reset()\n width(5)\n speed(1)\n radioactive(160, 36, 400)\n return \"Done!\"\n\nif __name__ == '__main__':\n msg = main()\n print(msg)\n mainloop()\n\n\n","sub_path":"6. Python/海龟模块-Python/TurtleDemo-Python3.x/tdemo_elementary/tdemo_radioactive.py","file_name":"tdemo_radioactive.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"483044738","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/vincent/Projects/PythonPowerMole/powermolelib/powermolelib/transferagent.py\n# Compiled at: 2020-05-11 17:44:59\n# Size of source mod 2**32: 6979 bytes\n\"\"\"\nMain code for transferagent.\n\n.. _Google Python Style Guide:\n http://google.github.io/styleguide/pyguide.html\n\nNOTE: The TransferAgent class is responsible to purge the stream (ie. 
index in stream is at COMMAND_PROMPT)\n\n\"\"\"\nimport inspect, logging, os.path, pexpect\n__author__ = 'Vincent Schouten '\n__docformat__ = 'google'\n__date__ = '10-05-2019'\n__copyright__ = 'Copyright 2020, Vincent Schouten'\n__credits__ = ['Vincent Schouten']\n__license__ = 'MIT'\n__maintainer__ = 'Vincent Schouten'\n__email__ = ''\n__status__ = 'Development'\nCOMMAND_PROMPT = '[#$] '\n\nclass LoggerMixin:\n __doc__ = 'Contains a logger method for use by other classes.'\n\n def __init__(self):\n logger_basename = 'agent'\n self._logger = logging.getLogger(f\"{logger_basename}.{self.__class__.__name__}\")\n\n\nclass TransferAgent(LoggerMixin):\n __doc__ = 'Establishes a connection to the target destination host via one or more intermediaries.'\n\n def __init__(self, path_ssh_cfg_minitor, all_hosts):\n super().__init__()\n self.all_hosts = all_hosts\n self.child = None\n self.path_ssh_cfg_minitor = path_ssh_cfg_minitor\n\n def __str__(self):\n return 'TransferAgent'\n\n def create_ssh_config(self):\n \"\"\"______________.\"\"\"\n pass\n\n @property\n def _path_to_agent_module(self):\n running_script = inspect.getframeinfo(inspect.currentframe()).filename\n running_script_dir = os.path.dirname(os.path.abspath(running_script))\n path_file = os.path.join(running_script_dir, 'payload', 'agent.py')\n self._logger.debug('minitoragent.py resides in: %s', running_script_dir)\n return path_file\n\n def _generate_ssh_runtime_param(self):\n last_host = self.all_hosts[(-1)]\n if len(self.all_hosts) == 1:\n order_of_hosts = f\"{self.all_hosts[0]}\"\n else:\n order_of_hosts = ''\n for i, host in enumerate(self.all_hosts):\n if i == 0:\n order_of_hosts += (f\"{host}\")\n else:\n order_of_hosts += f\",{host}\"\n\n runtime_param = f\"scp -v -F {self.path_ssh_cfg_minitor} -o 'ProxyJump {order_of_hosts}' {self._path_to_agent_module} \"\n runtime_param += f\"{last_host}:/tmp\"\n self._logger.debug(runtime_param)\n return runtime_param\n\n def start(self):\n \"\"\"_______________________.\"\"\"\n result = True\n try:\n try:\n self.child = pexpect.spawn((self._generate_ssh_runtime_param()), env={'TERM': 'dumb'})\n self._logger.debug('going through the stream to match patterns')\n for hostname in self.all_hosts:\n index = self.child.expect([\n f\"Authenticated to {hostname}\", 'Last failed login:', 'Last login:', 'socket error',\n 'not accessible', 'fingerprint', 'open failed: connect failed:', 'No such file', pexpect.TIMEOUT])\n if index == 0:\n self._logger.info('authenticated to %s', hostname)\n elif index == 1:\n self._logger.debug('there were failed login attempts')\n elif index == 2:\n self._logger.debug('there where no failed login attempts')\n elif index == 3:\n self._logger.error('socket error. probable cause: SSH service on proxy or target machine disabled')\n self.child.terminate()\n result = False\n elif index == 4:\n self._logger.error('the identity file is not accessible')\n self.child.terminate()\n result = False\n elif index == 5:\n self._logger.warning('warning: hostname automatically added to list of known hosts')\n self.child.sendline('yes')\n elif index == 6:\n self._logger.error('ssh could not connect to %s', hostname)\n self.child.terminate()\n result = False\n else:\n if index == 7:\n continue\n if index == 8:\n self._logger.error('TIMEOUT exception was thrown. 
ssh could probably not connect to %s', hostname)\n self.child.terminate()\n result = False\n else:\n self._logger.error('unknown state reached')\n result = False\n\n except pexpect.exceptions.ExceptionPexpect:\n self._logger.error('EOF is read; ssh has exited abnormally')\n self.child.terminate()\n result = False\n\n finally:\n return\n\n return result","sub_path":"pycfiles/powermolelib-0.1.1-py3.7/transferagent.cpython-37.py","file_name":"transferagent.cpython-37.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"298285880","text":"import requests\nfrom requests import get\nfrom bs4 import BeautifulSoup\nnames = ['Name']\nurls = ['URL']\nprices = ['Price']\navg_ratings = ['Average rating']\nauthors = ['Author']\nnum_ratings = ['Number of ratings']\nurl = 'https://www.amazon.com/best-sellers-books-Amazon/zgbs/books/ref=zg_bs_pg_1?_encoding=UTF8&pg='\nfor l in range(0, 5):\n response = get(url + str(l+1))\n html_soup = BeautifulSoup(response.text, 'html.parser')\n book_containers = html_soup.find_all('div', class_='zg_itemWrapper')\n for book in book_containers:\n\n name = ((book.find(\n 'div', class_='p13n-sc-truncate p13n-sc-line-clamp-1').text).encode('utf-8')).strip()\n names.append(name)\n\n author = book.find('a', class_='a-size-small a-link-child')\n if author is not None:\n author = ((book.find(\n 'a', class_='a-size-small a-link-child').get_text()).encode('utf-8')).strip()\n else:\n author = \"Not available\"\n authors.append(author)\n\n price = book.find('span', class_='p13n-sc-price')\n if price is not None:\n price = (\n (book.find('span', class_='p13n-sc-price').get_text()).encode('utf-8')).strip()\n prices.append(price)\n else:\n price = \"Not available\"\n prices.append(price)\n\n num_rating = book.find('a', class_='a-size-small a-link-normal')\n if num_rating is not None:\n num_rating = (\n (book.find('a', class_='a-size-small a-link-normal').text).encode('utf-8')).strip()\n else:\n num_rating = \"Not available\"\n num_ratings.append(num_rating)\n\n avg_rating = book.find('span', class_='a-icon-alt')\n if avg_rating is not None and \"Prime\":\n avg_rating = (\n (book.find('span', class_='a-icon-alt').text).encode('utf-8')).strip()\n else:\n avg_rating = \"Not available\"\n avg_ratings.append(avg_rating)\n\n lol = book.find_next('a', class_='a-link-normal')\n if lol is not None:\n urls.append(((lol.get('href')).encode('utf-8')).strip())\n else:\n urls.append(\"Not available\")\n\nfile = open(\"com_book.csv\", \"w\")\nlength = len(names)\nfor i in range(0, length):\n file.write(names[i]+\";\"+urls[i]+\";\"+authors[i]+\";\" +\n prices[i]+\";\"+num_ratings[i]+\";\"+avg_ratings[i])\n file.write(\"\\n\")\nfile.close()\n","sub_path":"com_bestseller.py","file_name":"com_bestseller.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"654520420","text":"\nimport torch\nimport pandas as pd\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nfrom time import time\nimport os\nimport argparse\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 10, 5)\n 
self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(10, 20, 5)\n self.fc1 = nn.Linear(20*53*53, 64)\n self.fc2 = nn.Linear(64, 32)\n self.fc3 = nn.Linear(32, 2)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.pool(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.pool(x)\n x = x.view(-1, 20*53*53)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n x = x.squeeze(1)\n # x = nn.functional.log_softmax(x, dim=1)\n return x\n\n\ndef get_name(args):\n name = 'batch size {} learning rate {} epochs {} model {} transfer {} valid ratio {} shuffle {} seed {}'.format(\n args.batch_size, args.learning_rate, args.epochs, args.model, args.transfer,\n args.valid_ratio, args.shuffle, args.seed\n )\n\n return name\n\n\ndef get_loader(args):\n valid_ratio = args.valid_ratio\n batch_size = args.batch_size\n shuffle = args.shuffle\n seed = args.seed\n\n train_transforms = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n\n valid_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n\n data_dir = os.getcwd() + '/data_binary'\n train_datasets = datasets.ImageFolder(data_dir, transform=train_transforms)\n valid_datasets = datasets.ImageFolder(data_dir, transform=valid_transforms)\n\n length = len(train_datasets)\n indices = list(range(length))\n split = int(np.floor(valid_ratio * length))\n\n if shuffle:\n np.random.seed(seed)\n np.random.shuffle(indices)\n\n train_ind, valid_ind = indices[split:], indices[:split]\n train_sam = SubsetRandomSampler(train_ind)\n valid_sam = SubsetRandomSampler(valid_ind)\n\n train_loader = DataLoader(train_datasets, batch_size=batch_size, sampler=train_sam)\n valid_loader = DataLoader(valid_datasets, batch_size=batch_size, sampler=valid_sam)\n\n return train_loader, valid_loader\n\n\ndef write_record(record, args):\n record = np.transpose(record)\n\n df = pd.DataFrame({\n 'step': record[0], 'time': record[1], 'epoch': record[2], 'train_loss': record[3],\n 'train_error': record[4], 'valid_error': record[5]\n })\n df.to_csv(os.getcwd() + '/result/{}.csv'.format(get_name(args)), index=False)\n\n return\n\n\ndef evaluate(model, valid_loader):\n correct = 0\n\n for i, data in enumerate(valid_loader, 1):\n inputs, label = data[0], data[1]\n outputs = model(inputs)\n _, predict = torch.max(outputs, 1)\n correct += torch.sum(predict == label)\n\n return correct.double() / len(valid_loader.dataset)\n\n\ndef load_model(args):\n\n learning_rate = args.learning_rate\n model = Net()\n\n # optimizer = optim.Adam(model.fc.parameters() if transfer else model.parameters(), lr=learning_rate, eps=1e-4)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate, eps=1e-4)\n scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n\n loss_fnc = nn.CrossEntropyLoss()\n\n return model, optimizer, scheduler, loss_fnc\n\n\ndef train(args, train_loader, valid_loader):\n epochs = args.epochs\n curr_path = os.getcwd()\n\n if not os.path.exists(curr_path + '/model'):\n os.makedirs(curr_path + '/model')\n\n if not os.path.exists(curr_path + '/result'):\n os.makedirs(curr_path + '/result')\n\n model, optimizer, scheduler, loss_fnc = load_model(args)\n\n steps = 0\n record = []\n max_acc = 0\n start = time()\n\n for epoch in range(epochs):\n # scheduler.step()\n\n running_loss = 0.0\n running_correct = 0\n\n for i, data in enumerate(train_loader, 1):\n\n steps += 1\n print(steps)\n inputs, label = data[0], 
data[1]\n\n            optimizer.zero_grad()\n\n            outputs = model(inputs)\n            _, predict = torch.max(outputs, 1)\n            loss = loss_fnc(outputs, label)\n\n            loss.backward()\n            optimizer.step()\n\n            running_loss += loss.item() * inputs.size(0)\n            running_correct += torch.sum(predict == label.data)\n\n        eval_acc = evaluate(model, valid_loader)\n\n        # if eval_acc >= max_acc:\n        #     max_acc = eval_acc\n        #     torch.save(model, curr_path + '/model/{}.pt'.format(get_name(args)))\n\n        rec = [\n            steps, time() - start, epoch + 1, running_loss/(i+1), running_correct.double()/ len(train_loader.dataset),\n            eval_acc\n        ]\n        # record.append(rec)\n\n        print('ep:', rec[2], ', los:', rec[3], ', acc:', rec[4], ', val:', rec[5])\n\n    # write_record(record, args)\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--valid_ratio', type=float, default=0.2, help='validation set proportion')\n    parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')\n    parser.add_argument('--transfer', type=int, default=1, help='transfer learning')\n    parser.add_argument('--model', type=str, default='resnet', help='type of model')\n    parser.add_argument('--batch_size', type=int, default=64, help='batch_size')\n    parser.add_argument('--shuffle', type=int, default=1, help='shuffle or not')\n    parser.add_argument('--epochs', type=int, default=25, help='num of epochs')\n    parser.add_argument('--seed', type=int, default=324, help='random seed')\n\n    args = parser.parse_args()\n\n    train_loader, valid_loader = get_loader(args)\n\n    train(args, train_loader, valid_loader)\n","sub_path":"CNNabis-master/binary_classifier.py","file_name":"binary_classifier.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
+{"seq_id":"30329840","text":"import numpy as np\nfrom numpy.linalg import inv\nfrom numpy import dot\nfrom numpy import mat\n\n# y=2x\nX = mat([1, 2, 3]).reshape(3, 1)  # reshape the 1x3 row vector into a 3x1 column vector\nY = 2*X\nprint(X)\nprint(Y)\n\n# Recover theta from X and Y via the closed-form normal equation: theta = inv(X.T * X) * X.T * Y\ntheta = dot(dot(inv(dot(X.T, X)), X.T), Y)\nprint(theta)\n\n# gradient descent\n# theta = theta - alpha*(theta*X-Y)*X\ntheta = 1\nalpha = 0.1\n# range(100) is equivalent to [0, 1, 2, 3, 4, 5, ..., 99]\n\nfor i in range(100):\n    theta = theta - np.sum(alpha*(dot(X, theta) - Y)*X.reshape(1, 3))/3.  # gradient-descent update formula\n\nprint(theta)\n","sub_path":"linear_demo02.py","file_name":"linear_demo02.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"151469842","text":"# http://www.numpy.org/\nimport numpy as np\n\n__all__ = ['interatomicpotential']\n\ndef interatomicpotential(database, keys, content_dict=None, record='potential_LAMMPS',\n                         status='active', query=None, **kwargs):\n    \"\"\"\n    Builds parameter sets related to interatomic potentials.\n    \"\"\"\n    if content_dict is None:\n        content_dict = {}\n\n    # Set all status value\n    if status == 'all':\n        status = ['active', 'retracted', 'superseded']\n\n    # Fetch potential records and df\n    potentials, potential_df = database.get_records(style=record, return_df=True,\n                                                    query=query, status=status, **kwargs)\n    print(len(potential_df), 'matching interatomic potentials found')\n    if len(potential_df) == 0:\n        raise ValueError('No matching interatomic potentials found')\n\n    # Initialize inputs keys\n    inputs = {}\n    for key in keys:\n        inputs[key] = []\n\n    # Loop over all potentials\n    for i in potential_df.index:\n        potential = potentials[i]\n        content_dict[potential.name] = potential.content\n\n        # Loop over all input keys\n        for key in keys:\n            if key == 'potential_file':\n                inputs['potential_file'].append(f'{potential.name}.json')\n            elif key == 'potential_content':\n                inputs['potential_content'].append(f'record {potential.name}')\n            elif key == 'potential_dir':\n                inputs['potential_dir'].append(potential.name)\n            elif key == 'potential_dir_content':\n                inputs['potential_dir_content'].append(f'tar {potential.name}')\n            else:\n                inputs[key].append('')\n\n    return inputs, content_dict","sub_path":"iprPy/input/buildcombos_functions/interatomicpotential.py","file_name":"interatomicpotential.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"109362059","text":"from deap import creator, tools, algorithms, gp, base\nfrom deap.gp import *\nimport operator\nfrom scipy.io import arff\nfrom copy import deepcopy\nfrom io import StringIO\nimport math\nimport os\nimport numpy\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n# open the arff file\ndirectory = os.getcwd().split(\"src\")[0]\nfile_name = \"\\\\china.arff\"\nraw_data = open(directory + file_name).read()\n\n# read arff\nf = StringIO(raw_data)\ndata, meta = arff.loadarff(f)\n\n# Get attribute names and indices of values\nattributes = {}\nfor i in range(len(meta.names())):\n    attributes[meta.names()[i]] = i\n\n# delete what we won't use\nattributes.pop(\"ID\")\nattributes.pop(\"Added\")\nattributes.pop(\"Changed\")\nattributes.pop(\"Deleted\")\nattributes.pop(\"Dev.Type\")\nattributes.pop(\"N_effort\")\n\n\nfinal_data = []\n\n# convert data to a dictionary\nfor i in range(len(data)):\n    current = data[i]\n    test_dict = {}\n    for key in attributes.keys():\n        value_expression = current[attributes[key]]\n        test_dict[key] = int(value_expression)\n\n    final_data.append(test_dict)\n\n# split data into training and testData\ntrainData = final_data[: int(len(data) * 0.8)]\ntestData = final_data[int(len(data) * 0.8) :]\n\n# recommended in deap docs\ndef protectedDiv(x, y):\n    try:\n        return x / y\n    except ZeroDivisionError:\n        return 1\n\n\ndef protectedSqrt(x):\n    if x >= 0:\n        return x ** (0.5)\n    else:\n        # how else to handle?\n        return abs(x) ** (0.5)\n\n\ndef protectedLog10(x):\n    if x <= 0.0:\n        return 0\n    else:\n        return math.log10(x)\n\n\ndef 
protectedLog2(x):\n if x <= 0.0:\n return 0\n else:\n return math.log2(x)\n\n\ndef distance(x, y):\n if x >= y:\n result = x - y\n else:\n result = y - x\n return result\n\n\n# create primitve set and add operators\nprimitive_set = PrimitiveSet(\"main\", 12)\nprimitive_set.addPrimitive(operator.add, 2)\nprimitive_set.addPrimitive(operator.mul, 2)\nprimitive_set.addPrimitive(protectedSqrt, 1)\nprimitive_set.addPrimitive(protectedLog2, 1)\nprimitive_set.addPrimitive(protectedLog10, 1)\nprimitive_set.addEphemeralConstant(\n \"ran%d\" % random.randrange(10, 1000), lambda: random.randrange(-1, 1)\n)\nprimitive_set.addPrimitive(protectedDiv, 2)\nprimitive_set.addPrimitive(operator.sub, 2)\nprimitive_set.addPrimitive(math.sin, 1)\nprimitive_set.addPrimitive(math.cos, 1)\n\n# rename the arguments\ncount = 0\nfor key in attributes.keys():\n argName = \"ARG%d\" % count\n primitive_set.renameArguments(**{argName: key})\n count += 1\n\n# Minimise both the MAE and RMSE\ncreator.create(\"FitnessMin\", base.Fitness, weights=(-1.0, -1.0))\ncreator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMin)\n\n\ndef main(popSize, mutation, cx, nGens, tournSize):\n # popSize, mutation, cx, nGens, tournSize = 1000, 0.1, 0.75, 50, 7\n minTreeSize, maxTreeSize = 4, 14\n gp_toolbox = base.Toolbox()\n gp_toolbox.register(\n \"expr\",\n gp.genHalfAndHalf,\n pset=primitive_set,\n min_=minTreeSize,\n max_=maxTreeSize,\n )\n gp_toolbox.register(\n \"individual\", tools.initIterate, creator.Individual, gp_toolbox.expr\n )\n gp_toolbox.register(\"population\", tools.initRepeat, list, gp_toolbox.individual)\n gp_toolbox.register(\"compile\", gp.compile, pset=primitive_set)\n\n # https://deap.readthedocs.io/en/master/examples/gp_symbreg.html\n def evaluate(individual, trainData):\n func = gp_toolbox.compile(individual)\n # print(individual)\n difference = 0\n differenceSquared = 0\n for i in range(len(trainData)):\n try:\n currentValue = func(\n trainData[i][\"AFP\"],\n trainData[i][\"Input\"],\n trainData[i][\"Output\"],\n trainData[i][\"Enquiry\"],\n trainData[i][\"File\"],\n trainData[i][\"Interface\"],\n trainData[i][\"PDR_AFP\"],\n trainData[i][\"PDR_UFP\"],\n trainData[i][\"NPDR_AFP\"],\n trainData[i][\"NPDU_UFP\"],\n trainData[i][\"Resource\"],\n trainData[i][\"Duration\"],\n )\n except:\n print(\"integer too large!\")\n currentValue = 2, 147, 483, 647\n\n absoluteError = distance(trainData[i][\"Effort\"], currentValue)\n difference += absoluteError\n differenceSquared += pow(absoluteError, 2)\n\n mae = difference / len(trainData)\n rmse = protectedSqrt(differenceSquared / len(trainData))\n return mae, rmse\n\n hof = tools.HallOfFame(popSize)\n\n gp_toolbox.register(\"evaluate\", evaluate, trainData=trainData)\n gp_toolbox.register(\"mate\", gp.cxOnePoint)\n gp_toolbox.register(\"select\", tools.selTournament, tournsize=tournSize)\n gp_toolbox.register(\n \"expr_mut\", gp.genHalfAndHalf, min_=minTreeSize, max_=maxTreeSize\n )\n gp_toolbox.register(\n \"mutate\", gp.mutUniform, expr=gp_toolbox.expr_mut, pset=primitive_set\n )\n\n # https://deap.readthedocs.io/en/master/examples/gp_symbreg.html\n # limit overall tree height\n gp_toolbox.decorate(\n \"mate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=17)\n ) # static limit of 17 recomended in deap docs\n gp_toolbox.decorate(\n \"mutate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=17)\n )\n\n # register stats for fitness and size of each individual (tree)\n mstats = tools.Statistics(lambda individual: 
individual.fitness.values)\n # stats_size = tools.Statistics(len)\n\n # mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)\n mstats.register(\"avg\", numpy.mean, axis=0)\n mstats.register(\"std\", numpy.std, axis=0)\n mstats.register(\"min\", numpy.min, axis=0)\n mstats.register(\"max\", numpy.max, axis=0)\n mstats.register(\"all_gens\", deepcopy)\n log = tools.Logbook()\n\n pop = gp_toolbox.population(n=nGens)\n print(\"Starting GA\")\n hof.clear()\n pop, log = algorithms.eaMuPlusLambda(\n pop, gp_toolbox, popSize, popSize, cx, mutation, nGens, mstats, hof, True\n )\n print(\n \"GA Complete after %d gens, tournament selection between %d, mutation rate of %f, crossover rate of %f\"\n % (nGens, tournSize, mutation, cx)\n )\n\n # Results Time\n # Calculate Correlation Coefficient\n coefficients = []\n maes = []\n guesses = []\n answers = []\n final_function = None\n\n final_train_mae = 0\n final_train_rmse = 0\n\n for i in range(1):\n hof_func = gp_toolbox.compile(hof[i])\n current_cc = 0\n current_mae = 0\n guesses.clear()\n answers.clear()\n\n for j in range(len(trainData)):\n guess = hof_func(\n trainData[j][\"AFP\"],\n trainData[j][\"Input\"],\n trainData[j][\"Output\"],\n trainData[j][\"Enquiry\"],\n trainData[j][\"File\"],\n trainData[j][\"Interface\"],\n trainData[j][\"PDR_AFP\"],\n trainData[j][\"PDR_UFP\"],\n trainData[j][\"NPDR_AFP\"],\n trainData[j][\"NPDU_UFP\"],\n trainData[j][\"Resource\"],\n trainData[j][\"Duration\"],\n )\n guesses.append(guess)\n answers.append(trainData[j][\"Effort\"])\n\n diff = 0\n diffSquared = 0\n for j in range(len(guesses)):\n difference = distance(guesses[j], answers[j])\n diff += difference\n diffSquared += pow(difference, 2)\n\n MAE = diff / len(trainData)\n RMSE = protectedSqrt(diffSquared / len(trainData))\n final_train_mae = MAE\n final_train_rmse = RMSE\n current_cc = numpy.corrcoef(guesses, answers)[0, 1]\n coefficients.append(current_cc)\n print(\"\\nCoefficient for Best Individual on training set = %f\" % (current_cc))\n print(\"MAE for Best Individual on training set = %f\\n\" % (MAE))\n print(\"RMSE for Best Individual on training set = %f\\n\" % (RMSE))\n final_function = hof_func\n\n final_answers = []\n final_guesses = []\n\n for i in range(len(testData)):\n currentDataPoint = testData[i]\n answer = currentDataPoint[\"Effort\"]\n guess = final_function(\n currentDataPoint[\"AFP\"],\n currentDataPoint[\"Input\"],\n currentDataPoint[\"Output\"],\n currentDataPoint[\"Enquiry\"],\n currentDataPoint[\"File\"],\n currentDataPoint[\"Interface\"],\n currentDataPoint[\"PDR_AFP\"],\n currentDataPoint[\"PDR_UFP\"],\n currentDataPoint[\"NPDR_AFP\"],\n currentDataPoint[\"NPDU_UFP\"],\n currentDataPoint[\"Resource\"],\n currentDataPoint[\"Duration\"],\n )\n final_answers.append(answer)\n final_guesses.append(guess)\n\n diff = 0\n diffSquared = 0\n\n for i in range(len(final_guesses)):\n absoluteError = distance(final_guesses[i], final_answers[i])\n diff += absoluteError\n diffSquared += pow(absoluteError, 2)\n\n final_mae = diff / len(final_guesses)\n final_mae_diff = distance(final_mae, final_train_mae)\n final_rmse = protectedSqrt(diffSquared / len(testData))\n final_rmse_diff = distance(final_rmse, final_train_rmse)\n final_cc = numpy.corrcoef(final_guesses, final_answers)[0, 1]\n\n print(\"\\nCoefficient for Best Individual on test set = %f\" % (final_cc))\n print(\"MAE for Best Individual on test set = %f\\n\" % (final_mae))\n print(\"RMSE for Best Individual on test set = %f\\n\" % (final_rmse))\n\n return final_mae, 
final_mae_diff, final_rmse, final_rmse_diff, final_cc, hof[0]\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/GP_CHINA_MO.py","file_name":"GP_CHINA_MO.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"223952331","text":"#\r\n# @lc app=leetcode id=559 lang=python3\r\n#\r\n# [559] Maximum Depth of N-ary Tree\r\n#\r\n# https://leetcode.com/problems/maximum-depth-of-n-ary-tree/description/\r\n#\r\n# algorithms\r\n# Easy (65.49%)\r\n# Likes: 352\r\n# Dislikes: 20\r\n# Total Accepted: 47.5K\r\n# Total Submissions: 72.5K\r\n# Testcase Example: '{\"$id\":\"1\",\"children\":[{\"$id\":\"2\",\"children\":[{\"$id\":\"5\",\"children\":[],\"val\":5},{\"$id\":\"6\",\"children\":[],\"val\":6}],\"val\":3},{\"$id\":\"3\",\"children\":[],\"val\":2},{\"$id\":\"4\",\"children\":[],\"val\":4}],\"val\":1}'\r\n#\r\n# Given a n-ary tree, find its maximum depth.\r\n# \r\n# The maximum depth is the number of nodes along the longest path from the root\r\n# node down to the farthest leaf node.\r\n# \r\n# For example, given a 3-ary tree:\r\n# \r\n# \r\n# \r\n# \r\n# \r\n# \r\n# We should return its max depth, which is 3.\r\n# \r\n# \r\n# \r\n# Note:\r\n# \r\n# \r\n# The depth of the tree is at most 1000.\r\n# The total number of nodes is at most 5000.\r\n# \r\n# \r\n#\r\n\"\"\"\r\n# Definition for a Node.\r\nclass Node:\r\n def __init__(self, val, children):\r\n self.val = val\r\n self.children = children\r\n\"\"\"\r\nclass Solution:\r\n def maxDepth(self, root: 'Node') -> int:\r\n\r\n # method 2\r\n maxd=0\r\n if root==None:\r\n return 0\r\n\r\n for v in root.children:\r\n tmp=self.maxDepth(v)\r\n maxd=max(tmp, maxd)\r\n return maxd+1\r\n \r\n'''\r\nmethod 1\r\n q=collections.deque([root])\r\n ans=0\r\n if root==None:\r\n return 0\r\n while q:\r\n ans+=1\r\n #print(q[0].val)\r\n tmp=[]\r\n n=len(q)\r\n for i in range(n):\r\n for v in q[0].children:\r\n #print(\"child \", v.val)\r\n tmp.append(v)\r\n #print(len(q))\r\n q.popleft()\r\n q.extend(tmp)\r\n return ans\r\n'''\r\n\r\n","sub_path":"559.maximum-depth-of-n-ary-tree.py","file_name":"559.maximum-depth-of-n-ary-tree.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"40869366","text":"import numpy as np\n\nfrom itertools import combinations\nimport dask.array as dsa\n\nfrom ..core import histogram\nfrom .fixtures import empty_dask_array\n\nimport pytest\n\n\n@pytest.mark.parametrize(\"density\", [False, True])\n@pytest.mark.parametrize(\"block_size\", [None, 1, 2])\n@pytest.mark.parametrize(\"axis\", [1, None])\ndef test_histogram_results_1d(block_size, density, axis):\n nrows, ncols = 5, 20\n # Setting the random seed here prevents np.testing.assert_allclose\n # from failing beow. 
We should investigate this further.\n np.random.seed(2)\n data = np.random.randn(nrows, ncols)\n bins = np.linspace(-4, 4, 10)\n\n h = histogram(data, bins=bins, axis=axis, block_size=block_size, density=density)\n\n expected_shape = (nrows, len(bins) - 1) if axis == 1 else (len(bins) - 1,)\n assert h.shape == expected_shape\n\n # make sure we get the same thing as numpy.histogram\n if axis:\n expected = np.stack(\n [np.histogram(data[i], bins=bins, density=density)[0] for i in range(nrows)]\n )\n else:\n expected = np.histogram(data, bins=bins, density=density)[0]\n norm = nrows if (density and axis) else 1\n np.testing.assert_allclose(h, expected / norm)\n\n if density:\n widths = np.diff(bins)\n integral = np.sum(h * widths)\n np.testing.assert_allclose(integral, 1.0)\n\n\n@pytest.mark.parametrize(\"block_size\", [None, 1, 2])\ndef test_histogram_results_1d_weighted(block_size):\n nrows, ncols = 5, 20\n data = np.random.randn(nrows, ncols)\n bins = np.linspace(-4, 4, 10)\n h = histogram(data, bins=bins, axis=1, block_size=block_size)\n weights = 2 * np.ones_like(data)\n h_w = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size)\n np.testing.assert_array_equal(2 * h, h_w)\n\n\n# @pytest.mark.skip(reason=\"Weight broadcasting on numpy arrays is not yet implemented\")\n@pytest.mark.parametrize(\"block_size\", [None, 1, 2, \"auto\"])\ndef test_histogram_results_1d_weighted_broadcasting(block_size):\n nrows, ncols = 5, 20\n data = np.random.randn(nrows, ncols)\n bins = np.linspace(-4, 4, 10)\n h = histogram(data, bins=bins, axis=1, block_size=block_size)\n weights = 2 * np.ones((1, ncols))\n h_w = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size)\n np.testing.assert_array_equal(2 * h, h_w)\n\n\n@pytest.mark.parametrize(\"block_size\", [None, 1, 2])\ndef test_histogram_right_edge(block_size):\n \"\"\"Test that last bin is both left- and right-edge inclusive as it\n is for numpy.histogram\n \"\"\"\n nrows, ncols = 5, 20\n data = np.ones((nrows, ncols))\n bins = np.array([0, 0.5, 1]) # All data at rightmost edge\n\n h = histogram(data, bins=bins, axis=1, block_size=block_size)\n assert h.shape == (nrows, len(bins) - 1)\n\n # make sure we get the same thing as histogram (all data in the last bin)\n hist, _ = np.histogram(data, bins=bins)\n np.testing.assert_array_equal(hist, h.sum(axis=0))\n\n # now try with no axis\n h_na = histogram(data, bins=bins, block_size=block_size)\n np.testing.assert_array_equal(hist, h_na)\n\n\ndef test_histogram_results_2d():\n nrows, ncols = 5, 20\n data_a = np.random.randn(nrows, ncols)\n data_b = np.random.randn(nrows, ncols)\n nbins_a = 9\n bins_a = np.linspace(-4, 4, nbins_a + 1)\n nbins_b = 10\n bins_b = np.linspace(-4, 4, nbins_b + 1)\n\n h = histogram(data_a, data_b, bins=[bins_a, bins_b])\n assert h.shape == (nbins_a, nbins_b)\n\n hist, _, _ = np.histogram2d(data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b])\n np.testing.assert_array_equal(hist, h)\n\n\ndef test_histogram_results_2d_density():\n nrows, ncols = 5, 20\n data_a = np.random.randn(nrows, ncols)\n data_b = np.random.randn(nrows, ncols)\n nbins_a = 9\n bins_a = np.linspace(-4, 4, nbins_a + 1)\n nbins_b = 10\n bins_b = np.linspace(-4, 4, nbins_b + 1)\n\n h = histogram(data_a, data_b, bins=[bins_a, bins_b], density=True)\n assert h.shape == (nbins_a, nbins_b)\n\n hist, _, _ = np.histogram2d(\n data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b], density=True\n )\n np.testing.assert_allclose(hist, h)\n\n # check integral is 1\n widths_a = 
np.diff(bins_a)\n widths_b = np.diff(bins_b)\n areas = np.outer(widths_a, widths_b)\n integral = np.sum(hist * areas)\n np.testing.assert_allclose(integral, 1.0)\n\n\ndef test_histogram_results_3d_density():\n nrows, ncols = 5, 20\n data_a = np.random.randn(nrows, ncols)\n data_b = np.random.randn(nrows, ncols)\n data_c = np.random.randn(nrows, ncols)\n nbins_a = 9\n bins_a = np.linspace(-4, 4, nbins_a + 1)\n nbins_b = 10\n bins_b = np.linspace(-4, 4, nbins_b + 1)\n nbins_c = 9\n bins_c = np.linspace(-4, 4, nbins_c + 1)\n\n h = histogram(data_a, data_b, data_c, bins=[bins_a, bins_b, bins_c], density=True)\n\n assert h.shape == (nbins_a, nbins_b, nbins_c)\n\n hist, _ = np.histogramdd(\n (data_a.ravel(), data_b.ravel(), data_c.ravel()),\n bins=[bins_a, bins_b, bins_c],\n density=True,\n )\n\n np.testing.assert_allclose(hist, h)\n\n # check integral is 1\n widths_a = np.diff(bins_a)\n widths_b = np.diff(bins_b)\n widths_c = np.diff(bins_c)\n areas = np.einsum(\"i,j,k\", widths_a, widths_b, widths_c)\n integral = np.sum(hist * areas)\n np.testing.assert_allclose(integral, 1.0)\n\n\n@pytest.mark.parametrize(\"block_size\", [None, 5, \"auto\"])\n@pytest.mark.parametrize(\"use_dask\", [False, True])\ndef test_histogram_shape(use_dask, block_size):\n \"\"\"These tests just verify that arrays with the right shape come out.\n They don't verify correctness.\"\"\"\n\n shape = 10, 15, 12, 20\n if use_dask:\n b = empty_dask_array(shape, chunks=(1,) + shape[1:])\n else:\n b = np.random.randn(*shape)\n bins = np.linspace(-4, 4, 27)\n\n # no axis\n c = histogram(b, bins=bins, block_size=block_size)\n assert c.shape == (len(bins) - 1,)\n # same thing\n for axis in [(0, 1, 2, 3), (0, 1, 3, 2), (3, 2, 1, 0), (3, 2, 0, 1)]:\n c = histogram(b, bins=bins, axis=axis)\n assert c.shape == (len(bins) - 1,)\n if use_dask:\n assert isinstance(c, dsa.Array)\n\n # scalar axis (check positive and negative)\n for axis in list(range(4)) + list(range(-1, -5, -1)):\n c = histogram(b, bins=bins, axis=axis, block_size=block_size)\n shape = list(b.shape)\n del shape[axis]\n expected_shape = tuple(shape) + (len(bins) - 1,)\n assert c.shape == expected_shape\n if use_dask:\n assert isinstance(c, dsa.Array)\n\n # two axes\n for i, j in combinations(range(4), 2):\n axis = (i, j)\n c = histogram(b, bins=bins, axis=axis, block_size=block_size)\n shape = list(b.shape)\n partial_shape = [shape[k] for k in range(b.ndim) if k not in axis]\n expected_shape = tuple(partial_shape) + (len(bins) - 1,)\n assert c.shape == expected_shape\n if use_dask:\n assert isinstance(c, dsa.Array)\n","sub_path":"xhistogram/test/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":6957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"262702725","text":"import numpy\n\n\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\nclass _Layer:\n def __init__(self):\n self.nodes = {} # node -> int index\n self.nodes__ = {} # int index -> node\n self.probs_ = numpy.zeros(128)\n self.N = 128\n self.top = 0\n self.total = 0\n pass\n\n def probs(self, node, req):\n \"\"\"gives calculated probabilty, 'req' request for all sbilings in layer\n As request number form same node increases, it occurrence probabilty decreases\n by exponetial rule. \n e.g. quanta = 8, 8 th note in music. 1 / 8 th of beat \n p(0) = 2 at req == 0\n p(0) = 1 at req == 8\n \n Formultation: \n y = ab^x, .... y = p(0) and x = req\n @ p(0) = 2, req = 0 \n 2 = ab^0\n a = 2 .... 
therefore\n p(0) = 0.5, req == 8\n 2 * b ^ 8 = 1\n b = (0.5) ^ (1 / 8)\n b = 0.9170040432046712\n\n Args:\n req (int): request number\n \"\"\"\n a , b = 1, 0.9170040432046712 # decay co-effecient\n f = a * b ** (req) # f determines, how much percentage should it reduce to that of orginal prob\n probs = self.increase_probs(node)\n p = probs[self.nodes[node]] * f # p after decrease \n dim = probs[self.nodes[node]] - p # offset that will distribute to other node, based on their probabilties\n dim_total = self.total - probs[self.nodes[node]]\n p_arr = numpy.zeros(128, dtype = int)\n p_arr[ : self.nodes[node]] = probs[ : self.nodes[node]] + probs[ : self.nodes[node]] * (dim / dim_total)\n p_arr[self.nodes[node] + 1: ] = probs[self.nodes[node] + 1 : ] + probs[self.nodes[node] + 1 :] * (dim / dim_total)\n p_arr[self.nodes[node]] = p + (dim_total - sum(p_arr)) \n # print(f\"node : {node}, req : {req}, parr : {p_arr}\")\n return p_arr\n\n def increase_probs(self, node, weight = 0.40):\n probs = numpy.array(self.probs_)\n node_probs = probs[self.nodes[node]]\n offset = probs * (weight)\n probs -= offset\n probs[self.nodes[node]] = node_probs + sum(offset) - offset[self.nodes[node]]\n # print(f\"total : {sum(self.probs_)} == {sum(probs)}\")\n return probs\n\n def add_node(self, node):\n if self.top > 128: raise IndexError(\"Capacity of __layer fulled. C : [128]\")\n if node in self.nodes:\n self.probs_[self.nodes[node]] += 1\n else :\n self.nodes[node] = self.top\n self.nodes__[self.top] = node\n self.probs_[self.nodes[node]] = 1\n self.top += 1\n self.total += 1\n \n def __str__(self): \n b_ = list(self.nodes.keys())\n v_ = list((self.probs_[self.nodes[k]] for k in b_))\n return str(list(zip(b_, v_)))\n\nclass CircularChain:\n def __init__(self, seq, quanta = 1):\n self.chain = {} # (__layer) objects dictionary\n self.N = 16\n seq = numpy.array([[n, int(c / quanta)] for n, c in seq], dtype = int)\n # pp.pprint(seq)\n self.create(seq)\n \n def create(self, seq):\n \"\"\"creates the circular graphs\n _______________________________________\n | |\n '>---> A------> B-------> C-------> D--^\n \n Here A, B, C are __layers\n\n Args:\n seq (numpy.ndarray): 2 D list, [[n0, f0], [n1, f1], ...]\n \"\"\"\n if not isinstance(seq, numpy.ndarray) : raise AttributeError(f\"Expected sequence to be of type 'numpy.ndarray' given {type(seq)}\")\n for index, (node, freq) in enumerate(seq):\n f_ = freq\n while f_ > 0:\n self.add_node(index, node, freq)\n f_ -= 1\n\n def add_node(self, index, node, freq):\n index %= self.N\n if index not in self.chain:\n self.chain[index] = _Layer()\n self.chain[index].add_node(node)\n\n def __getitem__(self, index):\n return self.chain[index]\n\n def __str__(self):\n side_pattern = \"|{}\\n|\\n-->\"\n s = \"\"\n for l, v in sorted(self.chain.items()):\n s += \"\\n\\n\" + side_pattern.format(l)\n s += \"\\t\\t\" + str(v) + \"\\n\"\n return s\n\n ","sub_path":"ravana/net/memory/chains.py","file_name":"chains.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"121057167","text":"#!/usr/bin/python\n# -*- coding: gb2312 -*-\n#*******************************************************************************\n# Copyright (c) 2012,中兴通讯股份有限公司,All rights reserved.\n# \n# 文件名称:ssh.py\n# 测试内容:vlan模块xmlrpc接口封装\n# 当前版本:1.0 \n# 作 者:高明\n# 完成日期:2012/08/24\n#\n# 修改记录1:\n# 修改日期:2012/08/24\n# 版 本 号:V1.0\n# 修 改 人:高明\n# 
修改内容:创建\n#*******************************************************************************/\nimport paramiko\nimport re\n\nclass ssh_cmd:\n ''' tecs ssh_cmd methods '''\n def __init__(self,server_addr,port): \n self.client = paramiko.SSHClient() \n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) \n self.client.connect(server_addr,port,username='root', password='') \n def __del__(self):\n self.client.close()\n\n #函数说明:本函数适合如“xm list”、“xm sched-credit”这样的命令,其执行结果\n #像二元关系表一样,如:xm list执行结果:\n #Name ID Mem VCPUs State Time(s)\n #Domain-0 0 1024 2 r----- 5970.4\n #instance_1 1 1024 1 -b---- 44.3\n #入参:\n # cmd_str - shell命令,如xm list\n # key_str - 作为查找依据的key,如Name\n # value_str - 主键在某一元组中的值,如instance_1(要查找的目标就在该元组中)\n #返回值:类型是列表,列表的每一个元素都是字典结构,字典的key都是相同的,为二元\n #关系表的属性名,value为查找到符合条件的每一行;\n def xm_cmd(self,cmd_str,key_str,value_str):\n count =0\n tgt_list =[]\n stdin, stdout, stderr = self.client.exec_command(cmd_str) \n for i_line in stdout.readlines(): \n count = count+1\n if count == 1:\n key_list = i_line.split()\n else:\n val_list = i_line.split()\n tgt_dict = dict(zip(key_list,val_list)) \n if tgt_dict[key_str] == value_str:\n tgt_list.append(tgt_dict) \n return tgt_list\n \n #查找ifconfig结果中某个网卡的mtu值\n def ifconfig_MTU(self,nic_name_str):\n pattern = re.compile(r'^%s' %nic_name_str)\n match = None\n stdin, stdout, stderr = self.client.exec_command('ifconfig') \n for i_line in stdout.readlines():\n if match == None:\n match = pattern.match(i_line) \n if match: \n find = re.findall(r'MTU:(\\d*)',i_line)\n if find != []:\n return (int(find[0]))\n return None","sub_path":"tools/ci/pytest/tecs/ssh_cmd.py","file_name":"ssh_cmd.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"280034775","text":"\"\"\"\nView functions for user webapp v1.0\n\"\"\"\nfrom flask import render_template, redirect, url_for, flash\nfrom flask_login import login_required, current_user\nfrom app.user_app import user_app, user_app_logger\nfrom app.user_app.forms import EditProfileForm, RegistrationForm\nfrom app.models import User, Address, Permission\nfrom helper.countries import countries, get_country_key\n\n\n@user_app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"\n View function for a registering a new user.\n :return:\n \"\"\"\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n user.save()\n # TODO: generate and send confirmation token for the user.\n conf_token = user.generate_confirmation_token()\n user_app_logger.info('Confirmation token successfully generated for '\n 'user.')\n flash('Registration successful.')\n return redirect(url_for('auth_app.confirm', conf_token=conf_token))\n return render_template('user/register.html', form=form)\n\n\n@user_app.route('/')\n@user_app.route('/index')\ndef index():\n \"\"\"\n View function to return the index page for the user request.\n :return:\n \"\"\"\n if current_user.is_anonymous:\n user_app_logger.info('Serving index page to anonymous user.')\n return render_template('index.html')\n\n return render_template('user/user_index.html')\n\n\n@user_app.route('/profile/')\n@login_required\ndef profile_page(username_or_email):\n \"\"\"\n This view function takes the username/email address of the user\n and returns the profile page of user.\n :param username_or_email: Username or email address of the user\n whose profile page is 
accessed.\n :return:\n \"\"\"\n user = User.objects(username=username_or_email).first() or \\\n User.objects(username=username_or_email).first()\n if user is None:\n user_app_logger.warning('User %d request for info page of '\n 'non existing user %s' % (current_user.id,\n username_or_email))\n return render_template('errors/404.html')\n \"\"\"\n If the logged in user is trying to access some other user's\n profile, minimal information is presented.\n \"\"\"\n if user.id != current_user.id and \\\n User.objects(id=current_user.id).first().permissions != \\\n Permission.PERM_ADMIN:\n user_app_logger.info('Displaying user %d profile page for '\n 'user %d' % (user.id, current_user.id))\n return render_template('user/profile_minimal.html',\n user=user)\n # Full information is provided to the user for his own profile view.\n user_app_logger.info('displaying profile page with full information to '\n 'user %d' % user.id)\n return render_template('user/profile.html', user=user)\n\n\n@user_app.route('/profile/')\ndef profile_page_id(user_id):\n \"\"\"\n This view function takes the ID of the user and returns the profile\n page of the respective user.\n :param user_id: ID of the user\n whose profile page is accessed.\n :return:\n \"\"\"\n user = User.objects(id=user_id).first()\n if user is None:\n user_app_logger.error('User %d requested profile page for non'\n ' existing user %d' % (current_user.id, user_id))\n return render_template('errors/404.html')\n return redirect(url_for('user_app.profile_page',\n username_or_email=user.username))\n\n\n@user_app.route('/edit-profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n \"\"\"\n This view function presents the page edit information about the user.\n :return:\n \"\"\"\n form = EditProfileForm(\n first_name=current_user.first_name,\n last_name=current_user.last_name,\n phone=current_user.phone,\n country=get_country_key(current_user.address.country) if\n current_user.address else None)\n user_app_logger.debug('edit profile form populated for user %d' %\n current_user.id)\n if form.validate_on_submit():\n user_app_logger.debug('edit profile form submitted by user %d' %\n current_user.id)\n current_user.first_name = form.first_name.data\n current_user.last_name = form.last_name.data\n current_user.phone = form.phone.data\n # If there is no address previously, create address object.\n if current_user.address is None:\n user_app_logger.debug('No address previously registered to '\n 'user={0}'.format(current_user.id))\n current_user.address = Address()\n current_user.address.street = form.street.data\n current_user.address.postal_code = form.postal_code.data\n current_user.address.city = form.city.data\n current_user.address.state = form.state.data\n current_user.address.country = countries.get(form.country.data)\n current_user.save()\n user_app_logger.info('user %d profile updated successfully' %\n current_user.id)\n flash('Profile information has been updated.')\n user_app_logger.debug(\n 'user={0} being redirected to profile page after profile '\n 'update'.format(current_user.id))\n return redirect(url_for('user_app.profile_page',\n username_or_email=current_user.username))\n # Populate the form for GET request.\n if current_user.address is not None:\n form.street.data = current_user.address.street or ''\n form.postal_code.data = current_user.address.postal_code or ''\n form.city.data = current_user.address.city or ''\n form.state.data = current_user.address.state or ''\n user_app_logger.info('Edit profile form being displayed to 
user='\n '{0}'.format(current_user.id))\n return render_template('user/edit_profile.html', form=form)\n","sub_path":"app/user_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"555703233","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#  @Time    : 2020-02-04 17:14\n#  @Author  : July\nimport re\nimport os\nimport csv\nimport time\nimport requests\nfrom retrying import retry\nfrom constants import proxy_url, app_header, app_url\nfrom lxml import html\nimport random\n\netree = html.etree\n\nproxy = {}\n# '4490719299787262'\nsince_id = 4458441189777880\n\n\n# @retry(stop_max_attempt_number=3, wait_random_min=1000, wait_random_max=5000)\ndef change_proxy(retry_count):\n if retry_count < 0:\n return\n\n result = requests.get(proxy_url).json()\n if result['msg'] == 'ok':\n ip = result['obj'][0]['ip']\n port = result['obj'][0]['port']\n proxies = {\"http\": \"http://\" + ip + \":\" + port, \"https\": \"http://\" + ip + \":\" + port}\n global proxy\n proxy = proxies\n print(f\"代理ip为更改为:{proxies}\")\n return proxies\n else:\n time.sleep(1)\n print('切换代理失败,重新尝试。。。')\n change_proxy(retry_count - 1)\n\n\ndef save_data(filename, data):\n if os.path.isfile(filename):\n is_exist = True\n else:\n is_exist = False\n with open(filename, \"a\", newline=\"\", encoding=\"utf_8_sig\") as f:\n c = csv.writer(f)\n if not is_exist:\n \"\"\"need = [user_id, user_name, created_at, source,\n content, reposts_count, comments_count, attitudes_count,\n pics_url, video_url, retweeted_status, 'retweeted_content', retweeted_url,\n topic_num, topics]\"\"\"\n c.writerow(['用户id', '昵称', '发表时间', '发布设备', 'scheme_url', '正文', '转发数', '评论数', '点赞数',\n '图片链接', '视频链接', '是否转发', '转发内容', '转发链接', '话题数', '话题', '微博链接', 'since_id'])\n for line in data:\n c.writerow(line)\n\n\ndef spider():\n global since_id\n app_param = {\n # 'uid': 2803301701,\n 'type': 'uid',\n 'value': '2803301701',\n 'containerid': '1076032803301701',\n 'since_id': since_id\n }\n\n def get_ret(c):\n\n if c < 0:\n return\n try:\n ret = requests.get(url=app_url, headers=app_header, params=app_param, proxies=proxy).json()\n print(ret)\n a = ret['data']['cardlistInfo']['since_id']\n return ret\n except:\n time.sleep(random.uniform(1, 3))\n change_proxy(3)\n return get_ret(c - 1)\n\n ret = get_ret(3)\n\n since_id = ret['data']['cardlistInfo']['since_id']\n print(f'since_id改为 {since_id} ,上一次的数据还未保存,请使用前一个since_id')\n need_list = []\n cards = ret['data']['cards']\n for card in cards:\n if card['card_type'] == 9:\n user_id = card['mblog']['user']['id'] # 用户id\n user_name = card['mblog']['user']['screen_name'] # 用户名称\n created_at = card['mblog']['created_at'] # 发布时间\n source = card['mblog']['source'] # 发布工具\n scheme_url = card['scheme']\n wb_id = card['mblog']['id']\n wb_url = 'https://m.weibo.cn/detail/' + str(wb_id)\n\n reposts_count = card['mblog']['reposts_count'] # 转发数\n comments_count = card['mblog']['comments_count'] # 评论数\n attitudes_count = card['mblog']['attitudes_count'] # 点赞数\n app_detail_url = 'https://m.weibo.cn/statuses/extend?id=' + str(wb_id)\n\n pics_url = ''\n if card['mblog']['pic_num'] != 0:\n try:\n pics = card['mblog']['pics']\n for pic in pics:\n tupian = pic['url']\n pics_url = pics_url + tupian + os.linesep + ' ' # 图片链接\n except:\n pics_url = ''\n\n try:\n video_url = card['mblog']['page_info']['media_info']['stream_url'] # 视频链接\n except KeyError:\n video_url = ''\n\n retweeted_status = '否'\n retweeted_url = ''\n 
retweeted_content = ''\n if 'retweeted_status' in card['mblog']:\n retweeted_status = '是'\n # retweeted_url = 'https://m.weibo.cn/detail/' + str(card['mblog']['retweeted_status']['id'])\n retweeted_url = 'https://m.weibo.cn/statuses/extend?id=' + str(card['mblog']['retweeted_status']['id'])\n\n def get_re_detail_ret(c):\n if c < 0:\n return\n try:\n re_detail_ret = requests.get(url=retweeted_url, headers=app_header, proxies=proxy).json()\n print(f're_detail_ret{re_detail_ret}')\n time.sleep(random.uniform(0.3, 1.2))\n return re_detail_ret\n except:\n change_proxy(3)\n return get_ret(c - 1)\n\n re_detail_ret = get_re_detail_ret(2)\n try:\n html = re_detail_ret['data']['longTextContent'] # 正文\n root = etree.HTML(html)\n retweeted_content = root.xpath(\"string(//*)\")\n except:\n print(f'获取微博详情失败,详情内容为detail_ret{re_detail_ret}')\n html = card['mblog']['retweeted_status']['text']\n root = etree.HTML(html)\n retweeted_content = root.xpath(\"string(//*)\")\n\n def get_detail_ret(c):\n if c < 0:\n return\n try:\n detail_ret = requests.get(url=app_detail_url, headers=app_header, proxies=proxy).json()\n print(detail_ret)\n time.sleep(random.uniform(0.3, 1.2))\n return detail_ret\n except:\n change_proxy(3)\n return get_ret(c - 1)\n\n re_detail_ret = get_detail_ret(2)\n try:\n html = re_detail_ret['data']['longTextContent'] # 正文\n root = etree.HTML(html)\n content = root.xpath(\"string(//*)\")\n except:\n print(f'获取微博详情失败,详情内容为detail_ret{re_detail_ret}')\n html = card['mblog']['text']\n root = etree.HTML(html)\n content = root.xpath(\"string(//*)\")\n\n topic_num = int(content.count('#') / 2) # 话题数\n p = re.compile(r'[#](.*?)[#]', re.S)\n topics = '\\n'.join(p.findall(content)) # 话题\n\n need = [user_id, user_name, created_at, source, scheme_url,\n content, reposts_count, comments_count, attitudes_count,\n pics_url, video_url, retweeted_status, retweeted_content, retweeted_url,\n topic_num, topics,\n wb_url, since_id]\n print(need)\n need_list.append(need)\n\n return need_list\n\n\nif __name__ == '__main__':\n change_proxy(1)\n while True:\n data = spider()\n save_data('人民日报1.csv', data)\n print(\"########################存储成功########################\")\n","sub_path":"user_weibo_spider.py","file_name":"user_weibo_spider.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"30329840","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 27 15:35:36 2019\n\n@author: talavera\n\"\"\"\nimport numpy as np\nimport obspy\nfrom obspy import read as read\nfrom frospy.util.array_util import stack\nfrom frospy.core.segment import read as read_seg\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\n#from obspy.core.utcdatetime import UTCDateTime\n\ndef func(x, A0, q):\n# f=0.814368*1e-3 # PREM\n f=0.814601*1e-3 # SC measurement\n om0 = f * 2 * np.pi * 3600 *24 # in rad/day\n return A0 * np.exp(-om0*x*0.5*q)\n\nevents = [\n \"011307Z\", # aftershock 10 March, 2007, 2 stations\n \"022710Z\", # good!\n \"031111Z\", # fit witouth trimmning\n \"032805Z\", # after removing stations good fit!\n \"050306Z\", # remove? 
after shock, 4 July 2006?\n# \"052413Z\", # not included in inversion\n \"122604Z\", # fit w/o trimming, aftershock 15, march 2005?\n ]\nhome = \"/net/home/talavera/eejit/splitting/00s00/Qcevents/self/c00=1\"\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(1,2,1)\nax2 = fig.add_subplot(1,2,2)\nfor e in events:\n seg_file = \"%s/%s/%s.dat\"%(home,e,e)\n seg = read_seg(seg_file)\n stations = [s.stats.station for s in seg]\n# if e==\"122604Z\":\n# # Smaller ntps Remove from inversion!\n# stations.remove(\"STU\")\n# stations.remove(\"VTS\")\n# stations.remove(\"RGN\")\n# # Okal & Stein, 2009 sued stations\n# # CTA, MAJO, PPT and YSS \n# # From this I use: PPT, CTAO\n# if e==\"032805Z\":\n# # aftershock? Remove from inversion! done!\n# stations.remove(\"ANTO\")\n# stations.remove(\"DWPF\")\n# stations.remove(\"PAB\")\n# stations.remove(\"ESK\")\n# if e==\"050306Z\":\n# # aftershock? Remove from inversion! done!\n# stations.remove(\"ALE\")\n st = read('/net/home/talavera/eejit/data/VHZ/%s.ahx'%e)\n st_filt = st.copy()\n \n tmax = st_filt[0].stats.npts*10/3600\n# tmax = 734400*10/3600\n start = st_filt[0].stats.starttime\n end = st_filt[0].stats.endtime\n# print(start,end)\n tw1 = seg[0].tw1\n tw2 = seg[0].tw2\n st_filt.trim(start+tw1*3600,end-(tmax-tw2)*3600)\n \n data_for_stack = [] \n for tr in st_filt:\n station = tr.stats.station\n if station in stations:\n fw1 = seg.select(station=station)[0].fw1*1e-3\n fw2 = seg.select(station=station)[0].fw2*1e-3\n# tw1 = seg.select(station=station)[0].tw1\n# tw2 = seg.select(station=station)[0].tw2\n# tmax = tr.stats.npts*10/3600\n# start = tr.stats.starttime\n# end = tr.stats.endtime \n tr.filter('bandpass', freqmin=fw1, freqmax=fw2, \n corners=2, zerophase=True)\n data_for_stack.append(tr.data)\n# tr.plot()\n \n data_for_stack = np.array(data_for_stack)\n \n # Stations have to have the same sampling rate!\n stacked_data = stack(data_for_stack)\n \n data_envelope = obspy.signal.filter.envelope(stacked_data)\n A0 = max(data_envelope)\n index = np.argmax(data_envelope)\n data_envelope=data_envelope[index::]\n \n # x is tranformed from seg to days and is reflected in func\n x=np.linspace(tw1/24,tw2/24,num=len(data_envelope))\n# ax1.plot(x,np.log10(data_envelope/max(data_envelope)),label=e)\n ax1.plot(x,data_envelope,label=e)\n ax2.plot(x,data_envelope)\n \n popt, pcov = curve_fit(func, x, data_envelope, p0=(A0, 1./5327.))\n ax2.plot(x, func(x, *popt), color=\"k\")\n print(\"%s, Q=%2.f\"%(e, 1/popt[1]))\n#popt, pcov = curve_fit(func, x, np.log10(data_envelope/max(data_envelope)))\nax1.plot(x, func(x, *popt), color=\"k\", label=\"regression\")\nax1.set_yscale('log')\n#ax.set_xlim(-1,85)\nax1.legend(ncol=1,loc=\"lower left\")\nplt.show()\n\n#011307Z, Q=6802\n#022710Z, Q=5976\n#031111Z, Q=6063\n#032805Z, Q=6090\n#122604Z, Q=6005\n#050306Z, Q=7916\n\nevents = [\n \"011307Z\", # aftershock 10 March, 2007, 2 stations\n \"022710Z\", # good!\n \"031111Z\", # fit witouth trimmning\n \"032805Z\", # after removing stations good fit!\n \"050306Z\", # remove? 
after shock, 4 July 2006?\n \"052413Z\", # not included in inversion\n \"122604Z\", # fit w/o trimming, aftershock 15, march 2005?\n ]\nfor e in events:\n print(e)\n fig = plt.figure(figsize=(12,8))\n ax1 = fig.add_subplot(1,2,1)\n ax2 = fig.add_subplot(1,2,2)\n seg_file = \"%s/%s/%s.dat\"%(home,e,e)\n seg = read_seg(seg_file)\n stations = [s.stats.station for s in seg]\n for sta in stations:\n st = read('/net/home/talavera/eejit/data/VHZ/%s.ahx'%e)\n st_filt = st.copy()\n \n tmax = st_filt[0].stats.npts*10/3600\n # tmax = 734400*10/3600\n start = st_filt[0].stats.starttime\n end = st_filt[0].stats.endtime\n # print(start,end)\n tw1 = seg[0].tw1\n tw2 = seg[0].tw2\n st_filt.trim(start+tw1*3600,end-(tmax-tw2)*3600)\n \n fw1 = seg.select(station=sta)[0].fw1*1e-3\n fw2 = seg.select(station=sta)[0].fw2*1e-3 \n tr = st_filt.select(station=sta)\n # Maybe is better to first filter and trim after filtering\n tr.filter('bandpass', freqmin=fw1, freqmax=fw2, \n corners=2, zerophase=True)\n data=tr[0].data\n # tr[0].plot()\n\n \n data_envelope = obspy.signal.filter.envelope(data)\n A0 = max(data_envelope)\n index = np.argmax(data_envelope)\n data_envelope=data_envelope[index::]\n \n # x is tranformed from seg to days and is reflected in func\n x=np.linspace(tw1/24,tw2/24,num=len(data_envelope))\n # ax1.plot(x,np.log10(data_envelope/max(data_envelope)),label=e)\n \n popt, pcov = curve_fit(func, x, data_envelope, p0=(A0, 1./5327.))\n ax2.plot(x, func(x, *popt), color=\"k\")\n print(\"%s %2.f\"%(sta, 1/popt[1]))\n \n ax1.plot(x,data_envelope,label=\"%s: Q=%2.f\"%(sta,1/popt[1]))\n ax2.plot(x,data_envelope)\n #popt, pcov = curve_fit(func, x, np.log10(data_envelope/max(data_envelope)))\n ax1.plot(x, func(x, *popt), color=\"k\", label=\"regression\")\n ax1.set_yscale('log')\n #ax.set_xlim(-1,85)\n ax1.legend(ncol=1,loc=\"lower left\")\n plt.suptitle(e)\n plt.show()","sub_path":"frospy/tests/todo/Su/plot_decay_rate_Q.py","file_name":"plot_decay_rate_Q.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"352277502","text":"import sklearn\nfrom sklearn import linear_model\nimport numpy as np\nimport pickle \nimport nltk\nfrom nltk.corpus import stopwords\nimport csv\nfrom nltk.stem.snowball import SnowballStemmer\nimport pickle\nimport math\nimport numpy as np\nimport re\nfrom tf_idf import *\nimport matplotlib.pyplot as plt\n\n#Load the distinct words list\nwith open('distinct.pkl','rb') as f:\n\tdistinct_words=pickle.load(f)\n\n#Load the American Airlines tweets\nwith open('aal.pkl','rb') as f:\n\ttweets_temp=pickle.load(f)\n\n#Load the TF_IDF vector\nwith open('tf_idf.pkl','rb') as f:\n\tTF_IDF_VECTOR=pickle.load(f)\n\n#NTK's SnowballStemmer\nstemmer=SnowballStemmer(\"english\")\n\n#Special characters to be stripped\nspecial_characters=['[',']','\\\\','/',',','\"','@','#','.']\n\nday_to_index={}\t#dictionary to map dates in string to index\ncount=0\t\t\ntot=92\t\t\t#Since total number of days was 92\nfor tw in tweets_temp:\n\tif(tw[-1] not in day_to_index):\n\t\tday_to_index[tw[-1]]=tot-count-1\n\t\tcount+=1\n\t#print(tw[-1])\nprint(\"Distinct days \", len(day_to_index))\nprint(\"Tweet count \",len(tweets_temp))\n'''\nfor key,val in day_to_index.items():\n\tprint(key,val)\n'''\n#data will contain the emotion values for each day\ndata=[]\nfor i in range(92):\n\ttemp=[0,0,0,0,0,0,0]\n\tdata.append(temp)\n\ntweet_count=1\n\n#Process each tweet\nfor tw1 in tweets_temp:\n\tif(tweet_count%100==1):\n\t\tprint(\"Tweets 
processed \",tweet_count,\"/\",len(tweets_temp))\n\ttweet_count+=1\n\n\tdate=day_to_index[tw1[-1]]\t\t#date of the in-process tweet\n\ttw1=tw1[0:-1]\t\t\t\t\t#remove the date token\n\ttesting1=[]\t\t\t\t\t\t#will contain the stemmed tokens\n\ttw=[]\t\t\t\t\t\t\t#remove special character tokens\n\n\t#special characters removal\n\tfor i in tw1:\n\t\tif(i not in special_characters):\n\t\t\ttw.append(i)\n\n\t#stemming each word\n\tfor i in range(len(tw)):\n\t\ttesting1.append(stemmer.stem(tw[i]))\n\n\t#convert into 1*6072 vector\n\ttesting_row=[]\n\tfor i in range(len(distinct_words)):\n\t\tif(distinct_words[i] in testing1):\n\t\t\ttesting_row.append(1)\n\t\telse:\n\t\t\ttesting_row.append(0)\n\n\tans=np.matmul(testing_row,TF_IDF_VECTOR)\n\t#now ans contains the score for each emotion\n\t#add the values to respective slots for the day on which the tweet was made\n\tfor i in range(len(ans)):\n\t\tdata[date][i]+=ans[i]\n\n#normalize each day's emotion scores\nfor row in data:\n\tmx=sum(row)\n\tfor j in range(len(row)):\n\t\trow[j]=float(row[j])/mx\n\n#Make different lists for visualization purposes\nclose_data=[]\nopen_data=[]\njoy=[]\nfear=[]\nanger=[]\nsadness=[]\ndisgust=[]\nshame=[]\nguilt=[]\n\nfor i in range(len(day_to_index)):\n\tclose_data.append(0)\n\topen_data.append(0)\n\n\n\nreader=csv.reader(open('AAL.csv','r'))\nno_tweets=[]\nfor row in data:\n\tjoy.append(row[0])\n\tfear.append(row[1])\n\tanger.append(row[2])\n\tsadness.append(row[3])\n\tdisgust.append(row[4])\n\tshame.append(row[5])\n\tguilt.append(row[6])\n\t\n\nfor row in reader:\n\t#print(row)\n\tif(row[0] not in day_to_index):\n\t\tno_tweets.append(row[0])\n\tif(row[0]!='Date' and row[0] in day_to_index):\n\t\tdate=day_to_index[row[0]]\n\t\topen_data[date]=float(row[1])\n\t\tclose_data[date]=float(row[4])\n\nx_axis=[i for i in range(len(day_to_index))]\t\t\t\t\t\t\t\t\t \t#day count for the visualisation graphs\n#############################################\n#Filling in values for absent financial data on weekends\ni=0\nj=0\nwhile(i discord.Guild:\n return await self.fetch_guild(GUILD_ID)\n\n async def admin_log(\n self, message: str = None, embed: discord.Embed = None\n ) -> discord.Message:\n channel: discord.TextChannel = await self.fetch_channel(\n Channels.ADMIN_LOG.value\n )\n return await channel.send(content=message, embed=embed)\n\n async def on_ready(self):\n print(\"Bot is online\")\n await self.change_presence(\n activity=discord.Game(name=\"Being developed by the ArjanCodes community\")\n )\n\n @staticmethod\n def load_command_docs(cog, data):\n if data is None:\n return\n else:\n for command in cog.walk_commands():\n if isinstance(command, CommandWithDocs):\n command.docs = data.get(command.qualified_name)\n\n\nbot = Bot(\n command_prefix=PREFIX,\n case_insensitive=CASE_INSENSITIVE,\n intents=INTENTS,\n help_command=CustomHelpCommand(),\n)\n\nbot.run(TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"238340406","text":"\"\"\"YOLO Object Detection - Local Inference\n\nThis script allows the user to obtain the output bounding boxes\nfor trained models, and save them to .txt files, \nand/or visualize them using opencv2.\n\"\"\"\n\nfrom base_inference import BaseInference, CONFIDENCE_THRESHOLD, NMS_IOU_THRESHOLD\n\nimport cv2\nimport numpy as np\nimport glob\nimport random\nimport argparse\n\nimport os\n\nDEFAULT_WEIGHTS = os.path.join(os.pardir, os.pardir, 
\"lambda_backend\", \"predict_microservice\", \"weights\", \"yolov4-tiny-obj.weights\")\nDEFAULT_CONFIG = os.path.join(os.pardir, os.pardir, \"lambda_backend\", \"predict_microservice\", \"weights\", \"yolov4-tiny-obj.cfg\")\nDEFAULT_IMAGES = os.path.join(os.curdir, \"test_images\")\n# Can read classes from a file if more are ever added\nDEFAULT_CLASSES = [\"hold\"]\n\nIMSHOW_FONT = cv2.FONT_HERSHEY_PLAIN\n\nclass LocalInference(BaseInference):\n \"\"\"\n Inference class for local predictions\n \n ...\n\n Attributes\n ----------\n images_path : str\n Path to the test images folder\n will_save : bool\n To save predicted boxes for each image to .txt files\n will_show : bool\n To show predicted boxes for each image using opencv2\n\n Methods\n -------\n run()\n Obtains predicted boxes for visualization and/or saving to file\n \"\"\"\n\n def __init__(self, weight_path, config_path, classes, score_threshold, nms_thresh, images_path, will_save, will_show, is_random):\n super().__init__(weight_path, config_path, classes, score_threshold, nms_thresh)\n \n self.images = glob.glob(os.path.join(images_path, \"*.jpg\"))\n\n self.will_save = will_save\n self.will_show = will_show\n self.is_random = is_random\n\n def run(self):\n\n self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))\n \n if self.is_random:\n random.shuffle(self.images)\n\n for img_path in self.images:\n # Loading image\n img = cv2.imread(img_path)\n # img = cv2.resize(img, None, fx=0.6, fy=0.6)\n\n class_ids, box_dims, box_confidences, box_dims_norm, indexes = super().run(img)\n if self.will_save:\n self._save_labelfile(img_path, class_ids, box_dims_norm, indexes)\n if self.will_show:\n self._show(img, class_ids, box_dims, indexes)\n cv2.destroyAllWindows()\n \n def _show(self, img, class_ids, box_dims, indexes): \n for i in indexes:\n x, y, w, h = box_dims[i]\n label = str(self.classes[class_ids[i]])\n color = self.colors[class_ids[i]]\n cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\n cv2.putText(img, label, (x, y + 30), IMSHOW_FONT, 1, color, 2)\n\n cv2.imshow(\"Image\", img)\n key = cv2.waitKey(0)\n \n def _save_labelfile(self, img_path, class_ids, box_dims_norm, indexes):\n # Get filename for labelfile\n labelfile = os.path.splitext(img_path)[0]\n with open(labelfile + \".txt\", \"w+\") as f:\n for i in indexes:\n class_id = class_ids[i]\n # Normalised format for yolo labeling\n nx, ny, nw, nh = box_dims_norm[i]\n f.write(f'{class_id} {nx} {ny} {nw} {nh}\\n')\n\ndef add_bool_arg(parser, name, default=True, msg=\"\"):\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('--' + name, dest=name, action='store_true', help=msg)\n group.add_argument('--no-' + name, dest=name, action='store_false')\n parser.set_defaults(**{name:default})\n\ndef setup_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-w\", \"--weights\", help=\"path to learned weights from model\", default=DEFAULT_WEIGHTS)\n parser.add_argument(\"-c\", \"--config\", help=\"path to config of yolo\", default=DEFAULT_CONFIG)\n parser.add_argument(\"-i\", \"--images\", help=\"path to test images\", default=DEFAULT_IMAGES)\n parser.add_argument(\"-s\", \"--score\", help=\"score threshold\", default=CONFIDENCE_THRESHOLD, type=float)\n parser.add_argument(\"-n\", \"--nms\", help=\"nms threshold\", default=NMS_IOU_THRESHOLD, type=float)\n add_bool_arg(parser, 'save', msg=\"save to labelfiles\")\n add_bool_arg(parser, 'show', msg=\"visualise using opencv2\")\n add_bool_arg(parser, 'random', msg=\"randomise 
image visualisation order\")\n return parser\n\ndef main():\n # Parsing arguments\n parser = setup_parser()\n args = parser.parse_args()\n\n inference = LocalInference(\n weight_path = args.weights, \n config_path = args.config, \n classes = DEFAULT_CLASSES,\n score_threshold = args.score, \n nms_thresh = args.nms, \n images_path = args.images, \n will_save = args.save, \n will_show = args.show,\n is_random = args.random\n )\n inference.run()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"model_training/local_prediction/local_inference.py","file_name":"local_inference.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"330313548","text":"\"\"\"\n# Reward summary\n\n## Sparse reward\n\nThis gives a reward each time that other agent dies. At the end of the\nmatch it gives an extra reward depending on the ranking.\n\n## Ranking reward\n\nThe reward is the current ranking of the agent on the match\n\n## Clipped len reward\n\nThe reward is the difference between the leading goose and the agent, clipped.\nIf the agent is leading then is the difference with the second.\n\n## Grow and kill reward\n\nGrowing gives reward and also the death of the other agents\n\n## Terminal, kill and grow reward (terminal_kill_and_grow_reward)\n\n- Terminal reward: this will be given if the agent dies or reaches the terminal state. It will be\nproportional to the ranking of the agent, f.e. 5, -5, -10, -15. This is a difference with current\nreward that was not giving reward on the terminal state. This way the agent will not find any\ndifference on dying on step 190 or reaching step 200 on second position if only two goose are left.\nIt will incentivate taking more risks for winning.\n- Kill reward. this is already present on current reward and I think is a good incentive. Sometimes\nthe other agents will die by themselves, but if the agent learns to kill them that would be a good\nability. We could give for example 2 for each killed agent.\n- Eat reward. 
I also think that encourage growing eases the learning.\n\n\"\"\"\n\nimport numpy as np\n\n\ndef get_reward(current_observation, previous_observation, configuration, reward_name):\n if reward_name == 'sparse_reward':\n return get_sparse_reward(current_observation, previous_observation, configuration)\n elif reward_name.startswith('ranking_reward'):\n return get_ranking_reward(current_observation, reward_name)\n elif reward_name.startswith('clipped_len_reward'):\n return get_clipped_len_reward(current_observation, reward_name)\n elif reward_name.startswith('grow_and_kill_reward'):\n return get_grow_and_kill_reward(current_observation, previous_observation, reward_name)\n elif reward_name.startswith('just_survive'):\n return get_just_survive_reward(current_observation, reward_name)\n elif reward_name.startswith('terminal_kill_and_grow_reward'):\n return get_terminal_kill_and_grow_reward(\n current_observation, previous_observation, reward_name, configuration)\n elif reward_name.startswith('v2_terminal_kill_and_grow_reward'):\n return get_v2_terminal_kill_and_grow_reward(\n current_observation, previous_observation, reward_name, configuration)\n else:\n raise KeyError(reward_name)\n\n\ndef get_cumulative_reward(rewards, reward_name):\n if reward_name == 'sparse_reward':\n return np.cumsum(rewards[::-1])[::-1]\n else:\n if reward_name.startswith('ranking_reward'):\n window_size = int(reward_name.split('_')[3])\n elif reward_name.startswith('clipped_len_reward'):\n window_size = _get_clipped_len_reward_params_from_name(reward_name)[1]\n elif reward_name.startswith('grow_and_kill_reward'):\n window_size = _get_grow_and_kill_reward_params_from_name(reward_name)[1]\n else:\n raise KeyError(reward_name)\n if window_size > len(rewards):\n window_size = len(rewards)\n cumulative_reward = np.array(rewards, dtype=np.float32)\n mask = np.ones_like(cumulative_reward)\n for idx in range(1, window_size):\n cumulative_reward[:-idx] += rewards[idx:]\n mask[:-idx] += 1\n cumulative_reward /= mask\n return cumulative_reward\n\n\ndef get_sparse_reward(current_observation, previous_observation, configuration):\n \"\"\" Computes the sparse reward for the previous action\"\"\"\n if current_observation['geese'][current_observation['index']]:\n is_terminal_step = current_observation['step'] == configuration['episodeSteps'] - 1\n if is_terminal_step:\n return _get_terminal_sparse_reward(current_observation, previous_observation)\n else:\n # Give reward if some geese has died\n return get_n_geese_alive(previous_observation['geese']) - get_n_geese_alive(current_observation['geese'])\n else:\n # Then the agent has died\n return -1\n\n\ndef _get_terminal_sparse_reward(current_observation, previous_observation):\n \"\"\"\n Returns a reward between 0 and 3, where 0 means the agent is in the last position and 3 means\n it is the winner. 
It gives 1 point for each smaller or death agent, and 0.5 for each agent\n of the same size\n \"\"\"\n current_geese_len = _get_geese_len(current_observation)\n previous_geese_len = _get_geese_len(previous_observation)\n goose_idx = current_observation['index']\n goose_len = current_geese_len[goose_idx]\n reward = 0\n for idx, (current_len, previous_len) in enumerate(zip(current_geese_len, previous_geese_len)):\n if idx == goose_idx:\n continue\n if not previous_len:\n reward += 1\n continue\n if goose_len > current_len:\n reward += 1\n elif goose_len == current_len:\n reward += 0.5\n\n return reward\n\n\ndef get_n_geese_alive(geese):\n return len([goose for goose in geese if goose])\n\n\ndef get_ranking_reward(current_observation, reward_name):\n geese_len = [len(goose) for goose in current_observation['geese']]\n goose_len = geese_len[current_observation['index']]\n if goose_len: # then it is alive\n reward = 0\n for idx, other_goose_len in enumerate(geese_len):\n if idx == current_observation['index']:\n continue\n if other_goose_len < goose_len:\n reward += 1\n elif other_goose_len == goose_len:\n reward += 0.5\n return reward\n else: # the agent has died\n return float(reward_name.split('_')[2])\n\n\ndef get_clipped_len_reward(current_observation, reward_name):\n death_reward, window, max_reward, min_reward = _get_clipped_len_reward_params_from_name(reward_name)\n geese_len = [len(goose) for goose in current_observation['geese']]\n goose_len = geese_len[current_observation['index']]\n if goose_len: # then it is alive\n max_len = max([geese_len[idx] for idx in range(len(geese_len)) if idx != current_observation['index']])\n len_diff = goose_len - max_len\n return np.clip(len_diff, min_reward, max_reward)\n else: # the agent has died\n return death_reward\n\n\ndef _get_clipped_len_reward_params_from_name(reward_name):\n death_reward, window, max_reward, min_reward = reward_name.split('_')[3:]\n return float(death_reward), int(window), float(max_reward), float(min_reward)\n\n\ndef get_grow_and_kill_reward(current_observation, previous_observation, reward_name):\n death_reward, window, max_reward, kill_reward = _get_grow_and_kill_reward_params_from_name(reward_name)\n geese_len = [len(goose) for goose in current_observation['geese']]\n goose_len = geese_len[current_observation['index']]\n if goose_len: # then it is alive\n kill_reward *= get_n_geese_alive(previous_observation['geese']) - get_n_geese_alive(current_observation['geese'])\n max_len = max([geese_len[idx] for idx in range(len(geese_len)) if idx != current_observation['index']])\n len_diff = goose_len - max_len\n grow_reward = goose_len - len(previous_observation['geese'][current_observation['index']])\n if len_diff > max_reward or grow_reward < 0:\n grow_reward = 0\n return kill_reward + grow_reward\n else: # the agent has died\n return death_reward\n\n\ndef _get_grow_and_kill_reward_params_from_name(reward_name):\n \"\"\" grow_and_kill_reward_-1_8_3_1 \"\"\"\n death_reward, window, max_reward, kill_reward = reward_name.split('_')[4:]\n return float(death_reward), int(window), float(max_reward), float(kill_reward)\n\n\ndef get_just_survive_reward(current_observation, reward_name):\n goose_len = len(current_observation['geese'][current_observation['index']])\n if goose_len: # then it is alive\n return 0.\n else: # the agent has died\n death_reward = float(reward_name.split('_')[-1])\n return death_reward\n\n\ndef get_terminal_kill_and_grow_reward(current_observation, previous_observation, reward_name, configuration):\n 
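    # Note (editorial): the reward_name suffix encodes the scales, e.g.
    # 'terminal_kill_and_grow_reward_10_2_1' -> terminal_reward_scale=10,
    # kill_reward=2, grow_reward=1 (parsed by the *_params_from_name helper below).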
terminal_reward_scale, kill_reward, grow_reward = _get_terminal_kill_and_grow_reward_params_from_name(reward_name)\n if is_terminal_state(current_observation, configuration):\n terminal_reward = _get_terminal_sparse_reward(current_observation, previous_observation)\n terminal_reward -= 2.5 # this only gives positive reward for winning, [-2.5, 0.5]\n terminal_reward *= terminal_reward_scale\n return terminal_reward\n else:\n reward = kill_reward * _get_killed_geese(current_observation, previous_observation)\n reward += grow_reward * _get_goose_growth(current_observation, previous_observation, configuration)\n return reward\n\ndef get_v2_terminal_kill_and_grow_reward(current_observation, previous_observation, reward_name, configuration):\n ret = _get_v2_terminal_kill_and_grow_reward_params_from_name(reward_name)\n terminal_reward_scale, max_death_reward, win_reward, kill_reward, grow_reward = ret\n if is_terminal_state(current_observation, configuration):\n terminal_reward = _get_terminal_sparse_reward(current_observation, previous_observation)\n if terminal_reward == 3:\n return win_reward\n terminal_reward = (terminal_reward - 2.5)*terminal_reward_scale + max_death_reward\n return terminal_reward\n else:\n reward = kill_reward * _get_killed_geese(current_observation, previous_observation)\n reward += grow_reward * _get_goose_growth(current_observation, previous_observation, configuration)\n return reward\n\n\ndef is_terminal_state(current_observation, configuration):\n if _is_goose_death(current_observation):\n return True\n if _is_final_state(current_observation, configuration):\n return True\n if _are_all_other_goose_death(current_observation):\n return True\n return False\n\n\ndef _is_goose_death(observation):\n geese_len = _get_geese_len(observation)\n goose_len = geese_len[observation['index']]\n return not goose_len\n\n\ndef _are_all_other_goose_death(observation):\n geese_len = _get_geese_len(observation)\n for idx, goose_len in enumerate(geese_len):\n if idx == observation['index']:\n continue\n if goose_len:\n return False\n return True\n\n\ndef _get_geese_len(observation):\n geese_len = [len(goose) for goose in observation['geese']]\n return geese_len\n\n\ndef _is_final_state(observation, configuration):\n return observation['step'] == configuration['episodeSteps'] - 1\n\n\ndef _get_killed_geese(current_observation, previous_observation):\n \"\"\" Computes how many geese were killed between observations \"\"\"\n return get_n_geese_alive(previous_observation['geese']) - get_n_geese_alive(current_observation['geese'])\n\n\ndef _get_goose_growth(current_observation, previous_observation, configuration):\n \"\"\" Returns 1 if the goose is bigger, 0 otherwise \"\"\"\n current_len = _get_geese_len(current_observation)[current_observation['index']]\n previous_len = _get_geese_len(previous_observation)[previous_observation['index']]\n if current_observation['step'] % configuration['hunger_rate'] == 0 and current_observation['step']:\n current_len += 1\n if current_len > previous_len:\n return 1\n return 0\n\n\ndef _get_terminal_kill_and_grow_reward_params_from_name(reward_name):\n \"\"\" terminal_kill_and_grow_reward_10_2_1 \"\"\"\n terminal_reward_scale, kill_reward, grow_reward = [float(value) for value in reward_name.split('_')[-3:]]\n return terminal_reward_scale, kill_reward, grow_reward\n\ndef _get_v2_terminal_kill_and_grow_reward_params_from_name(reward_name):\n \"\"\"\n v2_terminal_kill_and_grow_reward_2_-5_5_2_1\n terminal_reward_scale, max_death_reward, win_reward, kill_reward, 
grow_reward\n \"\"\"\n ret = [float(value) for value in reward_name.split('_')[-5:]]\n terminal_reward_scale, max_death_reward, win_reward, kill_reward, grow_reward = ret\n return terminal_reward_scale, max_death_reward, win_reward, kill_reward, grow_reward\n\n\ndef get_death_reward_from_name(reward_name):\n if reward_name == 'sparse_reward':\n raise NotImplementedError()\n elif reward_name.startswith('ranking_reward'):\n raise NotImplementedError()\n elif reward_name.startswith('clipped_len_reward'):\n return _get_clipped_len_reward_params_from_name(reward_name)[0]\n elif reward_name.startswith('grow_and_kill_reward'):\n return _get_grow_and_kill_reward_params_from_name(reward_name)[0]\n elif reward_name.startswith('just_survive'):\n return float(reward_name.split('_')[-1])\n else:\n raise KeyError(reward_name)\n","sub_path":"hungry_geese/reward.py","file_name":"reward.py","file_ext":"py","file_size_in_byte":12849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"385376040","text":"import os\r\nimport requests\r\nimport zipfile\r\nimport shutil\r\n\r\n# the working dir.\r\nfrom sklearn.model_selection import train_test_split\r\n\r\np_cwd = os.getcwd()\r\nDATA_PATH = os.path.abspath(os.path.join(p_cwd, 'data'))\r\nLABEL_CAT = 'cat'\r\nLABEL_DOG = 'dog'\r\n\r\n\r\ndef get_classes():\r\n return [LABEL_CAT, LABEL_DOG]\r\n\r\n\r\ndef resolve_path(*path):\r\n \"\"\"\r\n Resolve relative path to project root.\r\n :param path: paths relate to project root.\r\n :return: absolution path\r\n \"\"\"\r\n global p_cwd\r\n target = os.path.join(p_cwd, *path)\r\n return os.path.abspath(target)\r\n\r\n\r\ndef parse_file_name(file_name):\r\n \"\"\"\r\n Parse file name and return label information\r\n :type file_name: str\r\n :param file_name: the file name to parse.\r\n :return: label_in_lowercase, index\r\n \"\"\"\r\n assert file_name is not None, 'Param \"file_name\" must not be None.'\r\n assert file_name.strip(), 'Param \"file_name\" must not be empty.'\r\n parts = file_name.split('.')\r\n if 2 == len(parts):\r\n return None, int(parts[0])\r\n elif 3 == len(parts):\r\n return parts[0].lower(), int(parts[1])\r\n else:\r\n raise ValueError(\"Param file_name must be a pattern like ${label}.${idx} or ${idx}, now %s\" % file_name)\r\n\r\n\r\ndef read_img_labels(*path):\r\n \"\"\"\r\n Read image labels from specific path\r\n :param path: paths to image(s).\r\n :return: label(s)\r\n \"\"\"\r\n target = resolve_path(*path)\r\n assert os.path.exists(target), 'Target %s must be exists.' 
% target\r\n labels = []\r\n if os.path.isdir(target):\r\n # dir_entry: os.DirEntry\r\n for dir_entry in os.scandir(target):\r\n if dir_entry.is_file():\r\n label, _ = parse_file_name(dir_entry.name)\r\n labels.append(label)\r\n else:\r\n file_name = os.path.basename(target)\r\n label, _ = parse_file_name(file_name)\r\n labels.append(label)\r\n return labels\r\n\r\n\r\ndef get_target_path(label):\r\n return os.path.abspath(os.path.join(DATA_PATH, '{}.zip'.format(label)))\r\n\r\n\r\ndef get_extract_path(label):\r\n return os.path.abspath(os.path.join(DATA_PATH, label))\r\n\r\n\r\ndef download_data(label):\r\n \"\"\"\r\n Download data from kaggle.com.\r\n :param label: data label\r\n :return: None\r\n \"\"\"\r\n target = get_target_path(label)\r\n url = \"https://www.kaggle.com/c/5441/download/{}.zip\".format(label)\r\n print(\"Download {} files from {}\".format(label, url))\r\n\r\n data_response = requests.get(url)\r\n\r\n with open(target, 'wb') as writer:\r\n chunk_size = 1024 * 1024\r\n for chunk in data_response.iter_content(chunk_size=chunk_size):\r\n writer.write(chunk)\r\n\r\n\r\ndef data_exists(label):\r\n \"\"\"\r\n Return file existing status\r\n :param label: data label\r\n :return: existing_status\r\n \"\"\"\r\n return os.path.exists(get_target_path(label))\r\n\r\n\r\ndef extract_data(label):\r\n \"\"\"\r\n Extract zip file into data folder. It will execute only if target folder not exist.\r\n :param label: data label\r\n :return: None\r\n \"\"\"\r\n data_path = get_target_path(label)\r\n extract_path = get_extract_path(label)\r\n\r\n if not os.path.exists(extract_path):\r\n print(\"Extract {} into {}...\".format(data_path, extract_path))\r\n _zip = zipfile.ZipFile(data_path)\r\n _zip.extractall(DATA_PATH)\r\n\r\n\r\ndef _prepare_path(path):\r\n # if not os.path.exists(path):\r\n # os.mkdir(path)\r\n return path\r\n\r\n\r\ndef get_train_data_path():\r\n return _prepare_path(get_extract_path('train_link'))\r\n\r\n\r\ndef get_valid_data_path():\r\n return _prepare_path(get_extract_path('valid_link'))\r\n\r\n\r\ndef get_test_data_path():\r\n return _prepare_path(get_extract_path('test_link'))\r\n\r\n\r\ndef get_train_all_data_path():\r\n return _prepare_path(get_extract_path('train_all'))\r\n\r\n\r\ndef valid_data_exist():\r\n \"\"\"\r\n verify data existing\r\n it will extract data into 'data/{label}' if folder not exist.\r\n :return: dir for all kinds of label.\r\n \"\"\"\r\n label_train = 'train'\r\n label_test = 'test'\r\n labels = [label_train, label_test]\r\n for label in labels:\r\n if not data_exists(label):\r\n raise ValueError('Cannot find {} data under \"{}\" folder.'.format(label, DATA_PATH))\r\n # download_data(label)\r\n extract_data(label)\r\n return label_train, label_test\r\n\r\n\r\ndef valid_data():\r\n # verify data existing\r\n # it will extract data into 'data/{label}' if folder not exist.\r\n label_train, label_test = valid_data_exist()\r\n\r\n # clean existing link folder\r\n if not os.path.exists(get_train_data_path()) or not os.path.exists(get_valid_data_path()):\r\n make_train_link(label_train,\r\n train_path_dict=prepare_train_link_folders(get_train_data_path()),\r\n valid_path_dict=prepare_valid_link_folders(get_valid_data_path()))\r\n else:\r\n print(\"{} or {} existed.\".format(get_train_data_path(), get_valid_data_path()))\r\n\r\n make_train_all_link(label_train)\r\n make_test_link(label_test)\r\n print('Success!')\r\n\r\n\r\ndef make_train_all_link(label_test):\r\n target_path = get_train_all_data_path()\r\n if not os.path.exists(target_path):\r\n 
os.mkdir(target_path)\r\n source_path = get_extract_path(label_test)\r\n cats, dogs = scan_pic(source_path)\r\n\r\n pics = {\r\n LABEL_CAT: cats,\r\n LABEL_DOG: dogs\r\n }\r\n\r\n train_link = _prepare_link(target_path)\r\n for _class in (LABEL_CAT, LABEL_DOG):\r\n file_names = pics[_class]\r\n pic_target_dir = train_link[_class]\r\n _make_link(file_names, source_path, pic_target_dir)\r\n else:\r\n print('Path {} already existed.'.format(target_path))\r\n\r\n\r\ndef make_test_link(label_test):\r\n test_link_path = get_test_data_path()\r\n # if os.path.exists(test_link_path) and not os.path.isdir(test_link_path):\r\n # if os.path.islink(test_link_path):\r\n # os.unlink(test_link_path)\r\n # else:\r\n # shutil.rmtree(test_link_path)\r\n if not os.path.exists(test_link_path):\r\n os.mkdir(test_link_path)\r\n test_extract_path = get_extract_path(label_test)\r\n test_link_path = os.path.abspath(os.path.join(test_link_path, 'test'))\r\n print('Link test data from {} to {}'.format(test_extract_path, test_link_path))\r\n os.symlink(test_extract_path, test_link_path, target_is_directory=True)\r\n else:\r\n print('Path {} already existed.'.format(test_link_path))\r\n\r\n\r\ndef _clean_folder(path_generator):\r\n path = path_generator()\r\n if os.path.exists(path) and os.path.isdir(path):\r\n shutil.rmtree(path)\r\n os.mkdir(path)\r\n return path\r\n\r\n\r\ndef clean_train_link_folders():\r\n return _clean_folder(get_train_data_path)\r\n\r\n\r\ndef clean_valid_link_folders():\r\n return _clean_folder(get_valid_data_path)\r\n\r\n\r\ndef _prepare_link(link_path):\r\n print('Prepare links\\'s parent {}'.format(link_path))\r\n if not os.path.exists(link_path):\r\n os.mkdir(link_path)\r\n\r\n classes = [LABEL_CAT, LABEL_DOG]\r\n link_path_dict = {}\r\n for _class in classes:\r\n link_target = os.path.abspath(os.path.join(link_path, _class))\r\n os.mkdir(link_target)\r\n link_path_dict[_class] = link_target\r\n return link_path_dict\r\n\r\n\r\ndef prepare_train_link_folders(train_link_path):\r\n return _prepare_link(train_link_path)\r\n\r\n\r\ndef prepare_valid_link_folders(valid_link_path):\r\n return _prepare_link(valid_link_path)\r\n\r\n\r\ndef make_train_link(label_train, train_path_dict, valid_path_dict):\r\n source = get_extract_path(label_train)\r\n print('Will read training images from {}'.format(source))\r\n\r\n # for dir_entry in os.scandir(source):\r\n # _class = parse_file_name(dir_entry.name)[0]\r\n # if _class in train_path_dict:\r\n # target_path = train_path_dict[_class]\r\n # os.symlink(dir_entry.path, os.path.join(target_path, dir_entry.name))\r\n cats, dogs = scan_pic(source)\r\n cat_train, cat_valid, dog_train, dog_valid = train_test_split(cats, dogs,\r\n test_size=.2, random_state=47)\r\n print('Split into cats{}-{}, cogs-{}-{}'.format(len(cat_train), len(cat_valid), len(dog_train), len(dog_valid)))\r\n\r\n train_data = {\r\n LABEL_CAT: (cat_train, cat_valid),\r\n LABEL_DOG: (dog_train, dog_valid)\r\n }\r\n\r\n for _class in (LABEL_CAT, LABEL_DOG):\r\n train_link = train_path_dict[_class]\r\n valid_link = valid_path_dict[_class]\r\n\r\n file_names = train_data[_class]\r\n _make_link(file_names[0], source, train_link)\r\n _make_link(file_names[1], source, valid_link)\r\n\r\n\r\ndef scan_pic(source):\r\n file_list = os.listdir(source)\r\n cats = list(filter(lambda x: parse_file_name(x)[0] == LABEL_CAT, file_list))\r\n dogs = list(filter(lambda x: parse_file_name(x)[0] == LABEL_DOG, file_list))\r\n print('Find cats {}, dogs {}'.format(len(cats), len(dogs)))\r\n return cats, 
dogs\r\n\r\n\r\ndef _make_link(file_names, source_dir, target_dir):\r\n print('Link images from {} to {}'.format(source_dir, target_dir))\r\n for file_name in file_names:\r\n # train data\r\n file_path = os.path.join(source_dir, file_name)\r\n os.symlink(file_path, os.path.join(target_dir, file_name))\r\n","sub_path":"helpers/helpers_impl.py","file_name":"helpers_impl.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"647925356","text":"#THEME 8\n#EXERCICE 9.1\ndef diff_sym(E1,E2):\n \"\"\"set[alpha]**2-> set[alpha]\n Retourne la différence symétrique entre deux ensembles E1 et E2\n \"\"\"\n #e:set[alpha]\n e = set()\n #i: alpha\n for i in E1:\n if i not in E2:\n e.add(i)\n #c:alpha\n for c in E2:\n if c not in E1:\n e.add(c) \n return e\n#jeu de tests\nassert diff_sym({2,5,9},{3,5,8})=={2,3,8,9}\nassert diff_sym({'a','b','c'},{'d','a','i'}) == {'c','b','d','i'}\n\n\n#Question2:\ndef diff_sym2(E1,E2):\n '''set[alpha]**2-> set[alpha]\n Retourne la différence symétrique entre deux ensembles E1 et E2\n '''\n #e:set[alpha]\n e=E1|E2\n #intersection:set[]alpha\n intersection=E1&E2\n return e-intersection\n#jeu de tests\nassert diff_sym2({2,5,9},{3,5,8})=={2,3,8,9}\nassert diff_sym2({'a','b','c'},{'d','a','i'}) == {'c','b','d','i'}\n\n\n#EXERCICE 9.6\n\n\n# Dict_Ang_Fra : dict[str:str]\nDict_Ang_Fra = {'the': 'le', 'cat': 'chat', 'fish' : 'poisson', 'catches': 'attrape'}\n# Dict_Fra_Ita : dict[str:str]\nDict_Fra_Ita = {'le': 'il', 'chat': 'gatto', 'poisson' : 'pesce', 'attrape': 'cattura'}\n\n\n#Question 1\ndef traduction_mot_a_mot(L,D):\n '''list[str] * dict[str:str] -> list[str]\n retourne la clé du dictionnaire D pour chaque mot dans L\n '''\n #l:list[str]\n l=[]\n #i:str\n for i in L:\n l.append(D[i]) \n return l\nassert traduction_mot_a_mot(['cat'],Dict_Ang_Fra) == ['chat']\n\n\n#Question 2:\ndef dictionnaire_inverse(D):\n '''dict[str:str] -> dict[str:str]\n retourne l'inverse du dictionnaire D\n '''\n #res:dict[str:str]\n res = {}\n #i:str\n for i in D:\n res[D[i]]=i\n return res\n#Jeu de test\nassert dictionnaire_inverse(Dict_Fra_Ita) == {'il':'le','gatto':'chat','pesce':'poisson','cattura':'attrape'}\n\n\n#Question 3\ndef composition_dictionnaires(D1,D2):\n '''dict[str:str]**2 -> dict[str:str]\n hypothese : len(D1) == len (D2)\n retourne la composition des dictionnaires entre D1 ET D2'''\n #res:dict[str:str]\n res = {}\n #c:str\n for c in D1:\n res[c]=D2[D1[c]]\n return res\nassert composition_dictionnaires(Dict_Ang_Fra,Dict_Fra_Ita) == {'the': 'il', 'cat': 'gatto', 'fish' : 'pesce', 'catches': 'cattura'}\n\n#D1 'le':'the'\n#D2 'the':'il'\n\n#D1['le']=='the'\n#D2['the']=='il'\n\n#res['le']=D2[D1['le']]==D2['the']=='il'\n\n#EXERCICE 9.5\ndef est_lettre(c):\n return((c >= 'a') and (c <= 'z'))\\\n or ((c >='A') and (c <='Z'))\\\n or (c in {'é','à','è','ù'})\n \ndef frequence_lettre(s):\n '''str->dict[str:int]\n renvoie un dictionnaire avec les lettre et leur nombre d occurence dans s\n '''\n #res:dict[str:int]\n res = {}\n #nb:int\n nb = 0\n for c in s:\n if est_lettre(c):\n nb\n \n","sub_path":"1I001 PYTHON/TME8.py","file_name":"TME8.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"214018265","text":"import maze\n\n\ndef move(con, str):\n \"\"\"Use to send multiple commands at once.\"\"\"\n for s in str:\n con.move(s)\n\n\nc = maze.Connect(\"SK1PY\", \"archeopteryx\")\nfield = 
c.get_all()\n\nisles = [field[i][-1] for i in range(len(field))]\njmps = [(False, 1) for i in range(len(isles))]\n\nfor i in range(len(jmps)-4, len(jmps)): # setup for last 4 blocks\n jmps[i] = (isles[i] == 1, 1)\n\ncurrent = len(jmps) - 4\nwhile current >= 0: # iterating from back\n current -= 1\n if isles[current] == 0:\n continue\n elif jmps[current+1][0] is True:\n jmps[current] = (True, 1)\n elif jmps[current+4][0] is True:\n jmps[current] = (True, 4)\n elif jmps[current+6][0] is True:\n jmps[current] = (True, 6)\n elif jmps[current+8][0] is True:\n jmps[current] = (True, 8)\n\nadress = 0\nwhile True: # reconstructing solution\n print(adress)\n _, jmp = jmps[adress]\n if jmp == 1:\n move(c, \"d\")\n elif jmp == 4:\n move(c, \"wddd\")\n elif jmp == 6:\n move(c, \"wwdddd\")\n elif jmp == 8:\n move(c, \"wdwddddd\")\n adress += jmp","sub_path":"Dinosaur/DoubleJump.py","file_name":"DoubleJump.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"143088172","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n# @File : jd.py\r\n# @Time : 2020/4/4 16:50\r\n# @Author : LJL\r\n# @Version : 1.0\r\n# @License : (C)Copyright 2019-2100, LJL\r\n# @Desc : None\r\n\r\n# here put the import lib\r\n\r\n\r\nimport requests\r\nimport re\r\nimport time\r\nimport json\r\nimport urllib3\r\nimport pymysql\r\nimport random\r\nimport math\r\n\r\nfrom urllib import parse\r\nfrom lxml import etree\r\nfrom threading import Thread,Lock\r\n\r\n\r\nclass JD(object):\r\n def __init__(self, kw,thnum):\r\n # 前30页url\r\n self.prv_url = 'https://search.jd.com/Search?'\r\n # 后30页url\r\n self.after_url = 'https://search.jd.com/s_new.php?'\r\n # 评论url\r\n self.comment_url = 'https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv{}&productId={}&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&fold=1'\r\n # 关键词\r\n self.kw = kw\r\n self.thnum = int(thnum)\r\n self.page_num = 1\r\n self.session = requests.session()\r\n self.user_agent = [\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2\",\r\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2\",\r\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36\",\r\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2\",\r\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0\",\r\n ]\r\n # 商品信息\r\n self.headers = {\r\n 'User-Agent': random.choice(self.user_agent),\r\n }\r\n # 评论\r\n self.comment_headers = {\r\n 'Referer': '',\r\n 'User-Agent': 
random.choice(self.user_agent),\r\n }\r\n # IP代理\r\n self.ip = [\r\n '117.191.11.113:80',\r\n '117.191.11.109:8080',\r\n '117.191.11.80:80',\r\n '117.191.11.76:8080',\r\n '117.191.11.80:80',\r\n '117.191.11.108:80',\r\n '117.191.11.111:80',\r\n '117.191.11.109:8080',\r\n '39.135.24.11:80',\r\n '117.191.11.109:80',\r\n '117.191.11.108:8080',\r\n '117.191.11.110:8080',\r\n '39.137.69.7:80',\r\n '39.137.69.7:8080',\r\n '39.137.69.10:8080'\r\n ]\r\n self.proxies = {\r\n 'http': random.choice(self.ip),\r\n }\r\n # 数据库连接\r\n self.connect = pymysql.connect(host='localhost',port=3306,user='root',passwd='0000',db='scrapytest')\r\n self.cur = self.connect.cursor()\r\n # 存储已经获取到的商品的id,避免重复爬取数据\r\n self.save_shop_id = []\r\n\r\n self.lock = Lock()\r\n\r\n def join_list(self, res):\r\n '''|拼接获取到的列表中的数据并去掉空格'''\r\n if len(res) != 0:\r\n result = '|'.join(res).strip()\r\n else:\r\n result = ''.join(res).strip()\r\n return result\r\n\r\n def join_null(self,res):\r\n '''判断并拼接获取到的列表中的数据并去掉空格'''\r\n if len(res) != 0:\r\n result = ''.join(res).strip()\r\n else:\r\n result = ''\r\n return result\r\n\r\n def get_shop_page_num(self):\r\n '''获取当前商品有多少页'''\r\n params = {\r\n 'keyword': self.kw,\r\n 'enc': 'utf-8',\r\n 'page': '1',\r\n }\r\n response = requests.get(self.prv_url + parse.urlencode(params), headers=self.headers, verify=False).content\r\n html = etree.HTML(response)\r\n page = html.xpath('//div[@id=\"J_topPage\"]/span/i/text()')[0]\r\n return page\r\n\r\n def get_price(self,url):\r\n '''提取商品的价格'''\r\n try:\r\n id = re.match(r'.*?(\\d+)\\.html', url).group(1)\r\n price_url = 'https://p.3.cn/prices/mgets?callback=jQuery2414702&type=1&area=1&pdtk=&pduid=15282860256122085625433&pdpin=&pin=null&pdbp=0&skuIds=J_{}'.format(id)\r\n price = requests.get(price_url,headers=self.headers,verify=False).text\r\n price_json = json.loads(re.match(r'jQuery2414702\\(\\[(.*)\\]\\)', price).group(1))\r\n return float(price_json.get('p', 0))\r\n except Exception as e:\r\n return 0\r\n\r\n def get_page_html(self):\r\n '''只获取页面html'''\r\n page = int(self.get_shop_page_num())\r\n num = 1\r\n print('{}商品总共有{}页'.format(self.kw, page))\r\n while True:\r\n '''前30条信息'''\r\n params = {\r\n 'keyword': self.kw,\r\n 'enc': 'utf-8',\r\n 'page': self.page_num,\r\n }\r\n p_url = self.prv_url + parse.urlencode(params)\r\n response = requests.get(p_url, headers=self.headers, verify=False).text.encode(encoding='utf-8', errors='ignore')\r\n html = etree.HTML(response)\r\n self.get_detail_url(html)\r\n\r\n '''后30条信息'''\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.108 Safari/537.36',\r\n \"referer\": p_url,\r\n }\r\n af_params = {\r\n 'keyword': self.kw,\r\n 'enc': 'utf-8',\r\n 'page': self.page_num + 1,\r\n 'qrst': '1',\r\n 'rt': '1',\r\n 'stop': '1',\r\n 'vt': '2',\r\n 'wq': self.kw,\r\n # 's': '27',\r\n 'scrolling': 'y',\r\n 'log_id': round(time.time(), 5),\r\n }\r\n af_url = self.after_url + parse.urlencode(af_params)\r\n af_response = requests.get(af_url, headers=headers, verify=False).text.encode(encoding='utf-8', errors='ignore')\r\n af_html = etree.HTML(af_response)\r\n self.get_detail_url(af_html)\r\n\r\n print('第{}页下载完成!'.format(num))\r\n num += 1\r\n\r\n if self.page_num < page*2-1:\r\n self.page_num += 2\r\n time.sleep(2)\r\n else:\r\n break\r\n\r\n def get_detail_url(self,html):\r\n '''获取商品详情页的url'''\r\n contents = html.xpath('//li[@class=\"gl-item\"]')\r\n # 创建thnum个线程来执行\r\n for i in range(math.ceil(len(contents)/self.thnum)):\r\n # 将线程存储\r\n 
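            # Note (editorial): thread_count holds the Thread objects of the current
            # batch (at most self.thnum of them) so they can all be join()ed before
            # the next batch of detail pages is fetched.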
thread_count = []\r\n for content in contents[i*self.thnum:(i+1)*self.thnum]:\r\n try:\r\n url = content.xpath('.//div[@class=\"p-img\"]/a/@href')[0]\r\n shop_id= int(content.xpath('./@data-sku')[0])\r\n if shop_id not in self.save_shop_id:\r\n self.save_shop_id.append(shop_id)\r\n t = Thread(target=self.get_shop_detail_info, args=(url, shop_id))\r\n thread_count.append(t)\r\n t.start()\r\n else:\r\n print('{}商品已经存在!'.format(shop_id))\r\n except Exception as e:\r\n print('一条详情页面链接提取错误!')\r\n\r\n for item in thread_count:\r\n item.join()\r\n\r\n def get_shop_detail_info(self,detail_url,shop_id):\r\n '''获取商品的详情信息'''\r\n url = 'https://' + detail_url.split('//')[1]\r\n con = requests.get(url,headers=self.headers,verify=False).text.encode(encoding='gbk', errors='ignore')\r\n content = etree.HTML(con)\r\n item = {}\r\n # 爬取时间\r\n item['spider_date'] = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\r\n # 商品ID\r\n item['shop_id'] = shop_id\r\n # 店铺名称\r\n item['store_name'] = self.join_null(content.xpath('//div[@class=\"w\"]//div[@class=\"item\"]/div[@class=\"name\"]/a/@title'))\r\n # 商品\r\n item['shop'] = self.join_null(content.xpath('//div[@class=\"w\"]//div[@class=\"itemInfo-wrap\"]/div[@class=\"sku-name\"]/text()'))\r\n # 链接\r\n item['url'] = url\r\n # 价格\r\n item['price'] = self.get_price(url)\r\n # 品牌\r\n item['brand'] = self.join_null(content.xpath('//ul[@id=\"parameter-brand\"]/li/a/text()'))\r\n\r\n comment = self.get_comment_info(shop_id)\r\n # 评论总数\r\n item['comment_count'] = comment[0]\r\n # 好评率\r\n item['good_rate'] = comment[1]\r\n # ���评率\r\n item['poor_rate'] = comment[2]\r\n # 选择的颜色\r\n item['select_color'] = self.join_list(content.xpath('//div[@class=\"summary p-choose-wrap\"]//div[@id=\"choose-attr-1\"]/div[@class=\"dd\"]/div/@data-value'))\r\n # 选择的大小\r\n item['select_size'] = self.join_list(content.xpath('//div[@class=\"summary p-choose-wrap\"]//div[@id=\"choose-attr-2\"]/div[@class=\"dd\"]/div/@data-value'))\r\n # 效果图片\r\n item['image'] = self.join_list(content.xpath('//div[@id=\"spec-list\"]/ul/li/img/@src'))\r\n\r\n self.save_con(item)\r\n print(item)\r\n self.get_comment(comment[3], shop_id,item['shop'])\r\n\r\n def get_comment_info(self,shopid):\r\n '''获取评论的页数、数量、好评率、差评率'''\r\n self.comment_headers['Referer'] = 'https://item.jd.com/{}.html#comment'.format(shopid)\r\n url = self.comment_url.format(random.choice(range(100,99999)),shopid,0)\r\n response = self.session.get(url,headers=self.comment_headers,proxies=self.proxies,verify=False).text\r\n try:\r\n json_data = re.findall(r'fetchJSON_comment98vv\\d+\\((.*)\\)', response)[0]\r\n except Exception as e:\r\n print('{}商品提取错误!'.format(shopid))\r\n about_comment = {'goodRate': 1, 'poorRate': 0, 'commentCount': 0}\r\n good_rate = about_comment.get('goodRate', 1)\r\n poor_ate = about_comment.get('poorRate', 0)\r\n comment_count = about_comment.get('commentCount', 0)\r\n return (comment_count, good_rate, poor_ate, 1)\r\n else:\r\n contents = json.loads(json_data)\r\n pagenum = contents.get('maxPage', 0)\r\n about_comment = contents.get('productCommentSummary')\r\n if len(about_comment) == 0:\r\n about_comment = {'goodRate':1,'poorRate':0,'commentCount':0}\r\n\r\n good_rate = about_comment.get('goodRate',1)\r\n poor_ate = about_comment.get('poorRate',0)\r\n comment_count = about_comment.get('commentCount',0)\r\n\r\n return (comment_count,good_rate,poor_ate,pagenum)\r\n\r\n def get_comment(self,pagenum,shopid,shop):\r\n '''获取商品评论'''\r\n page = 0\r\n # 只获取前十页\r\n if pagenum > 5:\r\n num = 5\r\n else:\r\n num = 
pagenum\r\n while page < num:\r\n self.comment_headers['Referer'] = 'https://item.jd.com/{}.html#comment'.format(shopid)\r\n url = self.comment_url.format(random.choice(range(100, 99999)), shopid, page)\r\n response = self.session.get(url, headers=self.comment_headers, proxies=self.proxies, verify=False).text\r\n try:\r\n json_data = re.findall(r'fetchJSON_comment98vv\\d+\\((.*)\\)', response)[0]\r\n except Exception as e:\r\n print('{}商品第{}页评论提取错误!'.format(shop, page+1))\r\n else:\r\n contents = json.loads(json_data)\r\n for content in contents.get('comments'):\r\n item = {}\r\n item['shop_id'] = shopid # 商品ID\r\n item['shop'] = content.get('referenceName', '') # 商品名称\r\n item['color'] = content.get('productColor', '') # 商品颜色\r\n item['size'] = content.get('productSize', '') # 商品大小\r\n item['user_id'] = content.get('id', '') # 用户ID\r\n item['nickname'] = content.get('nickname', '') # 用户昵称\r\n item['user_image'] = content.get('userImage', '') # 用户头像\r\n item['comment_time'] = content.get('creationTime', '') # 评论时间\r\n item['content'] = content.get('content', '') # 评论内容\r\n item['shopping_time'] = content.get('referenceTime', '') # 购买时间\r\n item['good'] = content.get('usefulVoteCount', '') # 点赞数量\r\n item['score'] = content.get('score', '') # 评分\r\n item['reply_count'] = content.get('replyCount', '') # 回复数量\r\n item['user_level'] = content.get('userLevelName', '') # 用户会员等级\r\n item['user_client'] = content.get('userClientShow', '') # 用户客户端\r\n\r\n print(item)\r\n self.save_comment(item)\r\n\r\n print('{}商品第{}页评论提取完成!'.format(shop, page+1))\r\n\r\n finally:\r\n page += 1\r\n time.sleep(random.choice(range(3,6)))\r\n\r\n print('{}商品评论提取完成!'.format(shop))\r\n print()\r\n\r\n def save_con(self,item):\r\n '''保存数据'''\r\n self.lock.acquire()\r\n self.cur.execute(\r\n \"\"\"insert into jd_shop (spider_date,shop_id,store_name,shop,url,price,brand,comment_count,good_rate,poor_rate,select_color,select_size,image) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\",\r\n (\r\n item['spider_date'],\r\n item['shop_id'],\r\n item['store_name'],\r\n item['shop'],\r\n item['url'],\r\n item['price'],\r\n item['brand'],\r\n item['comment_count'],\r\n item['good_rate'],\r\n item['poor_rate'],\r\n item['select_color'],\r\n item['select_size'],\r\n item['image'],\r\n ))\r\n self.connect.commit()\r\n self.lock.release()\r\n\r\n def save_comment(self,item):\r\n '''保存数据'''\r\n self.lock.acquire()\r\n self.cur.execute(\r\n \"\"\"insert into jd_comment (shop_id,shop,shop_color,shop_size,user_id,nickname,user_image,comment_time,content,shopping_time,good,score,reply_count,user_level,user_client) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\",\r\n (\r\n item['shop_id'],\r\n item['shop'],\r\n item['color'],\r\n item['size'],\r\n item['user_id'],\r\n item['nickname'],\r\n item['user_image'],\r\n item['comment_time'],\r\n item['content'],\r\n item['shopping_time'],\r\n item['good'],\r\n item['score'],\r\n item['reply_count'],\r\n item['user_level'],\r\n item['user_client'],\r\n ))\r\n self.connect.commit()\r\n self.lock.release()\r\n\r\n def get_data_from_mysql(self):\r\n '''从数据库读取已经获取的商品id'''\r\n self.cur.execute('select shop_id from jd_shop')\r\n res = self.cur.fetchall()\r\n\r\n for shopid in res:\r\n self.save_shop_id.append(shopid[0])\r\n\r\n def main(self):\r\n self.get_data_from_mysql()\r\n self.get_page_html()\r\n\r\n\r\nif __name__ == '__main__':\r\n urllib3.disable_warnings()\r\n\r\n keyword = input('请输入商品名称:')\r\n th_num = input('请输入多少个线程进行爬取信息:')\r\n\r\n jd = JD(keyword,th_num)\r\n 
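    # Note (editorial): main() first loads the shop ids already stored in MySQL
    # (so duplicates are skipped), then walks the search result pages, spawning
    # worker threads for the product detail and comment requests.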
jd.main()\r\n\r\n","sub_path":"电商平台/jingdong_requests/jd.py","file_name":"jd.py","file_ext":"py","file_size_in_byte":16503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"476365401","text":"from collections import namedtuple\nfrom enum import Enum\nimport statistics\nimport re\nimport math\nimport requests\nimport langid\n\nUA_REVO = -38854900\nLUHANSK = -2424065\nOZIV = -72444174\nTROIESHCHYNA = -3933663\nTYPICAL_KYIV = -32195333\nDYNAMO_KYIV = -4645142\nPOLTAWA = -1919231\n\n__author__ = 'aindrias'\n\nPost = namedtuple('Post', 'language like share')\n\n\nclass Language(Enum):\n UKRAINIAN = 1\n RUSSIAN = 2\n BOTH = 3\n UNDEFINED = 4\n\n\ndef load_html(group_id, offset):\n url = 'https://vk.com/al_wall.php'\n data = 'act=get_wall&al=1&fixed=&offset=' + offset.__str__() + '&owner_id=' + group_id.__str__() + '&type=own'\n return requests.post(url, data).text\n\n\ndef language_langid(text):\n classify = langid.classify(text)\n lang_code = classify[0]\n probability = classify[1]\n if probability < 0.5:\n return Language.UNDEFINED\n elif lang_code == 'uk':\n return Language.UKRAINIAN\n elif lang_code == 'ru':\n return Language.RUSSIAN\n else:\n return Language.UNDEFINED\n\n\ndef language(text):\n text = text.replace('Показати повністю', '')\n\n ukrainian_letters = set('іІїЇєЄґҐ')\n looks_like_ukrainian = any((c in ukrainian_letters) for c in text)\n\n russian_letters = set('ЁёЪъЭэЫы')\n looks_like_russian = any((c in russian_letters) for c in text)\n\n if looks_like_ukrainian and looks_like_russian:\n return Language.BOTH\n elif looks_like_ukrainian:\n return Language.UKRAINIAN\n elif looks_like_russian:\n return Language.RUSSIAN\n else:\n return language_langid(text)\n\n\ndef parse_single_post(snippet):\n message = re.search('
(.*?)
', snippet)\n like = re.search('post_like_count fl_l\" id=\"like_count-\\d*_\\d*\">(\\d*)(\\d*) 1:\n print(' SD ' + statistics.stdev(numbers_list).__int__().__str__())\n\n print('\\n[%s]' % ', '.join(map(str, transform(numbers_list))))\n print('\\n')\n\n\ndef transform(param):\n param.sort()\n result = []\n while len(param) > 0:\n pop = param.pop(0)\n while result.__len__() <= pop:\n result.append(0)\n result[-1] += 1\n return result\n\n\ndef analyze(posts):\n ukrainian_posts = []\n russian_posts = []\n undefined_posts = []\n both_posts = []\n for post in posts:\n if post.language == Language.UKRAINIAN:\n ukrainian_posts.append(post)\n elif post.language == Language.RUSSIAN:\n russian_posts.append(post)\n elif post.language == Language.UNDEFINED:\n undefined_posts.append(post)\n elif post.language == Language.BOTH:\n both_posts.append(post)\n\n print(\"LIKES\", \"\\n\")\n print(\"UKRAINIAN \" + (ukrainian_posts.__len__() * 100 / posts.__len__()).__int__().__str__() + '%')\n print_list((likes(ukrainian_posts)))\n print(\"RUSSIAN \" + (russian_posts.__len__() * 100 / posts.__len__()).__int__().__str__() + '%')\n print_list((likes(russian_posts)))\n print(\"BOTH \" + (both_posts.__len__() * 100 / posts.__len__()).__int__().__str__() + '%')\n print_list(likes(both_posts))\n print(\"UNDEFINED \" + (undefined_posts.__len__() * 100 / posts.__len__()).__int__().__str__() + '%')\n print_list(likes(undefined_posts))\n\n # print(\"SHARES\", \"\\n\")\n # print(\"UKRAINIAN\")\n # print_list(shares(ukrainian_posts))\n # print(\"RUSSIAN\")\n # print_list(shares(russian_posts))\n # print(\"BOTH\")\n # print_list(shares(both_posts))\n # print(\"UNDEFINED\")\n # print_list(shares(undefined_posts))\n\n\ndef likes(posts):\n return [post.like for post in posts]\n\n\ndef shares(posts):\n return [post.share for post in posts]\n\n\ndef load_posts(group_id, count=50):\n offset = 20\n posts = []\n while posts.__len__() < count:\n posts.extend(parse_posts(load_html(group_id, offset)))\n offset += 10\n print(posts.__len__(), '..')\n print('\\n')\n return posts\n\n\nif __name__ == '__main__':\n analyze(load_posts(TYPICAL_KYIV, 5000))","sub_path":"LoadVkPosts.py","file_name":"LoadVkPosts.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"208042506","text":"def _merge(array, left_starts, left_ends, right_starts, right_ends):\n \"\"\"Swaps values in array depending on order\n\n Starts with array[left] up to array[right] until array[end]\n \"\"\"\n A = array[left_starts : left_ends + 1]\n B = array[right_starts : right_ends + 1]\n l, r = 0, 0\n\n i = left_starts\n while l < len(A) and r < len(B):\n if A[l] <= B[r]:\n array[i] = A[l]\n l += 1\n else:\n array[i] = B[r]\n r += 1\n i += 1\n if l < len(A):\n # add remaining of A\n for l in range(l, len(A)):\n array[i] = A[l]\n i += 1\n else:\n # add remaining of B\n for r in range(r, len(B)):\n array[i] = B[r]\n i += 1\n\n\ndef merge_sort(array: list, left: int = 0, right: int = None):\n \"\"\"Sort array\"\"\"\n if right is None:\n right = len(array) - 1\n\n if right == left:\n return\n\n m = (right + left) // 2\n merge_sort(array, left, m)\n merge_sort(array, m + 1, right)\n _merge(array, left, m, m + 1, 
right)\n","sub_path":"specialization-data-structures-algorithms/algorithmic-toolbox/week4/solutions/4_number_of_inversions/python/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"345019277","text":"def time_converter(time):\n\t# replace this for solution\n\tsplit = time.split(':')\n\n\tif int(split[0]) < 12:\n\t\tif split[0] == '00':\n\t\t\tsplit[0] = 12\n\t\tstring = ' a.m.'\n\telse:\n\t\tif split[0] != '12':\n\t\t\tsplit[0] = int(split[0]) - 12\n\t\tstring = ' p.m.'\n\n\treturn str(int(split[0])) + ':' + split[1] + string\n\n\nif __name__ == '__main__':\n\tprint(\"Example:\")\n\tprint(time_converter('12:30'))\n\n\t# These \"asserts\" using only for self-checking and not necessary for auto-testing\n\tassert time_converter('12:30') == '12:30 p.m.'\n\tassert time_converter('09:00') == '9:00 a.m.'\n\tassert time_converter('23:15') == '11:15 p.m.'\n\tprint(\"Coding complete? Click 'Check' to earn cool rewards!\")\n","sub_path":"Scientific Expedition/time-converter-24h-to-12h.py","file_name":"time-converter-24h-to-12h.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"227455137","text":"epochs = 10\n# We don't use the whole dataset for efficiency purpose, but feel free to increase these numbers\nn_train_items = 640\nn_test_items = 640\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.utils import data\nimport keras\n\nimport time\nclass Arguments():\n def __init__(self):\n self.batch_size = 8\n self.test_batch_size = 8\n self.epochs = epochs\n self.lr = 0.02\n self.seed = 1\n self.log_interval = 1 # Log info at each batch\n self.precision_fractional = 3\n\nargs = Arguments()\n\n_ = torch.manual_seed(args.seed)\nimport syft as sy # import the Pysyft library\nhook = sy.TorchHook(torch) # hook PyTorch to add extra functionalities like Federated and Encrypted Learning\nx_train_features =torch.from_numpy(np.load('x_train_features.npy'))\ny_train = torch.from_numpy(np.load('y_train.npy'))\n\nx_test_features = torch.from_numpy(np.load('x_test_features.npy'))\ny_test = torch.from_numpy(np.load('y_test.npy'))\n\n#train_loader = (x_train_features, y_train)\n#test_loader = (x_test_features,y_test)\nmy_dataset=data.TensorDataset(x_train_features,y_train)\ndataset2 = data.TensorDataset(x_test_features,y_test)\n\nprint(x_train_features.shape,y_train.shape,x_test_features.shape,y_test.shape)\n\n# simulation functions\ndef connect_to_workers(n_workers):\n return [\n sy.VirtualWorker(hook, id=f\"worker{i+1}\")\n for i in range(n_workers)\n ]\ndef connect_to_crypto_provider():\n return sy.VirtualWorker(hook, id=\"crypto_provider\")\n\nworkers = connect_to_workers(n_workers=2)\ncrypto_provider = connect_to_crypto_provider()\ndef get_private_data_loaders(precision_fractional, workers, crypto_provider):\n \n def one_hot_of(index_tensor):\n \"\"\"\n Transform to one hot tensor\n \n Example:\n [0, 3, 9]\n =>\n [[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]\n \n \"\"\"\n onehot_tensor = torch.zeros(*index_tensor.shape, 10) # 10 classes for MNIST\n onehot_tensor = onehot_tensor.scatter(1, index_tensor.view(-1, 1), 1)\n return onehot_tensor\n \n def secret_share(tensor):\n 
\"\"\"\n Transform to fixed precision and secret share a tensor\n \"\"\"\n return (\n tensor\n .fix_precision(precision_fractional=precision_fractional)\n .share(*workers, crypto_provider=crypto_provider, requires_grad=True)\n )\n \n transformation = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n train_loader = torch.utils.data.DataLoader(my_dataset,batch_size=args.batch_size)\n \n private_train_loader = [\n (secret_share(data), secret_share(target))\n for i, (data,target) in enumerate(train_loader)\n if i < n_train_items / args.batch_size\n ]\n \n test_loader = torch.utils.data.DataLoader(dataset2,batch_size=args.test_batch_size)\n \n private_test_loader = [\n (secret_share(data), secret_share(target))\n for i, (data,target) in enumerate(test_loader)\n if i < n_test_items / args.test_batch_size\n ]\n \n return private_train_loader, private_test_loader\n \n \nprivate_train_loader, private_test_loader = get_private_data_loaders(\n precision_fractional=args.precision_fractional,\n workers=workers,\n crypto_provider=crypto_provider\n)\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(4732, 128)\n self.fc2 = nn.Linear(128, 64)\n self.fc3 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = x.view(-1, 4732)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\nclass NeuralNet(nn.Module):\n def __init__(self):\n super(NeuralNet, self).__init__()\n self.conv = nn.Conv2d(1, 28, kernel_size=3)\n self.pool = nn.AvgPool2d(2)\n self.hidden= nn.Linear(28*13*13, 128)\n #self.drop = nn.Dropout(0.2)\n self.out = nn.Linear(128, 10)\n #self.act = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x) # [batch_size, 28, 26, 26]\n x = F.relu(x)\n x = self.pool(x) # [batch_size, 28, 13, 13]\n x = x.view(args.batch_size, -1) # [batch_size, 28*13*13=4732]\n x = self.hidden(x)\n x = F.relu(x) # [batch_size, 128]\n #x = F.dropout(0.5)\n x = self.out(x) # [batch_size, 10]\n return x\nfeature_layers = [\n keras.layers.Conv2D(28, (3, 3), padding='same', input_shape=(28, 28, 1)),\n #keras.layers.Activation('sigmoid'),\n keras.layers.AveragePooling2D(pool_size=(2,2)),\n keras.layers.Flatten()\n #keras.layers.Activation('relu'),\n #keras.layers.Conv2D(32, (3, 3), padding='same'),\n #keras.layers.Dropout(.25),\n \n]\n\nclassification_layers = [\n keras.layers.Dense(128),\n keras.layers.Activation('relu'),\n keras.layers.Dropout(.50),\n keras.layers.Dense(5),\n keras.layers.Activation('softmax')\n]\n\nmodel = keras.models.Sequential(feature_layers + classification_layers)\n\n\ndef train(args, model, private_train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(private_train_loader): # <-- now it is a private datase\n\t\n start_time = time.time()\n print (\"Start Training\")\n \n optimizer.zero_grad()\n \n output = model(data)\n \n # loss = F.nll_loss(output, target) <-- not possible here\n batch_size = output.shape[0]\n loss = ((output - target)**2).sum().refresh()/batch_size\n \n loss.backward()\n \n optimizer.step()\n print (\"Done first step\")\n if batch_idx % args.log_interval == 0:\n loss = loss.get().float_precision()\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tTime: {:.3f}s'.format(\n epoch, batch_idx * args.batch_size, len(private_train_loader) * args.batch_size,\n 100. 
* batch_idx / len(private_train_loader), loss.item(), time.time() - start_time))\n\ndef test(args, model, private_test_loader):\n    model.eval()\n    test_loss = 0\n    correct = 0\n    with torch.no_grad():\n        for data, target in private_test_loader:\n            start_time = time.time()\n            \n            output = model(data)\n            pred = output.argmax(dim=1)\n            #print(pred)\n            #print(target)\n            correct += pred.eq(target.argmax(dim=1)).sum()\n\n    correct = correct.get().float_precision()\n    print('\\nTest set: Accuracy: {}/{} ({:.0f}%)\\n'.format(\n        correct.item(), len(private_test_loader)* args.test_batch_size,\n        100. * correct.item() / (len(private_test_loader) * args.test_batch_size)))\n\n#model = Net()\n#model = NeuralNet()\npre_trained_weights = 'pre-trained.h5'\nmodel.load_weights(pre_trained_weights)\nmodel = model.fix_precision().share(*workers, crypto_provider=crypto_provider, requires_grad=True)\noptimizer = optim.SGD(model.parameters(), lr=args.lr)\noptimizer = optimizer.fix_precision() \nfor epoch in range(1, args.epochs + 1):\n    train(args, model, private_train_loader, optimizer, epoch)\n    test(args, model, private_test_loader)\n\n","sub_path":"examples/tutorials/fine-tune2.py","file_name":"fine-tune2.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"290725803","text":"#no longer used in checkbook app\ndef CSV_check_balance():\n    '''\n    check_balance() takes no arguments and returns the sum of transactions.csv\n    '''\n    import csv\n    with open(\"transactions.csv\") as file:\n        balance = 0\n        for row in csv.reader(file):\n            balance += float(row[0])\n        return balance\n\ndef check_balance(user):\n    '''\n    select all transaction values from user transactions table and returns sum of those values\n    works with sql commands\n    '''\n    import sqlite3\n    conn = sqlite3.connect('checkbook.db')\n    c = conn.cursor()\n\n    rows = []\n    for row in c.execute('SELECT amount FROM '+ user):\n        rows.append(row[0])\n    balance = sum(rows)\n    # close the connection before returning; the original conn.close() sat\n    # after the return statement and was never reached\n    conn.close()\n    return balance\n\n#no longer used in checkbook app\ndef make_transaction(amount,category,description,timestamp):\n    '''\n    make_transaction() takes in transaction amount, category and the time the transaction was made and stores them in transactions.csv\n    '''\n    import csv\n    fields = [amount, category, description, timestamp]\n    with open(r'transactions.csv', 'a') as file:\n        writer = csv.writer(file)\n        writer.writerow(fields)\n    pass\n\ndef add_description():\n    '''\n    Prompts user to add description\n    \n    returns user input for description if user opts to add one\n\n    else returns 'n/a'\n    '''\n    prompt_input = input(\"Would You Like to Add a Description? (y/n) \")\n    while prompt_input.lower() not in ['y','n']:\n        prompt_input = input(\"Invalid Response, Please Enter 'y' for Yes or 'n' for No \")\n    \n    if prompt_input.lower() == 'y':\n        return input(\"Please Enter Description. 
\\n\")\n \n else:\n print('~No Description~')\n return 'n/a'\n \ndef print_table(raw_table):\n '''\n takes in list of tuples (standard output for sql queries) and buids a table around values\n\n row height will increase if description length > 30 characters\n '''\n print ('*------------------------------------------------------------------------*\\n' +\n '|Date |Amount |Category |Description |\\n' +\n '*------------------------------------------------------------------------*')\n for i in raw_table:\n date_col = '|' + date_from_timestamp(float(i[0])) + '|'\n # print((10-len(str(i[1])) *' '))\n amount_col = (10-len(str(i[1]))) * ' ' + str(i[1]) + '|'\n category_col = i[2] + (15-len(i[2])) * ' ' + '|'\n description = string_wrap_text(i[3], 30).split('\\n')\n description_col = description[0] + (30-len(description[0])) * ' ' + '|'\n print(date_col + amount_col + category_col+ description_col )\n #adds extra height to rows for extra lines returned from wrap_text() function\n for i in description[1:]:\n print('| | | |' + i + (30 -len(i)) * ' ' +'|')\n print ('*------------------------------------------------------------------------*')\n\n#no longer used in checkbook app\ndef check_username():\n '''\n\n '''\n import pymysql\n new_or_existing_user = input('Are you an existing user? (y/n) ')\n #**add while to check input later\n if new_or_existing_user.strip().lower() == 'y':\n username = input('please Enter Username\\n')\n #**add while to verify real user\n return username\n else:\n new_username = input('please enter username')\n \n #**add while to check if username already taken\n add_new_user_table(new_username)\n return new_username\n\n#moved to cb_sql_functions.py and referenced there\ndef update_sql_table(time, amount, category, description, username):\n '''\n receives time (as a timestamp), transaction amount, transaction category and description, and the user's username\n uses username to reference user's unique transaction table from sql checkbook.db\n inserts new information row into user's tranaction column\n '''\n import sqlite3\n conn = sqlite3.connect('checkbook.db')\n \n c = conn.cursor()\n\n transaction = (time, amount, category, description,)\n execute_line = \"INSERT INTO \" + username + \" VALUES (?,?,?,?)\"\n c.execute(execute_line, transaction)\n\n conn.commit()\n\n conn.close()\n\n#moved to cb_sql_functions.py and referenced there\ndef add_new_user_table(user):\n '''\n receives new user name and initializes blank transaction table for new user and appends username table to contain new username\n this one is out of date from the used one, lacks commit statement and user password argument\n '''\n import sqlite3\n conn = sqlite3.connect('checkbook.db')\n c = conn.cursor()\n\n c.execute(\"CREATE TABLE \" + user + \" (date text, amount real, Category text, description text)\")\n\n c.execute(\"insert into usernames values(\"+user+\")\")\n\n conn.close()\n\n#moved to cb_sql_functions.py and referenced there\ndef select_all():\n '''\n executes SELECT * sql command on user's transacion table\n '''\n import sqlite3\n conn = sqlite3.connect('checkbook.db')\n c = conn.cursor()\n\n table = []\n for row in c.execute(\"SELECT * FROM dom\"):\n table.append(row)\n conn.close()\n return table\n\n#moved to cb_sql_functions.py and referenced there\ndef search_by_date():\n '''\n Refines transaction table select to dates within 12:00:00am on startdate and 11:59:59pm on end date\n '''\n start_date = input(\"Enter Start Date (MM/DD/YYYY) \")\n end_date = input(\"Enter End Date (MM/DD/YYYY) \")\n 
date_range = (timestamp_from_date(start_date), timestamp_from_date(end_date) + 86400)\n import sqlite3\n conn = sqlite3.connect('checkbook.db')\n c = conn.cursor()\n\n table = []\n for row in c.execute(\"SELECT * FROM dom WHERE date BETWEEN ? and ?\", date_range):\n table.append(row)\n conn.close()\n return table\n\n#moved to cb_sql_functions.py and referenced there\ndef search_by_category():\n '''\n filters select statement to return either all withdrawals or all deposits\n '''\n choose_category = input(\"Type 'D' to see Deposits and 'W' to see Withdrawals\")\n while True:\n if choose_category.lower() in ['w', 0]:\n category = 'Withdrawal'\n break\n elif choose_category.lower() in ['d', 1]:\n category = 'Deposit'\n break\n else:\n category, choose_category = pick_one(['Deposit', 'Withdrawal'])\n break\n import sqlite3\n conn = sqlite3.connect('checkbook.db')\n c = conn.cursor()\n\n table = []\n for row in c.execute(\"SELECT * FROM dom WHERE Category = ?\", [category]):\n table.append(row)\n conn.close()\n return table\n\n#moved to cb_sql_functions.py and referenced there\ndef search_by_keyword():\n '''\n refines select statement by keyword found in description\n '''\n keyword = input('Enter Keyword ')\n import sqlite3\n conn = sqlite3.connect('checkbook.db')\n c = conn.cursor()\n\n table = []\n for row in c.execute(\"SELECT * FROM dom WHERE description LIKE '%\" +keyword+\"%'\"):\n table.append(row)\n conn.close()\n return table\n\ndef timestamp_from_date(date):\n '''\n receives a date in mm:dd:yyyy format and returns the timestamp of that date\n ''' \n import time\n from datetime import datetime\n timestamp = datetime(int(date[-4:]), int(date[:2]), int(date[3:5]), 0, 0).timestamp()\n return timestamp\n\ndef date_from_timestamp(timestamp):\n '''\n receives a timestamp and returns date in mm/dd/yy format\n '''\n from datetime import date, datetime\n ugly_date = datetime.fromtimestamp(timestamp)\n pretty_date = str(ugly_date)[5:7] + '/' + str(ugly_date)[8:10] + '/' + str(ugly_date)[2:4]\n #print(str(ugly_date)[11:16])\n time = str(ugly_date)[11:16]\n return pretty_date + ' ' + time\n\ndef string_wrap_text(string, width):\n '''\n receives string input and inserts newline characters (\\n) between words when length of line exceedes specified width\n this function can be broken by setting width shorter that longest word in string, so like... 
don't do that\n\n    '''\n    word_list = string.split(' ')\n    return_string = ''\n    running_len = 0\n    for i in word_list:\n        if len(return_string + i) - running_len < width:\n            return_string += (i + ' ')\n        else:\n            return_string += ('\\n' + i + ' ') \n            running_len = len(return_string)-len('\\n' + i + ' ')\n    return return_string\n\ndef check_value(value):\n    '''\n    ensures input is a positive number <= 50000\n    '''\n    value = check_valid_number(value)\n    # re-prompt while the value is over 50,000 or negative (the original\n    # condition `> 50000 and >= 0` let negative values slip through)\n    while float(value) > 50000 or float(value) < 0:\n        value = input(\"\\nWe Do Not Accept Withdrawals or Deposits \\nof Greater Than 50,000 at a Time, \\n\\nPlease Enter A Value of Up to 50,000.00 \")\n        value = check_valid_number(value)\n    return value\n\ndef pick_one(choices):\n    '''\n    when user won't pick an acceptable selection, this function prevents user from getting caught in an infinite loop by providing all options and forcing them to choose one\n    function takes in list of choices and displays them on screen where user can use arrow keys to select\n    returns name of choice selected as well as choice's index in the input string\n    stupid proof solution for those idiot users out there\n    '''\n    from pick import pick\n    ask = \"Sorry, Your Selection is Invalid,\\n\\nPlease Select One of the Following:\"\n    selection, index = pick(choices, ask, indicator='->')\n    return selection, str(index + 1)\n\ndef check_valid_number(value):\n    '''\n    ensures input is a number, will loop infinitely until user inputs a valid number\n    tell those users to suck it, they're not breaking your code today!\n    '''\n    while True:\n        try:\n            float(value)\n            break\n        except ValueError:\n            value = input('Please Enter a Valid, Positive Number ')\n            continue\n    return value\n\ndef check_date(date):\n    while True:\n        try:\n            int(date.replace('/', ''))\n            if len(date) == 10:\n                break\n            else:\n                raise ValueError  # wrong length, force a re-prompt\n        except ValueError:\n            date = input(\"Please Enter Valid Date of format (mm/dd/yyyy): \")\n    return date\n","sub_path":"checkbook_functions.py","file_name":"checkbook_functions.py","file_ext":"py","file_size_in_byte":10159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"275992559","text":"class Solution:\n    def numSquares(self, n: int) -> int:\n        return bfs(n)\n        # return dp(n)\n\n\ndef bfs(n: int) -> int:\n    # 204 ms\t14.6 MB\n    if n < 2:\n        return n\n\n    squares = []\n    i = 1\n    while i ** 2 <= n:\n        squares.append(i ** 2)\n        i += 1\n\n    count = 0\n    rest_nums = {n}\n    while len(rest_nums) != 0:\n        count += 1\n        next_nums = set()  # renamed from `next` to avoid shadowing the builtin\n        for rest_num in rest_nums:\n            for square in squares:\n                if rest_num == square:\n                    return count\n                if rest_num < square:\n                    break\n                next_nums.add(rest_num - square)\n        rest_nums = next_nums\n\n    return count\n\n\ndef dp(n: int) -> int:\n    # Time Limit Exceeded\n    if n <= 3:\n        return n\n\n    dp = [0] * (n + 1)\n    dp[0] = 0\n    dp[1] = 1\n    dp[2] = 2\n    dp[3] = 3\n    for i in range(4, n + 1):\n        dp[i] = dp[i - 1] + 1\n        j = 1\n        while j ** 2 <= i:\n            dp[i] = min(dp[i], dp[i - j ** 2] + 1)\n            j += 1\n\n    return dp[n]\n\n\nif __name__ == '__main__':\n    s = Solution()\n    # print(s.numSquares(12))\n    # print(s.numSquares(13))\n    # print(s.numSquares(14))\n    print(s.numSquares(255))\n","sub_path":"python/279.py","file_name":"279.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"111995405","text":"__author__ = 'Gabriel'\n'''\nCreate a neural network then train it for the character recognition\nUse pybrain library\n'''\nfrom numpy import genfromtxt\nimport numpy as np\nimport 
opticalCharacterManipulation\n\n#from sklearn.neural_network import MLPClassifier # not implemented in sci learn version 0.17\nfrom pybrain.structure import FeedForwardNetwork\nfrom pybrain.structure import LinearLayer, SigmoidLayer\nfrom pybrain.structure import FullConnection\n\nfrom pybrain.datasets import ClassificationDataSet\nfrom pybrain.utilities import percentError\nfrom pybrain.supervised.trainers import BackpropTrainer\n\nimport matplotlib.pyplot as plt\nimport time\n\n\ndata = genfromtxt(\"..\\data\\Optical character recognition\\optdigits.tra\", delimiter=',', dtype=None,skip_header=0)\nprint(data.shape)\ndatatest = genfromtxt(\"..\\data\\Optical character recognition\\optdigits.tes\", delimiter=',', dtype=None,skip_header=0)\n\nstart = time.time()\n#Last row is the label (64 features + 1 label)\n\n'''\nImprovement :\nLayer join on subcells above\nThen second hidden layer\nThen result\n'''\n\n(X,Y)= opticalCharacterManipulation.builtXndY(data)\n(Xtest,Ytest)= opticalCharacterManipulation.builtXndY(datatest)\n\n# Generate the neural network model\nnn = FeedForwardNetwork()\ninLayer = LinearLayer(64, name=\"Input\")\nhiddenLayer = SigmoidLayer(50, name=\"First hidden layer\")\noutLayer = LinearLayer(10, name=\"Output\")\n\nnn.addInputModule(inLayer)\nnn.addModule(hiddenLayer)\nnn.addOutputModule(outLayer)\n\nin_to_hidden = FullConnection(inLayer, hiddenLayer,name=\"c1\")\nhidden_to_out = FullConnection(hiddenLayer, outLayer,name=\"c2\")\n\nnn.addConnection(in_to_hidden)\nnn.addConnection(hidden_to_out)\n\nnn.sortModules()\n\nprint(nn)\n#Activate for a particular value\n#print(nn.activate([0,0,5,13,9,1,0,0,0,0,13,15,10,15,5,0,0,3,15,2,0,11,8,0,0,4,12,0,0,8,8,0,0,5,8,0,0,9,8,0,0,4,11,0,1,12,7,0,0,2,14,5,10,12,0,0,0,0,6,13,10,0,0,0]))\n\nalldata = ClassificationDataSet(64,nb_classes=10, class_labels= [str(x) for x in range(0,10)] )\nalldata.setField(\"input\",X)\nalldata.setField(\"target\",Y)\n\nalldatatest = ClassificationDataSet(64,nb_classes=10, class_labels= [str(x) for x in range(0,10)] )\nalldatatest.setField(\"input\",Xtest)\nalldatatest.setField(\"target\",Ytest)\n\nalldata._convertToOneOfMany( )\nalldatatest._convertToOneOfMany( )\n\nprint(\"Number of training patterns: \", len(alldata))\nprint(\"Input and output dimensions: \", alldata.indim, alldata.outdim)\nprint(\"First sample (input, target, class):\")\nprint(alldata['input'][0], alldata['target'][0], alldata['class'][0])\n#normalize data\n\ntrainer = BackpropTrainer( nn, dataset=alldata, learningrate=0.01,momentum=0.1, verbose=False, weightdecay=0.002,batchlearning=False)\n\nx_errors = np.zeros((1,20),dtype=int)\nerrors = np.zeros((2,20),dtype=float)\n\nfor i in range(20):\n trainer.trainEpochs( 1 )\n trnresult = percentError( trainer.testOnClassData(),alldata['class'] )\n tstresult = percentError( trainer.testOnClassData(dataset=alldatatest ), alldatatest['class'] )\n\n x_errors[0,i]= trainer.totalepochs\n errors[0,i]= trnresult\n errors[1,i]= tstresult\n print(\"epoch: %4d\" % trainer.totalepochs, \\\n \" train error: %5.2f%%\" % trnresult, \\\n \" test error: %5.2f%%\" % tstresult)\n\n\n#plt.plot(x_errors[0,:],errors[0,:],'g',x_errors[0,:],errors[1,:],'b')\n#plt.show()\n\n#epoch: 20 train error: 4.34% test error: 6.84%\n#Time elapsed :84.42\n\n#Without normalization of the input values\n#epoch: 20 train error: 6.70% test error: 10.46%\n#Time elapsed :80.95\nend = time.time()\nprint(\"Time elapsed 
:\"+str(end-start))","sub_path":"NeuralNetworks/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"163327988","text":"numList = [1,2,3,4,5,6,7,8]\nevenNumList = []\noddNumList = []\n\nfor item in numList:\n if(item % 2 == 0):\n evenNumList.append(item)\n if(item % 2 != 0):\n oddNumList.append(item)\n\nprint(evenNumList)\nprint(oddNumList)\n","sub_path":"Solutions_01/solution_02.py","file_name":"solution_02.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"157870720","text":"\n\n# list of users \n# dictionary ---> key(national code user) , value (instance of user class)\nListUsers = {}\n\n\n# list of employees\n# dictionary ---> key(national code employee) , value (instance of Employee class)\nListEmployees = {}\n\n\n# list of banks \n# dictionary ---> key(bank name) , value(instance of Bank class)\nListBanks = {}\n\n# tuple(not change value) of banks name\nListBanksName = ('Melli' ,'Mellat' ,'Sepah','Industry and Mine','Tosee Taavon','Saderat','Keshavarzi','Maskan','Post Bank')\n\n# tuple(not change value) of binary equivalent banks name\nListBinaryBanksName = ('0001','0010','0011','0100','0101','0110','0111','1000','1001')\n\n# tuple(not change value) of accounts type\nListAccountsType = ('Savings' , 'Current','Long Term Investments' , 'Short Term Investments')\n\n# tuple(not change value) of binary equivalent accounts name\nListBinaryAccountsType = ('00','01','10','11')\n\n\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"539296821","text":"import csv\nfrom tqdm import tqdm\nimport argparse\n\npar = argparse.ArgumentParser()\npar.add_argument(\"-d\", \"--data_path\", default='test',\n type=str, help=\"Please enter data path\")\nargs = par.parse_args()\n\nwith open(args.data_path + '.csv','r') as f:\n rdr = csv.reader(f)\n\n insert_tok = list(range(1,422))\n max_length = 400\n\n with open(args.data_path + '_2.csv', 'w', newline='') as new_f:\n wr = csv.writer(new_f)\n for i, line in tqdm(enumerate(rdr)):\n if i == 0:\n line.append('pos')\n line.append('sync_pos')\n wr.writerow(line)\n continue\n pos = list()\n sync_pos = list()\n target = line[2].split()\n length = len(line[1].split())\n current_length = len(line[1].split())\n for m in range(max_length):\n if length - m > 0:\n pos.append(str(length - m))\n else:\n pos.append(str(0))\n if current_length <= 0:\n sync_pos.append(str(0))\n else:\n if int(target[m]) not in insert_tok:\n current_length -= 1\n sync_pos.append(str(current_length+1))\n\n line.append(' '.join(pos))\n line.append(' '.join(sync_pos))\n wr.writerow(line)\n","sub_path":"data/deepfix_raw_data/add_pos.py","file_name":"add_pos.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"587333139","text":"import requests\r\nimport re\r\nimport urllib3\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as img\r\nfrom math import *\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\n\r\npage = requests.get('https://www.go-tel.si/instrukcije/instruktorji.php').text\r\ntabela = re.split(r'
', page)[2]\r\ninstruktorji = re.split(r'
\\w+\\s*', s)\r\n    if len(vrstica) == 0: continue\r\n    ime = re.match(r'class=\"nondecor\">(\\w+)\\s*', vrstica[0]).group(1)\r\n    podatki = re.search(r'.*razpolozljivost=\"(\\d+)\".*reference=\"(\\d+)\".*razdalja=\"(\\d+)\".*starost=\"(\\d+)\".*href=\"(\\d+)-(\\w+)\".*src=\"images/Instruktor(\\w+).(\\w+)\".*', s, re.S)\r\n    if podatki is None: continue\r\n    razpolozljivost = podatki.group(1)\r\n    reference = podatki.group(2)\r\n    razdalja = podatki.group(3)\r\n    starost = podatki.group(4)\r\n    koda = podatki.group(5)+'-'+podatki.group(6)\r\n    urlslika = 'images/Instruktor'+podatki.group(7)+'.'+podatki.group(8)\r\n    #slika = requests.get('https://www.go-tel.si/instrukcije/'+urlslika).text\r\n\r\n    '''For each instructor separately'''\r\n    instr = requests.get('https://www.go-tel.si/instrukcije/'+ koda).text\r\n    cenapod = re.search(r'.*onchange=\"cena\\(\\'(\\d+)\\'\\).*', instr)\r\n    cena = cenapod.group(1)\r\n    krajpod = re.search(r'.*naslov=\".+, (.+)\"', instr)\r\n    kraj = krajpod.group(1)\r\n    urepod = re.search(r'.*

(\\d+) ur

.*', instr)\r\n ure = urepod.group(1)\r\n ucencipod = re.search(r'.*

(\\d+)\\+.*', instr)\r\n    ucenci = ucencipod.group(1)\r\n    predmetipod = re.search(r'.*content=.+predmetov\\:(.+)Individualno.*', instr)\r\n    predmetipod1 = predmetipod.group(1)\r\n    predmeti = []\r\n    \r\n    if 'v krajih' in predmetipod1:\r\n        pred, _ = predmetipod1.split('v krajih')\r\n    else:\r\n        pred = predmetipod1\r\n    pred = pred.strip()\r\n\r\n    predmeti = pred.split(', ')\r\n    \r\n    f = open('instruktorji.txt', 'a')\r\n    f.write(ime+'-'+kraj+'-'+str(predmeti)+'-'+reference+'-'+razpolozljivost+'-'+ucenci+'-'+ure+'\\n')\r\n    f.close()  # the original `f.close` lacked parentheses, so the file was never actually closed\r\n\r\n    \r\n##    img = open('instruktor'+str(i)+'.png', 'wb')\r\n##    img.write(slika)\r\n    with open('instruktor'+str(i)+'.png', 'wb') as handle:\r\n        response = requests.get('https://www.go-tel.si/instrukcije/'+urlslika, stream=True)\r\n\r\n        if not response.ok:\r\n            print(response)\r\n\r\n        for block in response.iter_content(1024):\r\n            if not block:\r\n                break\r\n\r\n            handle.write(block)\r\n    i += 1\r\n\r\n\r\n    \r\n","sub_path":"Projekt/podatki_instrutorjev.py","file_name":"podatki_instrutorjev.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"112436321","text":"from web3 import shh\nfrom web3 import HTTPProvider, Web3\nstatus_host = 'http://localhost:8080'\nprivatekey = '0x3DB531D13A0AAD9B47689778B29686501B5E383D4CFE4099EFCC9BCEB2BEC144'\nconnect = Web3(HTTPProvider(status_host))\nprint('connect status ===> ', connect.isConnected())\nsh_con = shh.Shh(connect)\nprint('info ===>>>> ', sh_con.info)\nid = sh_con.addPrivateKey(key=privatekey)\nprint('id ===>>>> ', id)\nuser_pubkey = sh_con.getPublicKey(id)\nprint('user_pubkey ===>>> ', user_pubkey)\ntopic = Web3.toHex(b'ESSC')\nprint(\"topic => \", topic)\ntext = b'First test'\nsumkey = sh_con.addSymKey(privatekey)\nprint('sumkey ====>>>> ', sumkey)\naddress_to = '0x043c03b3b0a0cd00...'\nmes_send = sh_con.post(\n    {\n        'ttl': 40,\n        'powTarget': 11,\n        'powTime': 5,\n        'payload': Web3.toHex(text),\n        'topic': topic,\n        'pubKey': address_to,\n        'sig': user_pubkey,\n    }\n)\nif mes_send:\n    print('Message sent')\nelse:\n    print('Message not sent')","sub_path":"John_Jay_Daap1/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"324540509","text":"#!/usr/bin/env python3\n# Should work fine with python2 as well, but testing and development was done in 3.\n\n# Q1c\n# This one is a self-imposed bonus challenge.\n# Tried to figure out another way to do q1 without using for loops.\n# Did this one using NumPy and vectorization.\n\n# Given a list of positive numbers, write a function that prints the following\n# \"fizz\" if number is divisible by 3\n# \"buzz\" if number is divisible by 5\n# \"fizzbuzz\" if number is divisible by 3 and 5\n\n\nimport numpy as np  # This forms the core of what's doing the work.\n                    # Also, let's be frank, there are probably some for loops happening in the numpy methods I'm using.\n                    # But at least they're hidden in the shadows and this miiiiiiiight go faster on gigantic numberlists.\n\n\n# This is the function that does the actual work.\n# Different from answer in Q1a and Q1b in that this is doing it via NumPy arrays.\n# INPUT: a list of integer numbers.\n# Input list can contain any quantity of entries, from an empty list to arbitrarily large.\n# There is no error checking on the input. 
If strings or floats are passed, bad things might happen.\n# Similarly, non-positive integers will be weird, albeit not necessarily function-breaking.\n# OUTPUT: returns a list of \"fizz\", \"buzz\", and \"fizzbuzz\" depending on input list of numbers.\ndef numpy_fizzbuzz_translator(list_of_numbers):\n # We need to create a numpy array from the original list. Because these are just standard numbers, it's easy.\n array_data = np.array(list_of_numbers) # Create an array of the input to figure out where we 3 and 5 divisibility.\n mod3_array = np.mod(array_data, 3) # Returns a (mod 3) array of array_data (e.g. [1 2 3 4 5] --> [1 2 0 1 2])\n mod5_array = np.mod(array_data, 5) # Returns a (mod 5) array of array_data (e.g. [1 2 3 4 5] --> [1 2 3 4 0])\n # Note that we do not need a (mod 15) version.\n # Because we have both the mod3 and mod5 arrays, we know that 15 divisibility occurs where they \"overlap\".\n\n # More setup.\n # We need arrays of strings to build off of. One for doing nothing (''), one for fizz, and one for buzz.\n # For each of the below, we are making an array that is as long as our original list_of_numbers, then filling it.\n # The 'dtype' statement is very important: it declares that we're working with strings of given lengths.\n # Since these are all simple ASCII, fizz and buzz are both at length 4, while the empties just take 1.\n # Note that these strings are all bytestrings, so we will eventually want to undo that down the road.\n empties_array = np.full(len(list_of_numbers), fill_value='', dtype='S1') # Match list_of_numbers length, all empty strings.\n # Be careful to not use np.empty -- it does not set the values in memory, just allocates.\n # This will break things, so we have to make sure the values get set to the empty string ourselves.\n array_of_all_fizz = np.full(len(list_of_numbers), fill_value='fizz', dtype='S4') # All 'fizz' strings.\n array_of_all_buzz = np.full(len(list_of_numbers), fill_value='buzz', dtype='S4') # All 'buzz' strings.\n\n # We can now use these arrays to do some conditionals based on divisibility.\n # For both of the below, we use np.where(condition, TrueThings, FalseThings) for slotting in based on divisibility.\n # The mod#_array goes in the first argument to determine which array we pull from to slot that location.\n # Because 0's (implying divisibility) are False, we put the word ('fizz'/'buzz') to slot in the third argument.\n # Anything other than 0 (implying non-divisibility) are True, we put the empty strings in the second argument.\n slot_in_fizz = np.where(mod3_array, empties_array, array_of_all_fizz) # Puts a 'fizz' at 3 divisible, '' otherwise.\n slot_in_buzz = np.where(mod5_array, empties_array, array_of_all_buzz) # Puts a 'buzz' at 5 divisible, '' otherwise.\n\n # Because 15 is divisible by 3 and 5, we can avoid having to slot in 'fizzbuzz' because of concatenation.\n # Note that 'fizzbuzz' is just 'fizz' + 'buzz', so if we just concatenate all of our strings from both arrays,\n # we'll have a fully slotted array with 'fizz', 'buzz', 'fizzbuzz', and empty strings '' in correct spots.\n # To use concatenation, we need to change the type of these arrays to np.char.array (not standard for np.array).\n fully_slotted_array = np.char.array(slot_in_fizz) + np.char.array(slot_in_buzz) # np.char.array enables concat +\n # Additionally, because we declare them as np.char.arrays above, we no longer have the length restriction of 'S4'.\n # That's important because otherwise it would truncate our 'fizzbuzz'.\n\n # Also, because we're now 
in the world of np.char.array, we can do standard Python string methods like .decode()\n fully_slotted_array = fully_slotted_array.decode('utf-8') # Back to the ordinary world of text. Byte strings yucky.\n\n # Finally, we need to get rid of all those empty strings from our array so we can spit it out as a list and finish.\n # We'll do this by finding the locations of all empty strings, then creating a new array with them deleted.\n\n # Below conditional is just an edge case dodge: NumPy gets grumpy if we do comparisons on an empty array.\n if len(fully_slotted_array) > 0:\n empty_locations = np.argwhere(fully_slotted_array == '') # Array of all locations where we have empty strings.\n finished_array = np.delete(fully_slotted_array, empty_locations) # Arrays non-mutable, so declare new var.\n else: # If the full_slotted_array was empty, we just need to pass it along.\n finished_array = fully_slotted_array # This could also be a return of []. But let's not break the flow.\n\n # Finally, we need to convert back from NumPy array into standard Python list land. We do this as we return.\n return finished_array.tolist() # Hand back the finished product as a list.\n\n\ntest = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22]\n\nprint(\"Running NumPy (no loop) version on this input:\", test)\n\nprint(\"Our output:\", numpy_fizzbuzz_translator(test))\n","sub_path":"q1/q1c.py","file_name":"q1c.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"553639311","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nx = np.arange(1, 20)\ny = x * 2\ny2 = [i*1.5 for i in y]\ny3 = [i*1.7 for i in y2]\ny4 = [i*1.7 for i in y3]\n\nflg, axs = plt.subplots(2, 2, figsize=(12, 7))\naxs[0, 0].plot(x, y, 'r--')\naxs[0, 1].plot(x, y2, 'b:')\naxs[1, 0].plot(x, y3, 'g-.')\n\nplt.title(label='my title')\naxs[1, 1].plot(x, y4, 'rx')\nplt.show()","sub_path":"matplotlib/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"152266858","text":"from django.utils.http import is_safe_url, urlunquote\n\n\ndef get_next_url(request):\n next = request.META.get('HTTP_REFERER')\n if next:\n next = urlunquote(next) # HTTP_REFERER may be encoded.\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n return next\n","sub_path":"InformationSystem/Altai/special_func.py","file_name":"special_func.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"209869819","text":"import os\nimport sys\nimport torch\n\nfrom pytorch3d.datasets import (\n R2N2,\n ShapeNetCore,\n collate_batched_meshes,\n render_cubified_voxels,\n)\n\nfrom pytorch3d.structures import Meshes\nfrom torch.utils.data import DataLoader\n\n# add path for demo utils functions\nsys.path.append(os.path.abspath(''))\n\n# Setup\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n torch.cuda.set_device(device)\nelse:\n device = torch.device(\"cpu\")\n\nSHAPENET_PATH = \"/Data/leo/Pixel2Mesh_3d/dataset/ShapeNetCore.v2\"\nshapenet_dataset = ShapeNetCore(SHAPENET_PATH, version=2, synsets=[\"airplane\"])\n\nshapenet_loader = DataLoader(shapenet_dataset, batch_size=12, collate_fn=collate_batched_meshes)\n\nshapenet_batch = 
next(iter(shapenet_loader))\n\nprint(shapenet_batch[\"mesh\"].verts_packed().shape)\nprint(shapenet_batch[\"mesh\"].edges_packed().shape)","sub_path":"data_loading/batching.py","file_name":"batching.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"51289336","text":"from MyMaths import Polynomial, Term\nimport matplotlib.pyplot as plt\n\nclass Function(Polynomial):\n def __init__(self, range, terms = []):\n super().__init__(terms)\n self.range = range\n \n def isinrange(self, x):\n if x>=self.range[0] and x<=self.range[1]:\n return True\n else:\n return False\n \n def sub(self, x):\n if self.isinrange:\n ret = 0\n for term in self.terms:\n ret +=term.sub(x)\n return ret\n \n else:\n return 0\n \n def overlaps(self, other):\n if self.range[1] - other.range[0] <0:\n return True\n \n return False\n\n \n\nclass ListOfFunctions:\n def __init__(self):\n self.functions = []\n \n def addfunction(self, function):\n #check if range overlaps\n overlap = False\n for afunction in self.functions:\n if function.overlaps(afunction):\n overlap = True\n \n if not overlap:\n self.functions.append(function)\n \n def sub(self, x):\n for afunction in self.functions:\n if afunction.isinrange(x):\n return afunction.sub(x)\n\n raise ValueError('Input Not In Range')\n\n def plot(self, a, b):\n datax =[]\n datay =[]\n x=a\n while x<=b:\n datax.append(x)\n datay.append(self.sub(x))\n x+=0.01\n plt.plot(datax, datay, label = str(self))\n \n def findlocalmax(self, x):\n if self.sub(x+1)>self.sub(x) and self.sub(x-1)self.sub(x):\n return self.findlocalmax(x-1)\n if self.sub(x+1)<=self.sub(x) and self.sub(x-1)<=self.sub(x):\n return x\n \n\n \n \n\n\n\n\n\nif __name__ == \"__main__\":\n alist = ListOfFunctions()\n poly = Polynomial(Polynomial.parser(\"2x^2+5x+-240\"))\n \n alist.addfunction(Function((0, 10), Polynomial.parser(\"2x^2+5x+-240\")))\n alist.addfunction(Function((10,15), Polynomial.parser(\"x\")))\n alist.addfunction(Function((15,22), Polynomial.parser(\"x^2+-210\")))\n alist.plot(0,22)\n\n plt.show()\n\n \n\n \n \n\n \n \n \n\n \n","sub_path":"scripts/python_scripts/Function.py","file_name":"Function.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"593425918","text":"from dotenv import load_dotenv\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport time\nimport sys\nimport os\nload_dotenv()\nsys.path.append(os.path.abspath(os.getenv(\"system_path\")))\nfrom lib import rabbit_mq, selenium\n\ntry:\n connection = rabbit_mq.create_connection()\n channel = connection.channel()\n\n channel.queue_declare(queue='quoraE_fetch_job_details', durable=True) \n channel.queue_declare(queue='mongoE_jobs_save', durable=True)\n\n count = 0\n\n def callback(ch, method, properties, body):\n global count\n count += 1\n print(\"count::\", count)\n # print(\" [x] Received %r\" % json.loads(body))\n job = json.loads(body)\n print(\"\\n\\n\\njob url::\", job['job_link'])\n driver = selenium.driver()\n try:\n driver.get(job['job_link'])\n except Exception as e:\n print('selenium exception...........continuing next iteration.........')\n return\n time.sleep(10)\n page = driver.page_source\n soup = BeautifulSoup(page, 'lxml')\n driver.close()\n # print(\"soup::\", soup)\n print(\"Creating job data...\")\n try:\n job['question'] = soup.find(\"div\", class_=\"q-inline\").getText() if soup.find(\"div\", class_=\"q-inline\") 
else ''\n #job['company'] = soup.find(\"div\", class_=\"pipeline-desc\").get(\"a\") if soup.find(\"div\", class_=\"pipeline-desc\") else ''\n #job['location'] = \"Remote\"\n answer = soup.find_all(\"div\", class_=\"q-relative\")if soup.find(\"div\", class_=\"q-relative\") else None\n job['answer'] = ''\n i = 0\n for item in answer:\n i += 1\n if i == 3 or i == 5 or i == 7:\n job['answer'] = job['answer'] + ' ' + item.getText().strip()\n job['answer'] = job['answer'].strip()\n job['source'] = 'Quora'\n print(\"job::\", job)\n \n channel.basic_publish(exchange='', routing_key='mongoE_jobs_save', body=json.dumps(job))\n except Exception as e:\n print(\"Quora...Unable to parse html::\", e)\n\n # channel.basic_consume(callback, 'remote_fetch_job_details', no_ack=True)\n channel.basic_consume(\n queue='quoraE_fetch_job_details', on_message_callback=callback, auto_ack=False)\n\n print(' [*] Waiting for messages. To exit press CTRL+C')\n channel.start_consuming()\n\nexcept Exception as e:\n error = {\n \"status\": \"Quora......... Error occured while fetching job details\",\n \"errorMsg\": e\n }\n print(\"Error: \",error)","sub_path":"crawler/quora/product_designer-e/fetch_job_detail.py","file_name":"fetch_job_detail.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"505169655","text":"import pandas as pd\nimport lightgbm as lgb\n\nimport validation\nimport data\nimport paths\n\nfrom lgbm_util import LgbmAdapter\nfrom models.learning_rate import *\n\nfrom scipy.stats import randint as sp_randint\nfrom scipy.stats import uniform as sp_uniform\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_auc_score, roc_curve\n\nfrom lightgbm import LGBMClassifier\n\n\ndef name():\n return 'layer_1_model_lgbm_v4'\n\n\n# https://www.kaggle.com/mlisovyi/lightgbm-hyperparameter-optimisation-lb-0-761\ndef params_optimize(x_train, y_train):\n x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.10, stratify=y_train)\n fit_params = {\"early_stopping_rounds\": 30,\n \"eval_metric\": 'auc',\n \"eval_set\": [(x_test, y_test)],\n 'eval_names': ['valid'],\n # 'callbacks': [lgb.reset_parameter(learning_rate=learning_rate_010_decay_power_099)],\n 'verbose': 100,\n 'categorical_feature': 'auto'}\n\n param_test = {'num_leaves': sp_randint(6, 50),\n 'min_child_samples': sp_randint(100, 500),\n 'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],\n 'subsample': sp_uniform(loc=0.2, scale=0.9),\n 'colsample_bytree': sp_uniform(loc=0.4, scale=0.6),\n 'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],\n 'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100]}\n hp_points_to_test = 200\n clf = lgb.LGBMClassifier(max_depth=-1, silent=True, metric='None', n_jobs=4, n_estimators=10000)\n clf = LGBMClassifier(nthread=4, n_estimators=10000, learning_rate=0.02, num_leaves=34, colsample_bytree=0.9497036,\n subsample=0.8715623, max_depth=8, reg_alpha=0.041545473, reg_lambda=0.0735294,\n min_split_gain=0.0222415, min_child_weight=39.3259775, silent=-1, verbose=-1, )\n\n gs = RandomizedSearchCV(estimator=clf, param_distributions=param_test, n_iter=hp_points_to_test, scoring='roc_auc',\n cv=5, refit=True, verbose=True)\n\n gs.fit(x_train, y_train, **fit_params)\n print('Best score reached: {} with params: {} '.format(gs.best_score_, 
gs.best_params_))\n\n\ndef run(train_df, test_df):\n features_to_drop = ['TARGET', 'TRAIN', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index']\n '''\n # removendo features com correlacao 1.0\n # layer_1_model_lgbm_v3 [ 0.74027767 0.74787884 0.74235477 0.75003905 0.75075121] - LB 0.741\n 'CC_NAME_CONTRACT_STATUS_Refused_MAX', 'PREV_NAME_CONTRACT_STATUS_Unused offer_MEAN',\n 'CC_NAME_CONTRACT_STATUS_Refused_MEAN', 'CC_NAME_CONTRACT_STATUS_Sent proposal_MEAN',\n 'CC_NAME_CONTRACT_STATUS_Sent proposal_MAX', 'NEW_ANNUITY_TO_INCOME_RATIO',\n 'APPROVED_AMT_APPLICATION_MAX', 'PREV_NAME_CONTRACT_TYPE_XNA_MEAN', 'MEAN_DAYS_ENDDATE_DIFF',\n 'NEW_EMPLOY_TO_BIRTH_RATIO', 'POS_MONTHS_BALANCE_SIZE', 'CC_AMT_RECIVABLE_MIN'\n ]\n '''\n features = list(set(train_df.columns) - set(features_to_drop))\n\n x_train, y_train = data.extract_values_from_dataframe(train_df, features)\n # params_optimize(x_train, y_train)\n # return\n\n dataset = lgb.Dataset(x_train, y_train)\n\n params = {'n_estimators': 50000, 'learning_rate': 1, 'num_leaves': 20, 'colsample_bytree': 0.64331178360329422,\n 'subsample': 0.89456134209978089, 'max_depth': 8, 'reg_alpha': 10, 'reg_lambda': 10,\n 'min_split_gain': 0.0222415, 'min_child_weight': 1e-05, 'objective': 'binary', 'is_unbalance': True,\n 'min_child_samples': 224}\n\n # RandomGridSearch best params 0.7662859436419786\n # {'colsample_bytree': 0.64331178360329422, 'min_child_samples': 224, 'min_child_weight': 1e-05, 'num_leaves': 20, 'reg_alpha': 10, 'reg_lambda': 10, 'subsample': 0.89456134209978089}\n\n classifier = LGBMClassifier(nthread=4, n_estimators=10000, learning_rate=0.02, num_leaves=34,\n colsample_bytree=0.9497036, subsample=0.8715623, max_depth=8, reg_alpha=0.041545473,\n reg_lambda=0.0735294, min_split_gain=0.0222415, min_child_weight=39.3259775,\n silent=-1, verbose=-1, )\n train_pred, test_pred, cross_scores = validation.cross_val_predict(classifier, train_df, test_df, features,\n use_proba=True)\n\n return cross_scores, \\\n pd.DataFrame(data={'SK_ID_CURR': train_df[\"SK_ID_CURR\"].values, 'TARGET': train_pred}), \\\n pd.DataFrame(data={'SK_ID_CURR': test_df[\"SK_ID_CURR\"].values, 'TARGET': test_pred})\n\n\nif __name__ == '__main__':\n import numpy as np\n\n np.random.seed(1985)\n\n train_predict_df, test_predict_df = data.load_dataset()\n scores, train_predict_df, test_predict_df = run(train_predict_df, test_predict_df)\n\n data.save_submission(train_predict_df, name(), 'train', scores)\n data.save_submission(test_predict_df, name(), 'test', scores)\n","sub_path":"models/layer_1_model_lgbm_v4.py","file_name":"layer_1_model_lgbm_v4.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"626922245","text":"import subprocess\nimport sys\n\n\ndef buy_bitcoin(cash, coins, price):\n \"\"\"\n Inputs:\n cash (int): amount of cash on hand at start of day\n coins (int): number of coins on hand at start of day\n price (int): price of Bitcoin on a given day\n Outputs:\n cash (int): amount of cash on hand at end of day\n coins (int): number of coins on hand at end of day\n \"\"\"\n price = int(price)\n if coins + int(cash/price) > 100000:\n coins_bought = 100000 - coins\n cash = cash - coins_bought*price\n else:\n coins += int(cash/price)\n cash = cash % price\n return coins, cash\n\n\ndef sell_bitcoin(cash, coins, price):\n \"\"\"\n Inputs:\n cash (int): amount of cash on hand at start of day\n coins (int): number of coins on hand at start of day\n price (int): price of Bitcoin 
on a given day\n Outputs:\n cash (int): amount of cash on hand at end of day\n coins (int): number of coins on hand at end of day\n \"\"\"\n price = int(price)\n cash = cash + coins*price\n coins = 0\n return coins, cash\n\n\ndef buy_sell(start_cash, daily_prices):\n \"\"\"\n Inputs:\n start_cash (int): amount of cash that you start the time period with\n daily_prices (list): list of daily Bitcoin prices\n Outputs:\n cash (int): max amount of cash at the end of the time period\n \"\"\"\n cash = start_cash\n coins = 0\n for i in range(len(daily_prices)-1):\n price_today = int(daily_prices[i])\n price_tomorrow = int(daily_prices[i+1])\n if price_tomorrow > price_today:\n coins, cash = buy_bitcoin(cash, coins, price_today)\n else:\n coins, cash = sell_bitcoin(cash, coins, price_today)\n coins, cash = sell_bitcoin(cash, coins, daily_prices[-1])\n return int(cash)\n\nif __name__ == \"__main__\":\n starting_cash = 100\n all_prices = sys.stdin.readlines()[1:]\n print(buy_sell(starting_cash, all_prices))\n","sub_path":"lazy/lazy_crypto.py","file_name":"lazy_crypto.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"558363307","text":"# -*- coding:utf-8 -*-\nfrom __future__ import division\nimport random\nimport cv2\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\nfrom nltk.corpus import stopwords\nimport keras.backend as K\nfrom scipy import interp\nfrom time import sleep\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix, roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.models import Model, load_model\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers.core import Reshape\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Flatten, Dense, Dropout, Lambda, Input, Embedding, Permute, RepeatVector, TimeDistributed\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D, Convolution2D\nfrom keras.layers import Conv1D, MaxPooling1D, ZeroPadding1D, AveragePooling1D\nfrom keras.layers import LSTM, GRU, Bidirectional\nfrom keras.layers import GlobalMaxPooling2D, GlobalAveragePooling2D, GlobalAveragePooling1D\nfrom keras.layers import Activation, initializers\nfrom keras.layers import concatenate, merge\nfrom keras.optimizers import Nadam, Adadelta, Adam\nfrom keras.regularizers import l2\n# from attention_context import AttentionWithContext\nrandom.seed(2018)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\nclass TextModel:\n def __init__(self, N_CLASSES, MAX_TEXT, MAX_ITEM_DESC_SEQ):\n self.N_CLASSES = N_CLASSES\n self.MAX_TEXT = MAX_TEXT\n self.MAX_ITEM_DESC_SEQ = MAX_ITEM_DESC_SEQ\n self.encoder_units = 16\n self.decoder_units = 16\n self.emb_size = 40\n self.lr = 0.001\n self.dropout = 0.2\n\n @staticmethod\n def conv1d_bn(x, filters, width, padding='same', strides=1):\n if K.image_data_format() == 'channels_first':\n bn_axis = 1\n else:\n bn_axis = 3\n x = Conv1D(filters, width, strides=strides, padding=padding)(x)\n # x = BatchNormalization(axis=bn_axis, scale=False)(x)\n x = Activation('relu')(x)\n return x\n\n def branch_cnn(self, emb):\n\n m_cnn_1 = self.conv1d_bn(emb, 64, 3, padding='same')\n 
m_cnn_2 = self.conv1d_bn(emb, 32, 3, padding='same')\n m_cnn_2 = self.conv1d_bn(m_cnn_2, 128, 3, padding='same')\n m_cnn_3 = self.conv1d_bn(emb, 128, 3, padding='same')\n m_cnn_3 = self.conv1d_bn(m_cnn_3, 64, 1, padding='same')\n m_cnn = concatenate([m_cnn_1, m_cnn_2, m_cnn_3])\n m_cnn = MaxPooling1D(pool_size=2, padding='valid')(m_cnn)\n m_cnn = self.conv1d_bn(m_cnn, 64, 3, padding='same')\n m_cnn = MaxPooling1D(pool_size=2, padding='valid')(m_cnn)\n m_cnn = Flatten()(m_cnn)\n return m_cnn\n\n # def branch_bilstm_am(self, emb):\n # \"\"\"\n # attention model\n # \"\"\"\n # # m_lstm = LSTM(self.encoder_units, return_sequences=True, trainable=True)(emb)\n # m_lstm = Bidirectional(LSTM(self.encoder_units, return_sequences=True, trainable=True))(emb)\n # attention = AttentionWithContext()(m_lstm)\n #\n # return attention\n\n def branch_cnn_am1(self, q1, q2, X_train_q1, X_train_q2):\n emb_layer = Embedding(self.MAX_TEXT, self.emb_size, trainable=True)\n emb_q1 = emb_layer(q1)\n emb_q2 = emb_layer(q2)\n\n match_score = self.MatchScore(emb_q1, emb_q2, mode='cos')\n attention_left = TimeDistributed(Dense(self.emb_size, activation=\"tanh\"), input_shape=(X_train_q1.shape[1], X_train_q2.shape[1]))(match_score)\n match_score_t = Permute((2, 1))(match_score)\n attention_right = TimeDistributed(Dense(self.emb_size, activation=\"tanh\"), input_shape=(X_train_q2.shape[1], X_train_q1.shape[1]))(match_score_t)\n\n left_reshape = Reshape((1, attention_left._keras_shape[1], attention_left._keras_shape[2]))\n attention_left = left_reshape(attention_left)\n emb_q1 = left_reshape(emb_q1)\n\n right_reshape = Reshape((1, attention_right._keras_shape[1], attention_right._keras_shape[2]))\n attention_right = right_reshape(attention_right)\n emb_q2 = right_reshape(emb_q2)\n\n emb_q1 = merge([emb_q1, attention_left], mode=\"concat\", concat_axis=1)\n emb_q2 = merge([emb_q2, attention_right], mode=\"concat\", concat_axis=1)\n\n left_embed_padded = ZeroPadding2D((int(3 / 2), 0))(emb_q1)\n right_embed_padded = ZeroPadding2D((int(3 / 2), 0))(emb_q2)\n\n conv_left = Conv2D(filters=64, kernel_size=(3, self.emb_size), activation=\"tanh\", padding=\"valid\")(left_embed_padded)\n conv_left = (Reshape((conv_left._keras_shape[1], conv_left._keras_shape[2])))(conv_left)\n conv_left = AveragePooling1D(pool_size=3, strides=1, padding='same')(conv_left)\n\n # text 1d convolution\n conv_left = Conv1D(128, 3, strides=1, padding='valid')(conv_left)\n conv_left = Activation('relu')(conv_left)\n conv_left = MaxPooling1D(pool_size=2)(conv_left)\n conv_left = Dropout(0.2)(conv_left)\n\n conv_left = Conv1D(32, 3, strides=1, padding='valid')(conv_left)\n conv_left = Activation('relu')(conv_left)\n conv_left = MaxPooling1D(pool_size=2)(conv_left)\n\n # conv_right\n conv_right = Conv2D(filters=64, kernel_size=(3, self.emb_size), activation=\"tanh\", padding=\"valid\")(right_embed_padded)\n conv_right = (Reshape((conv_right._keras_shape[1], conv_right._keras_shape[2])))(conv_right)\n conv_right = AveragePooling1D(pool_size=3, strides=1, padding='same')(conv_right)\n\n conv_right = Conv1D(128, 3, strides=1, padding='valid', activation='relu')(conv_right)\n conv_right = MaxPooling1D(pool_size=2)(conv_right)\n conv_right = Dropout(0.2)(conv_right)\n\n conv_right = Conv1D(32, 3, strides=1, padding='valid', activation='relu')(conv_right)\n conv_right = MaxPooling1D(pool_size=2)(conv_right)\n\n cnn = concatenate([conv_left, conv_right])\n return cnn\n\n def build_cnn_lstm_am(self, X_train):\n sentimenttext = Input(shape=[X_train.shape[1]], 
name=\"seq_sentimenttext\")\n emb_sentimenttext = Embedding(self.MAX_TEXT, self.emb_size, trainable=True)(sentimenttext)\n\n m_cnn = self.branch_cnn(emb_sentimenttext)\n m_lstm_am = self.branch_bilstm_am(emb_sentimenttext)\n m_sent_representation = concatenate([m_cnn, m_lstm_am])\n\n fc = Dense(128, activation='relu')(m_sent_representation)\n fc = Dropout(0.2)(fc)\n fc = Dense(64, activation='relu')(fc)\n fc = Dropout(0.2)(fc)\n\n output = Dense(self.N_CLASSES, activation='softmax')(fc)\n\n model = Model([sentimenttext], output)\n print(model.summary())\n return model\n\n def compute_euclidean_match_score(self, l_r):\n l, r = l_r\n denominator = 1. + K.sqrt(\n -2 * K.batch_dot(l, r, axes=[2, 2]) +\n K.expand_dims(K.sum(K.square(l), axis=2), 2) +\n K.expand_dims(K.sum(K.square(r), axis=2), 1)\n )\n denominator = K.maximum(denominator, K.epsilon())\n return 1. / denominator\n\n def compute_cos_match_score(self, l_r):\n l, r = l_r\n return K.batch_dot(\n K.l2_normalize(l, axis=-1),\n K.l2_normalize(r, axis=-1),\n axes=[2, 2]\n )\n\n def MatchScore(self, l, r, mode=\"euclidean\"):\n if mode == \"euclidean\":\n return merge(\n [l, r],\n mode=self.compute_euclidean_match_score,\n output_shape=lambda shapes: (None, shapes[0][1], shapes[1][1])\n )\n elif mode == \"cos\":\n return merge(\n [l, r],\n mode=self.compute_cos_match_score,\n output_shape=lambda shapes: (None, shapes[0][1], shapes[1][1])\n )\n elif mode == \"dot\":\n return merge([l, r], mode=\"dot\")\n else:\n raise ValueError(\"Unknown match score mode %s\" % mode)\n\n def build_cnn_am(self, X_train_q1, X_train_q2):\n q1 = Input(shape=[X_train_q1.shape[1]], name=\"seq_question1\")\n q2 = Input(shape=[X_train_q2.shape[1]], name=\"seq_question2\")\n # conv_left = emb_q1\n # conv_right = emb_q2\n\n cnn = self.branch_cnn_am1(q1, q2, X_train_q1, X_train_q2)\n\n # cnn1 = self.branch_cnn(conv_left)\n # cnn2 = self.branch_cnn(conv_right)\n # cnn = concatenate([cnn1, cnn2])\n\n cnn = Flatten()(cnn)\n\n fc = Dense(128, activation='relu')(cnn)\n fc = Dropout(0.2)(fc)\n fc = Dense(64, activation='relu')(fc)\n fc = Dropout(0.2)(fc)\n\n output = Dense(self.N_CLASSES, activation='softmax')(fc)\n\n model = Model([q1, q2], output)\n print(model.summary())\n return model\n\n def build_cnn(self, X_train_q1, X_train_q2):\n emb_layer = Embedding(self.MAX_TEXT, self.emb_size, trainable=True)\n q1 = Input(shape=[X_train_q1.shape[1]], name=\"seq_question1\")\n emb_q1 = emb_layer(q1)\n q2 = Input(shape=[X_train_q2.shape[1]], name=\"seq_question2\")\n emb_q2 = emb_layer(q2)\n\n cnn1 = self.branch_cnn(emb_q1)\n cnn2 = self.branch_cnn(emb_q2)\n\n cnn = concatenate([cnn1, cnn2])\n fc = Dense(128, activation='relu')(cnn)\n fc = Dropout(0.4)(fc)\n fc = Dense(64, activation='relu')(fc)\n fc = Dropout(0.2)(fc)\n\n output = Dense(self.N_CLASSES, activation='softmax')(fc)\n\n model = Model([q1, q2], output)\n print(model.summary())\n return model\n\n def compile(self, model):\n model.compile(optimizer=Nadam(lr=self.lr), loss='binary_crossentropy', metrics=['accuracy'])\n # model.compile(optimizer='Adadelta', loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n\nclass Train:\n def __init__(self):\n self.n_classes = 2\n self.nb_epoch = 10\n self.batch_size = 256 * 3\n self.val_split = 0.3\n self.MAX_ITEM_DESC_SEQ = 50\n\n self.train_data = 'E:/data/quora-duplicate/train.tsv'\n self.model_path = 'E:/data/quora-duplicate/model/'\n\n @staticmethod\n def evaluation(y_true, y_predict):\n accuracy = accuracy_score(y_true, y_predict)\n precision, recall, f1, 
support = precision_recall_fscore_support(y_true, y_predict)\n print('accuracy:' + str(accuracy))\n print('precision:' + str(precision))\n print('recall:' + str(recall))\n print('f1:' + str(f1))\n\n def flatten(self, l):\n return [item for sublist in l for item in sublist]\n\n def data_cleaning(self, data):\n data['question1'] = data['question1'].str.lower()\n data['question1'].fillna(value=\"nan\", inplace=True)\n data['question2'] = data['question2'].str.lower()\n data['question2'].fillna(value=\"nan\", inplace=True)\n\n # f1 = lambda a: re.sub(r'(@.*? )', '', a)\n # f2 = lambda a: re.sub(r'(@.*?$)', '', a)\n # f3 = lambda a: re.sub(' +', ' ', a)\n # data['SentimentText'] = data['SentimentText'].apply(f1)\n # data['SentimentText'] = data['SentimentText'].apply(f2)\n # data['SentimentText'] = data['SentimentText'].apply(f3)\n\n # english_stopwords = stopwords.words('english')\n # list_senti = []\n # for row in data['SentimentText']:\n # senti = [' '.join(a for a in row.split(' ') if a not in english_stopwords)]\n # list_senti.append(senti)\n # data['SentimentText'] = list_senti\n\n return data\n\n def preprocessing(self, train_x, val_x):\n print(\"start preprocessing\")\n raw_text = np.hstack([train_x['question1'], train_x['question2'], val_x['question1'], val_x['question2']])\n tok_raw = Tokenizer()\n tok_raw.fit_on_texts(raw_text)\n\n train_x['seq_question1'] = tok_raw.texts_to_sequences(train_x['question1'])\n train_x['seq_question2'] = tok_raw.texts_to_sequences(train_x['question2'])\n val_x['seq_question1'] = tok_raw.texts_to_sequences(val_x['question1'])\n val_x['seq_question2'] = tok_raw.texts_to_sequences(val_x['question2'])\n self.MAX_TEXT = np.unique(self.flatten(np.concatenate([train_x['seq_question1'], train_x['seq_question2'], val_x['seq_question1'], val_x['seq_question2']]))).shape[0] + 1\n\n train_Q1 = pad_sequences(train_x['seq_question1'], maxlen=self.MAX_ITEM_DESC_SEQ)\n train_Q2 = pad_sequences(train_x['seq_question2'], maxlen=self.MAX_ITEM_DESC_SEQ)\n val_Q1 = pad_sequences(val_x['seq_question1'], maxlen=self.MAX_ITEM_DESC_SEQ)\n val_Q2 = pad_sequences(val_x['seq_question2'], maxlen=self.MAX_ITEM_DESC_SEQ)\n return train_Q1, train_Q2, val_Q1, val_Q2\n\n def show_model_effect(self, history):\n\n # summarize history for accuracy\n plt.plot(history.history[\"acc\"])\n plt.plot(history.history[\"val_acc\"])\n plt.title(\"Model accuracy\")\n plt.ylabel(\"accuracy\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(self.model_path+\"/Performance_accuracy.jpg\")\n\n # summarize history for loss\n plt.plot(history.history[\"loss\"])\n plt.plot(history.history[\"val_loss\"])\n plt.title(\"Model loss\")\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"test\"], loc=\"upper left\")\n plt.savefig(self.model_path+\"/Performance_loss.jpg\")\n\n def process(self):\n data = pd.read_csv(self.train_data, sep=\"\\t\", error_bad_lines=False)\n print(pd.value_counts(data['is_duplicate']))\n\n data = self.data_cleaning(data)\n\n train_x, val_x, train_y, val_y = train_test_split(data[['question1', 'question2']],\n data['is_duplicate'], test_size=self.val_split, random_state=2018)\n\n train_Q1, train_Q2, val_Q1, val_Q2 = self.preprocessing(train_x, val_x)\n\n train_y = to_categorical(train_y, num_classes=self.n_classes)\n val_y = to_categorical(val_y, num_classes=self.n_classes)\n\n model_obj = TextModel(N_CLASSES=self.n_classes, MAX_TEXT=self.MAX_TEXT, MAX_ITEM_DESC_SEQ=self.MAX_ITEM_DESC_SEQ)\n # text_model = 
model_obj.build_cnn_lstm_am(train_X)\n        text_model = model_obj.build_cnn_am(train_Q1, train_Q2)\n        # text_model = model_obj.build_cnn(train_Q1, train_Q2)\n        text_model = model_obj.compile(text_model)\n\n        sleep(5)\n\n        # Checkpoint\n        model_info = \"/cnn_bilstm_am_model_classNum2_\"\n        epoch_info = 'model-ep{epoch:03d}-acc{acc:.3f}-val_acc{val_acc:.3f}.h5'\n        ckpt_fn = self.model_path + model_info + epoch_info\n        ckpt = ModelCheckpoint(filepath=ckpt_fn, monitor='val_acc', save_best_only=False, mode='max')\n        print(ckpt_fn)\n\n        early_stopping = EarlyStopping(monitor='val_acc', patience=2, verbose=1)\n\n        history = text_model.fit([train_Q1, train_Q2], train_y, epochs=self.nb_epoch, batch_size=self.batch_size,\n                                 validation_data=([val_Q1, val_Q2], val_y), callbacks=[ckpt, early_stopping], verbose=1)\n\n\n        self.show_model_effect(history)\n\n        y_predict = text_model.predict([val_Q1, val_Q2], batch_size=self.batch_size, verbose=0)\n        print(y_predict)\n        y_predict = np.argmax(y_predict, axis=1) + 1\n        print(y_predict)\n        y_true = np.argmax(val_y, axis=1) + 1\n        self.evaluation(y_true, y_predict)\n\n\nif __name__ == '__main__':\n    obj_train = Train()\n    obj_train.process()\n\n","sub_path":"text_classification_keras.py","file_name":"text_classification_keras.py","file_ext":"py","file_size_in_byte":15603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"624341217","text":"# @File  : 5.selenium浏览器自动化.py\n# @Author: ZhouXiangJing\n# @Date  : 2020/10/19\n# @Desc  :\n\n\"\"\"\n\nselenium browser automation\n\nselenium usage workflow\n\n    - install: pip install selenium\n    - download the browser driver\n        download from https://chromedriver.chromium.org/downloads\n\n    - instantiate a browser object\n    - basic operations\n        send a request: get(url)\n        locate elements: the find family of methods\n        interact with elements: send_keys('xxx')\n        run a js program: execute_script('js code')\n        forward / back: back() forward()\n        close the browser: quit()\n\n    - handling iframes with selenium\n        - if the target element is inside an iframe, first call switch_to.frame(id)\n    - action chains (dragging)\n        - instantiate an action chain object: actions = ActionChains(bro)\n        - click and hold: click_and_hold(div)\n        - move by an offset: move_by_offset(x, y)\n        - run the action chain immediately: perform()\n        - release the action chain object: actions.release\n\n\n\n\n\"\"\"\n\n\"\"\"\nEnter chrome://version/ in the Chrome address bar to check the version number.\nThe chromedriver version must match Chrome's major version, otherwise it will not work.\nTwo download addresses:\n1. http://chromedriver.storage.googleapis.com/index.html\n2. https://npm.taobao.org/mirrors/chromedriver/\n\nCheck in the Chrome console whether the driver is detected:\nwindow.navigator.webdriver\n\"\"\"\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver import ChromeOptions\n\n# run without a visible browser window (headless)\nchrome_options = Options()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--disable-gpu')\n\n# evade server-side automation detection\noptions = ChromeOptions()\n# add a user agent\noptions.add_argument('user-agent=\"MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1\"')\n# set the browser window resolution\noptions.add_argument('window-size=1920x3000')\n# start in developer mode; in this mode the webdriver property keeps its normal value\noptions.add_experimental_option('excludeSwitches', ['enable-automation'])\n# run with the highest privileges\noptions.add_argument('--no-sandbox')\n\nbro = webdriver.Chrome(executable_path='./chormedriver', chrome_options=chrome_options, options=options)\n\n\n\n","sub_path":"爬虫/5.selenium浏览器自动化.py","file_name":"5.selenium浏览器自动化.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"496197702","text":"import simulate\n\ndef simulate_iterations(iterations=100, output=\"output/games.txt\", agent_names = [\"Hungry\", \"Path\", 
\"OneStepThinker\", \"LongRouteJunkie\"], point_table={1:1, 2:2, 3:4, 4:7, 5:10, 6:15}):\n output_file = open(output, \"a\")\n for iteration in range(iterations):\n reload(simulate)\n results = simulate.run_game(agent_names=agent_names, point_table=point_table)\n results['point_table'] = point_table\n output_file.write(str(results) + '\\n')\n output_file.close()\n\ndef simulate_iterations_two_player(iterations=100, four_names=[\"Hungry\", \"Path\", \"OneStepThinker\", \"LongRouteJunkie\"]):\n for i in range(iterations):\n two_names = get_two_agents(four_names, i)\n simulate_iterations(iterations=1, agent_names=two_names)\n\ndef get_two_agents(four_names, i):\n\tfirst_index = i//3 % 4\n\tvalid_choices = four_names[:first_index] + four_names[first_index + 1:]\n\ttwo_names = [four_names[first_index], valid_choices[i % 3]]\n\treturn two_names\n\ndef get_linear_point_table(alpha):\n point_table = {}\n for k in range(1, 7):\n point_table[k] = alpha * k\n return point_table\n\ndef float_range(start, stop, step):\n result = []\n for i in range(int((stop-start+step)/step)):\n result += [start + step * i]\n return result\n\ndef simulate_point_tables(iterations, start=1, stop=2.5, step=.1):\n for alpha in float_range(start=start, stop=stop, step=step):\n point_table = get_linear_point_table(alpha)\n simulate_iterations(\n iterations=iterations, \n output=\"output/point_tables.txt\",\n point_table=point_table\n )\n\n \n\n#simulate_iterations(iterations=10000)\n#simulate_iterations_two_player(iterations=10000)\nsimulate_point_tables(iterations=10000, start =5.1, stop=7.1, step=.1)","sub_path":"simulate_wrapper.py","file_name":"simulate_wrapper.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"578590456","text":"# Copyright 2020 The SQLNet Company GmbH\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\n\"\"\"\nCustom class for handling the features of a pipeline.\n\"\"\"\n\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nimport getml.communication as comm\n\nfrom getml.data.helpers import (\n _is_typed_list\n)\n\nfrom .helpers import _attach_empty\n\nfrom .sql_code import SQLCode\n\nclass Features():\n \"\"\"\n Custom class for handling the\n features generated by the pipeline.\n\n Example:\n\n .. 
code-block:: python\n\n names, importances = my_pipeline.features.importances()\n\n names, correlations = my_pipeline.features.correlations()\n\n sql_code = my_pipeline.features.to_sql()\n \"\"\"\n\n # ----------------------------------------------------------------\n\n def __init__(self, name, targets):\n\n if not isinstance(name, str):\n raise ValueError(\n \"'name' must be a str.\")\n\n if not _is_typed_list(targets, str):\n raise TypeError(\n \"'targets' must be a list of str.\")\n\n self.name = name\n\n self.targets = targets\n\n # ----------------------------------------------------------------\n\n def _to_pandas(self, target_num, target_name):\n\n names, correlations = self.correlations(\n target_num=target_num,\n sort=False)\n\n names, importances = self.importances(\n target_num=target_num,\n sort=False)\n\n sql_code = self.to_sql()\n\n code = [sql_code[name].to_str() for name in names \\\n if name[:8] == \"feature_\"]\n\n max_length = np.max([\n len(names),\n len(correlations),\n len(importances),\n len(code)\n ])\n\n data_frame = pd.DataFrame(\n index=np.arange(max_length)\n )\n\n data_frame[\"names\"] = _attach_empty(\n names.tolist(), max_length, \"--\")\n\n data_frame[\"correlations\"] = _attach_empty(\n correlations.tolist(), max_length, np.NaN)\n\n data_frame[\"importances\"] = _attach_empty(\n importances.tolist(), max_length, np.NaN)\n\n data_frame[\"target\"] = [target_name] * max_length\n\n data_frame[\"sql\"] = _attach_empty(\n code, max_length, \"--\")\n\n return data_frame\n\n # ----------------------------------------------------------------\n\n def correlations(self, target_num=0, sort=True):\n \"\"\"\n Returns the data for the feature correlations,\n as displayed in the getML monitor.\n\n Args:\n target_num (int):\n Indicates for which target you want to view the\n importances.\n (Pipelines can have more than one target.)\n\n sort (bool):\n Whether you want the results to be sorted.\n\n Return:\n (:class:`numpy.ndarray`, :class:`numpy.ndarray`):\n - The first array contains the names of\n the features.\n - The second array contains the correlations with\n the target.\n \"\"\"\n\n # ------------------------------------------------------------\n\n cmd = dict()\n\n cmd[\"type_\"] = \"Pipeline.feature_correlations\"\n cmd[\"name_\"] = self.name\n\n cmd[\"target_num_\"] = target_num\n\n\t# ------------------------------------------------------------\n\n sock = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(sock)\n\n if msg != \"Success!\":\n comm.engine_exception_handler(msg)\n\n\t# ------------------------------------------------------------\n\n msg = comm.recv_string(sock)\n\n json_obj = json.loads(msg)\n\n # ------------------------------------------------------------\n\n names = np.asarray(json_obj[\"feature_names_\"])\n correlations = np.asarray(json_obj[\"feature_correlations_\"])\n\n\t# ------------------------------------------------------------\n\n if not sort:\n return names, correlations\n\n\t# ------------------------------------------------------------\n\n indices = np.argsort(np.abs(correlations))[::-1]\n\n # ------------------------------------------------------------\n\n return (\n names[indices],\n correlations[indices]\n )\n\n # ----------------------------------------------------------------\n\n def importances(self, target_num=0, sort=True):\n \"\"\"\n Returns the data for the feature importances,\n as displayed in the getML monitor.\n\n Args:\n target_num (int):\n Indicates for which target you want to view the\n importances.\n 
(Pipelines can have more than one target.)\n\n sort (bool):\n Whether you want the results to be sorted.\n\n Return:\n (:class:`numpy.ndarray`, :class:`numpy.ndarray`):\n - The first array contains the names of\n the features.\n - The second array contains their importances.\n By definition, all importances add up to 1.\n \"\"\"\n\n # ------------------------------------------------------------\n\n cmd = dict()\n\n cmd[\"type_\"] = \"Pipeline.feature_importances\"\n cmd[\"name_\"] = self.name\n\n cmd[\"target_num_\"] = target_num\n\n\t# ------------------------------------------------------------\n\n sock = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(sock)\n\n if msg != \"Success!\":\n comm.engine_exception_handler(msg)\n\n\t# ------------------------------------------------------------\n\n msg = comm.recv_string(sock)\n\n json_obj = json.loads(msg)\n\n # ------------------------------------------------------------\n\n names = np.asarray(json_obj[\"feature_names_\"])\n importances = np.asarray(json_obj[\"feature_importances_\"])\n\n\t# ------------------------------------------------------------\n\n if not sort:\n return names, importances\n\n\t# ------------------------------------------------------------\n\n indices = np.argsort(importances)[::-1]\n\n # ------------------------------------------------------------\n\n return (\n names[indices],\n importances[indices]\n )\n\n # ----------------------------------------------------------------\n\n def to_pandas(self):\n \"\"\"Returns all information related to the features\n in a pandas data frame.\"\"\"\n\n data_frame = None\n\n for t_num, t_name in enumerate(self.targets):\n current_df = self._to_pandas(t_num, t_name)\n\n if data_frame is None:\n data_frame = current_df\n continue\n\n data_frame = pd.concat(\n [data_frame, current_df],\n ignore_index=True\n )\n\n return data_frame\n\n # ----------------------------------------------------------------\n\n def to_sql(self):\n \"\"\"Returns SQL statements visualizing the features.\n\n Examples:\n\n .. 
code-block:: python\n\n                my_pipeline.features.to_sql()\n\n        Raises:\n            IOError: If the pipeline could not be found\n                on the engine or\n                the pipeline could not be fitted.\n            KeyError: If an unsupported instance variable is\n                encountered.\n            TypeError: If any instance variable is of wrong type.\n\n        Returns:\n            :class:`~getml.pipeline.SQLCode`\n                Object representing the features.\n\n        Note:\n            Only fitted pipelines\n            (:meth:`~getml.pipeline.Pipeline.fit`) can hold trained\n            features which can be returned as SQL statements.\n            The dialect is based on the SQLite3 standard.\n        \"\"\"\n\n        # ------------------------------------------------------------\n\n        cmd = dict()\n        cmd[\"type_\"] = \"Pipeline.to_sql\"\n        cmd[\"name_\"] = self.name\n\n        sock = comm.send_and_receive_socket(cmd)\n\n        # ------------------------------------------------------------\n\n        msg = comm.recv_string(sock)\n\n        if msg != \"Found!\":\n            comm.engine_exception_handler(msg)\n\n        # ------------------------------------------------------------\n\n        sql = comm.recv_string(sock)\n\n        # ------------------------------------------------------------\n\n        sock.close()\n\n        # ------------------------------------------------------------\n\n        return SQLCode(sql.split(\"\\n\\n\\n\")[:-1])\n","sub_path":"getml/pipeline/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"123830811","text":"#!/usr/bin/env python\r\n#-*- coding:utf-8 -*-\r\n'''\r\nCreated on 2010-9-13\r\n\r\n@author: zdl\r\n'''\r\nfrom avp import AVP\r\n\r\nclass Float32(AVP):\r\n    '''\r\n    This type represents a single-precision floating-point number, following the description of floating point in IEEE standard 754-1985.\r\n    The 32-bit value is transmitted in network byte order. The AVP length field must be set to 12 (or 16 if the 'V' bit is set).\r\n    '''\r\n    def __init__(self, avp_code=0, avp_data=None, vendor_id=0, \r\n                 mandatory=0, private=0, level=0, decode_buf=None,\r\n                 cmd_etc_instance=None):\r\n        AVP.__init__(self, avp_code, avp_data, vendor_id, \r\n                     mandatory, private, level, decode_buf,\r\n                     cmd_etc_instance)\r\n        self.avp['AVP_CODE_OPERATOR'] = \"!f\"\r\n        self.avp['AVP_DATA_TYPE'] = \"Float32\"\r\n        ","sub_path":"0.1/src/PyDccLib/avp_float32.py","file_name":"avp_float32.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"86785667","text":"#serialising\r\n# converting the data which is in python data types to json data type\r\nimport json\r\nemp={\"empId\":101,\"empName\":\"sara\",\"salary\":87989.879,\"status\":True,\"projects\":(\"p1\",\"p2\")}\r\nlistarr=[{\"emp_id\":101,\"emp_name\":\"sara\",\"sal\":90},{\"emp_id\":102,\"emp_name\":\"tara\",\"sal\":190},{\"emp_id\":103,\"emp_name\":\"lara\",\"sal\":920}]\r\n\r\nwith open(\"emp.json\",\"w\") as filePtr:\r\n    json.dump(emp,filePtr)\r\n\r\nempDataInJson=json.dumps(emp)\r\nprint(\"data in json format\",empDataInJson)\r\n","sub_path":"writeJsonData.py","file_name":"writeJsonData.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"215593037","text":"from django.shortcuts import render\n\nimport json\n\nfrom django.http import JsonResponse\nfrom django.http import HttpResponseBadRequest\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import redirect\n\nfrom api.models import ShortLink\n\nfrom os import urandom\nfrom base64 import b32encode\n\ndef createShortURL(request):\n    if request.method == 'POST':\n        if not request.body:\n            return HttpResponseBadRequest(\"Bad request\")
\n        \n        jsonRequest = None\n        urllist = []\n        try:\n            jsonRequest = json.loads(request.body)\n        except json.decoder.JSONDecodeError:\n            return HttpResponseBadRequest(\"Bad request\")\n        \n        if isinstance(jsonRequest, list):\n            urllist = jsonRequest\n        else:\n            urllist.append(jsonRequest['url'])\n        \n        shortList = []\n        for url in urllist:\n            shortKey = b32encode(urandom(5)).decode('ascii')\n            shortUrl = 'http://' + request.get_host() + '/api/' + shortKey\n            shortLink = ShortLink(original=url, shortKey=shortKey)\n            shortLink.save()\n            shortList.append({\n                'original': url,\n                'short': shortUrl\n            })\n        return JsonResponse(shortList, safe=False)\n    else:\n        return HttpResponseBadRequest(\"Bad request\")\n\n\n\ndef getOriginalURL(request, shortKey):\n    # .get() raises DoesNotExist instead of returning None, so use .filter().first() to allow the not-found branch\n    shortLink = ShortLink.objects.filter(shortKey=shortKey).first()\n    if not shortLink:\n        return HttpResponseNotFound(\"URL Not found\")\n    return redirect(shortLink.original)","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"285586700","text":"# coding=utf8\n\n'''\nFor the client, establishing a TCP connection takes two steps: create a socket object, then call connect() to open a connection to the server.\n'''\n\n\nimport sys,socket\n\nport = 21567\nhost = '127.0.0.1'\nfilename = 'guojian'\n\n# family and type are separate positional arguments, not a tuple\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host,port))\n\ns.sendall(filename + \"\\r\\n\")\n\nwhile True:\n    buf = s.recv(1024)\n    if not len(buf):\n        break\n    sys.stdout.write(buf)\n\n# s =socket.getservbyname()\ns.close()\n","sub_path":"code/untitled1/9-2-网络socket编程/1-客户-服务器网络介绍/Tcp/示例1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"571892984","text":"from WYB_web import my_wallet,payment,QRshouhuo\nfrom CYB_app.ChengYunRen.CYR_company import CYR\nimport unittest,time\n\nclass LiuCheng(unittest.TestCase):\n    def setUp(self):\n        pass\n    def test_1(self):\n        QRshouhuo.qrshouhuo()\n    def test_2(self):\n        time.sleep(5)\n        # get the carrier's balance before payment\n        SJ_shouyi_1, SJ_yue_1 = CYR.CYR_ShouYi_1()\n        self.assertEqual(SJ_shouyi_1, SJ_yue_1, 'checks that balance and earnings match; collects data for test_9')\n        # pay via the WangYun web version\n        yue_1 = float(my_wallet.wallet())\n        payment.Payment()\n        yue_2 = float(my_wallet.wallet())\n        yue_3 = round(yue_1 - yue_2, 2)\n        yue_4 = round(250 / (1 - 0.09), 2)\n        print(yue_4)\n        self.assertEqual(yue_3, yue_4, 'checks that the balance is correct after the WangYun payment')\n        # get the driver's balance after the company pays\n        SJ_shouyi_2, SJ_yue_2 = CYR.CYR_ShouYi()\n        # compute the driver-side deltas\n        SJ_shouyi_3 = float(SJ_shouyi_2) - float(SJ_shouyi_1)\n        SJ_yue_3 = float(SJ_yue_2) - float(SJ_yue_1)\n        self.assertEqual(SJ_shouyi_3, 250, 'checks that the company payment reaches cumulative earnings')\n        self.assertEqual(SJ_yue_3, 250, 'checks that the company payment reaches my wallet')\n        # log out\n\nif __name__ == '__main__':\n    unittest.main()\n    print('ceshi')\n","sub_path":"HuoYunBao_WangYun_Web_1/TEXT/qiye_chengyunren/test_3_payment.py","file_name":"test_3_payment.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"297662114","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Req1_2 import LinearModel\n\n\n# load the data\ntrain_data = np.load(\"..\\\\datasets\\\\linear_train.npy\")\ntest_x = np.load(\"..\\\\datasets\\\\linear_test_x.npy\")\n\n\n# convert train_data into the shape tf expects\nx_data = np.expand_dims(train_data[:,0], axis=1) # keep only the x values of train_data\ny_data = train_data[:,1] # keep only the y values of train_data\n\n\n# create the model\nmodel = LinearModel(num_units=1)\n\n# bind the optimizer and loss function to the model\nmodel.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),\n\t\t\t  loss=tf.keras.losses.MSE,\n\t\t\t  metrics=[tf.keras.metrics.MeanSquaredError()])\n\n# train the model\nmodel.fit(x=x_data, \n\t\t  y=y_data, \n\t\t  epochs=10, \n\t\t  batch_size=32)\n\n\n# test the model\nprediction = model.predict(x=test_x,\n           \t\t\t\t   batch_size=None)\n\n\n# visualize the results\nplt.title('Req 1-5')\nplt.xlabel('X value')\nplt.ylabel('Y value')\nplt.scatter(x_data,y_data,s=5,label=\"train data\")\nplt.scatter(test_x,prediction,s=5,label=\"prediction data\")\nplt.legend()\nplt.show()\n\n\n# print the model summary\nmodel.summary()\n# train_data.close()  # removed: np.load returns an ndarray, which has no close() method\n","sub_path":"works/Req1_5.py","file_name":"Req1_5.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"341418420","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('Dashboard/', views.Dashboard, name='Dashboard'),\n    path('Filter/', views.Filter, name='Filter'),\n    path('Login/', views.Login, name='Login'),\n    path('Register/', views.Register, name='Register'),\n    path('Search/', views.Search, name='Search'),\n]\n","sub_path":"src/nysanitation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"312999342","text":"import random\r\nrandom.seed()\r\ndb = {}\r\npeople = []\r\n\r\nclass Indiv(object):\r\n\r\n\tdef __init__(self, route):\r\n\t\tself.route = route\r\n\t\tself.fitness = self.evaluate()\r\n\r\n\tdef evaluate(self):\r\n\t\thappiness = 0\r\n\t\tfor i in range(len(self.route)):\r\n\t\t\thappiness += db[(self.route[i], self.route[(i + 1) % len(self.route)])]\t\r\n\t\t\thappiness += db[(self.route[i], self.route[(i - 1) % len(self.route)])]\t\r\n\t\treturn happiness\r\n\r\n\tdef mutate(self):\r\n\t\ta = random.randrange(len(self.route))\r\n\t\tb = a\r\n\t\twhile a == b:\r\n\t\t\tb = random.randrange(len(self.route))\r\n\t\tnew_route = []\r\n\t\tfor i in range(len(self.route)):\r\n\t\t\tif i == a:\r\n\t\t\t\tnew_route.append(self.route[b])\r\n\t\t\telif i == b:\r\n\t\t\t\tnew_route.append(self.route[a])\r\n\t\t\telse:\r\n\t\t\t\tnew_route.append(self.route[i])\r\n\t\tself.route = new_route\r\n\t\tself.fitness = self.evaluate()\r\n\r\n\tdef offspring(self):\r\n\t\ta = random.randrange(len(self.route))\r\n\t\treturn self.route[a:] + self.route[:a]\r\n\r\nwith open(\"input13.txt\", \"r\") as textfile:\r\n\tfor line in textfile:\r\n\t\tcut = line.split(' ')\r\n\t\tif cut[2] == 'gain':\r\n\t\t\tsign = '+'\r\n\t\telse:\r\n\t\t\tsign = '-'\r\n\t\tdb[(cut[0], cut[10][:-2])] = int(sign + cut[3])\r\n\t\tif not cut[0] in people:\r\n\t\t\tpeople.append(cut[0])\r\n\r\ndef evolution(pop_size, generations):\r\n\t#create the first population, sort it \r\n\tpopulation = []\r\n\tfor i in range(2 * pop_size):\r\n\t\torder = list(range(len(people)))\r\n\t\trandom.shuffle(order)\r\n\t\troute = []\r\n\t\tfor i in order:\r\n\t\t\troute.append(people[i])\r\n\t\tpopulation.append(Indiv(route))\r\n\tpopulation = sorted(population, key=(lambda x: x.fitness), reverse=True)\r\n\r\n\tfor i in range(generations):\r\n\t\t#reproduction\r\n\t\tfor j in range(int(pop_size / 2)):\r\n\t\t\tpopulation.append(Indiv(population[j].offspring()))\r\n\t\tpopulation = sorted(population, key=(lambda x: x.fitness), reverse=True)\r\n\r\n\t\t#mutation\r\n\t\tfor j in range(1,len(population)):
\r\n\t\t\tif random.randrange(100) < 5:\r\n\t\t\t\tpopulation[j].mutate()\r\n\t\tpopulation = sorted(population, key=(lambda x: x.fitness), reverse=True)\r\n\r\n\t\t#death\r\n\t\tpopulation = population[0:pop_size - int(pop_size / 10)] + population[-int(pop_size / 10):]\r\n\r\n\treturn population[0].fitness\r\n\r\n\r\nvalues = []\r\nfor i in range(3):\r\n\tvalues.append(evolution(300,50))\r\nprint(\"Answer to 13A:\", max(values))\r\n\r\nfor person in people:\r\n\tdb[('You', person)] = 0\r\n\tdb[(person, 'You')] = 0\r\n\r\npeople.append('You')\r\n\r\nvalues = []\r\nfor i in range(3):\r\n\tvalues.append(evolution(300,50))\r\nprint(\"Answer to 13B:\", max(values))\r\n","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"303818936","text":"import pandas as pd\nimport patsy\nfrom lifelines import AalenAdditiveFitter\nimport numpy as np\nfrom line_profiler import LineProfiler\n\n\ndata = pd.read_csv('./dd.csv', header=0, index_col=0)\nT = data['duration'].values[:, None]\nC = data['observed'].values[:, None]\nX = patsy.dmatrix('un_continent_name + regime + start_year -1', data)\nfeatures = X.design_info.column_names\n\n\naaf = AalenAdditiveFitter(penalizer=1., fit_intercept=True)\n\ndef run_test():\n    model=aaf.fit(T, X, censorship=C, columns=features)\n    return model\n\n\nmodel = run_test()  # fit once so 'model' is defined at module level before predicting\n\ntest = np.array([[0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 2003.]])\npred_hazard = model.predict_cumulative_hazard(test, columns=[\"Harper's hazard rate\"])\npred_survival = model.predict_survival_function(test, columns=[\"Harper's survival function\"])\n","sub_path":"lifelines/tests/aaf_test.py","file_name":"aaf_test.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"263484298","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 14 15:43:46 2018\n\n@author: Hat_Pinlei\n\"\"\"\n\nimport Tkinter as tk\nimport os\nimport re\ndirectory = re.findall(r'(.*\\\\ScienceTools)',os.getcwd(),re.S)[0]\nidle = re.findall(r'(.*\\\\Python)',os.getcwd(),re.S)[0]\nimport sys\nsys.path.append(directory+'\\\\Package') \nsys.path.append(idle+'\\\\PythonLib')\nimport QubitFunc as QF\n\nroot = tk.Tk()\nroot.geometry('800x600+500+300')\nroot.title('Pinlei Function')\nroot.wm_iconbitmap(directory + '\\\\scriptL.ico')\n\n\n\n\ntk.Label(root, text = '   Constants Related   ').grid(column = 0, row = 0)\ntk.Label(root, text = '     Qubit Related     ').grid(column = 1, row = 0)\ntk.Label(root, text = '     JPC Related      ').grid(column = 2, row = 0)\n \ntk.Button(root, text = 'Calculation of JJ', width = 15,\n          command = QF.cal_junction).grid(column = 1, row = 1)\ntk.Button(root, text = 'BBQ Analysis', width = 15,\n          command = QF.hfss_simulation).grid(column = 1, row = 2)\n\n\nroot.mainloop()","sub_path":"Python/Pinlei_func_mpVersion/ScienceTools/PinleiFunc.py","file_name":"PinleiFunc.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"512433266","text":"import cv2\r\nimport numpy as np\r\n\r\n# -- paths of the reference (answer) image and the input image\r\nans_src = \"5-rotated.png\"\r\ninp_src = \"o5.jpg\"\r\n\r\ndef findContours(img, s=2, k=1):\r\n    contours, hierarchy = cv2.findContours(img, s, k)\r\n    return contours[0]\r\n\r\ndef getSimilarityRate(img1, img2, k=14):\r\n    ret = cv2.matchShapes(findContours(img1), findContours(img2), 1, 0)\r\n    return round(100 - ret * 10, k)\r\n\r\n# -- load the images and extract contours\r\ninp = cv2.resize(cv2.imread(inp_src, cv2.IMREAD_GRAYSCALE), (300,300))\r\nans = cv2.resize(cv2.imread(ans_src, cv2.IMREAD_GRAYSCALE), (300,300), interpolation=cv2.INTER_LINEAR)\r\nedge = cv2.Canny(cv2.blur(inp.copy(), (3,3)), 50, 150)\r\n\r\n# -- compute the image similarity score\r\nprint(\"similarity score: {0} points\".format(getSimilarityRate(ans, edge)))","sub_path":"getSimilarityRate.py","file_name":"getSimilarityRate.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"507684449","text":"from AuxQuantity1Phase import AuxQuantity1Phase, AuxQuantity1PhaseParameters\n\nclass PressureParameters(AuxQuantity1PhaseParameters):\n    def __init__(self):\n        AuxQuantity1PhaseParameters.__init__(self)\n        self.registerFunctionParameter(\"p_function\", \"Pressure function from EOS\")\n\nclass Pressure(AuxQuantity1Phase):\n    def __init__(self, params):\n        AuxQuantity1Phase.__init__(self, params)\n        self.name = self.p\n        self.p_function = params.get(\"p_function\")\n\n    def compute(self, data, der):\n        p, dp_dv, dp_de = self.p_function(data[self.v], data[self.e])\n        data[self.name] = p\n\n        dp_daA1 = dp_dv * der[self.v][\"aA1\"]\n        dp_darhoA = dp_dv * der[self.v][self.arhoA] + dp_de * der[self.e][self.arhoA]\n        dp_darhouA = dp_de * der[self.e][self.arhouA]\n        dp_darhoEA = dp_de * der[self.e][self.arhoEA]\n        der[self.name][\"aA1\"] = dp_daA1\n        der[self.name][self.arhoA] = dp_darhoA\n        der[self.name][self.arhouA] = dp_darhouA\n        der[self.name][self.arhoEA] = dp_darhoEA\n","sub_path":"src/aux/Pressure.py","file_name":"Pressure.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"635942149","text":"\"\"\"Tests for Resolver class.\"\"\"\nfrom math import inf\n\nimport pytest\n\nfrom astrality.resolver import Resolver\n\n\nclass TestResolverClass:\n    def test_initialization_of_config_class_with_no_config_parser(self):\n        Resolver()\n\n    def test_invocation_of_class_with_application_config(self, conf):\n        Resolver(conf)\n\n    def test_initialization_of_config_class_with_dict(self):\n        conf_dict = {\n            'key1': 'value1',\n            'key2': 'value2',\n            'key3': ('one', 'two', 'three'),\n            'key4': {'key4-1': 'uno', 'key4-2': 'dos'}\n        }\n        config = Resolver(conf_dict)\n        assert config == conf_dict\n\n    def test_values_for_max_key_property(self):\n        config = Resolver()\n        assert config._max_key == -inf\n\n        config['string_key'] = 1\n        assert config._max_key == -inf\n\n        config[2] = 'string_value'\n        assert config._max_key == 2\n\n        config[1] = 'string_value'\n        assert config._max_key == 2\n\n        config[3] = 'string_value'\n        assert config._max_key == 3\n\n    def test_getting_item_from_empty_config(self):\n        config = Resolver()\n        with pytest.raises(KeyError) as exception:\n            config['empty_config_with_no_key']\n\n    def test_accessing_existing_key(self):\n        config = Resolver()\n        config['some_key'] = 'some_value'\n        assert config['some_key'] == 'some_value'\n\n        config[-2] = 'some_other_value'\n        assert config[-2] == 'some_other_value'\n\n    def test_integer_index_resolution(self):\n        config = Resolver()\n        config['some_key'] = 'some_value'\n        config[1] = 'FureCode Nerd Font'\n        assert config[2] == 'FureCode Nerd Font'\n\n    def test_integer_index_resolution_without_earlier_index_key(self):\n        config = Resolver()\n        config['some_key'] = 'some_value'\n        with pytest.raises(KeyError) as exception:\n            config[2]\n        assert exception.value.args[0] == \\\n            'Integer index \"2\" is non-existent and ' 
\\\n 'had no lower index to be substituted for'\n\n def test_index_resolution_with_string_key(self):\n config = Resolver()\n config[2] = 'some_value'\n with pytest.raises(KeyError) as exception:\n config['test']\n assert exception.value.args[0] == 'test'\n\n def test_use_of_recursive_config_objects_created_by_dicts(self):\n conf_dict = {\n 'key1': 'value1',\n 1: 'value2',\n 2: {1: 'some_value'},\n 'key3': ('one', 'two', 'three'),\n 'key4': {1: 'uno', 'key4-2': 'dos'}\n }\n config = Resolver(conf_dict)\n assert config == conf_dict\n assert config[3][2] == 'some_value'\n assert config[2] == {1: 'some_value'}\n assert config[3] == {1: 'some_value'}\n\n assert isinstance(config['key4'], Resolver)\n assert config['key4'] == {1: 'uno', 'key4-2': 'dos'}\n assert config['key4'][1] == 'uno'\n assert config['key4'][2] == 'uno'\n\n def test_getter(self):\n config = Resolver()\n assert config.get('from_empty_config') is None\n\n config['test'] = 'something'\n assert config.get('test') == 'something'\n assert config.get('test', '4') == 'something'\n\n assert config.get('non_existent_key') is None\n assert config.get('non_existent_key', '4') == '4'\n\n def test_items(self):\n config = Resolver()\n config['4'] = 'test'\n config['font'] = 'Comic Sans'\n config['5'] = '8'\n assert list(config.items()) == [('4', 'test',), ('font', 'Comic Sans',), ('5', '8',)]\n\n def test_keys(self):\n config = Resolver()\n config['4'] = 'test'\n config['font'] = 'Comic Sans'\n config['5'] = '8'\n assert list(config.keys()) == ['4', 'font', '5']\n\n def test_values(self):\n config = Resolver()\n config['4'] = 'test'\n config['font'] = 'Comic Sans'\n config['5'] = '8'\n assert list(config.values()) == ['test', 'Comic Sans', '8']\n\n def test_update(self):\n one_conf_dict = {\n 'key1': 'value1',\n 1: 'value2',\n 2: {1: 'some_value'},\n }\n another_conf_dict = {\n 'key3': ('one', 'two', 'three'),\n 'key4': {1: 'uno', 'key4-2': 'dos'}\n }\n merged_conf_dicts = {\n 'key1': 'value1',\n 1: 'value2',\n 2: {1: 'some_value'},\n 'key3': ('one', 'two', 'three'),\n 'key4': {1: 'uno', 'key4-2': 'dos'}\n }\n config = Resolver(one_conf_dict)\n config.update(another_conf_dict)\n assert config == merged_conf_dicts\n\n def test_resolver_class(self):\n resolver = Resolver()\n resolver[1] = 'firs_value'\n resolver[2] = 'second_value'\n resolver['string_key'] = 'string_value'\n\n assert resolver[1] == 'firs_value'\n assert resolver[2] == 'second_value'\n assert resolver[3] == 'second_value'\n assert resolver['string_key'] == 'string_value'\n\n def test_initializing_resolver_with_resolver(self):\n resolver1 = Resolver({'key1': 1})\n resolver2 = Resolver(resolver1)\n assert resolver1 == resolver2\n\n def test_updating_resolver_with_resolver(self):\n resolver1 = Resolver({'key1': 1})\n resolver2 = Resolver({'key2': 2})\n\n resolver1.update(resolver2)\n expected_result = Resolver({'key1': 1, 'key2': 2})\n assert resolver1 == expected_result\n","sub_path":"astrality/tests/test_resolver.py","file_name":"test_resolver.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"192893975","text":"import tkinter as tk\r\nfrom tkinter import * \r\nimport qrcode\r\nfrom openpyxl import Workbook\r\nfrom sqlalchemy import *\r\nfrom PIL import ImageTk, Image\r\nimport os\r\n\r\n# define the name of the directory to be deleted\r\npath = \"QR_images\"\r\ntry:\r\n\tos.mkdir(path)\r\nexcept Exception as e:\r\n print(1)\r\n\r\nmetadata = MetaData()\r\nengine = 
create_engine('sqlite:///db.sqlite')\r\nconn = engine.connect()\r\nusers = Table('users', metadata,\r\n\t Column('id', Integer, primary_key=True),\r\n\t Column('name', String),\r\n\t Column('number', Integer),\r\n\t Column('code', String),\r\n )\r\nmetadata.create_all(engine)\r\nins = users.insert()\r\nx=None\r\n\r\ndef aa2():\r\n\tworkbook = Workbook()\r\n\tsheet = workbook.active\r\n\tworkbook.save(filename=\"db.xlsx\")\r\n\tconn = engine.connect()\r\n # The result of a \"cursor.execute\" can be iterated over by row\r\n\tx=1\r\n\tfor row in conn.execute('SELECT * FROM users;'):\r\n\t\tsheet[\"A\"+str(x)] = str(row.name)\r\n\t\tsheet[\"B\"+str(x)] = str(row.number)\r\n\t\tsheet[\"c\"+str(x)] = str(row.code)\r\n\t\tx=x+1\r\n\tconn.close()\r\n\tworkbook.save(filename=\"db.xlsx\")\r\n\tres.configure(text = \"تم استخراج البيانات\")\r\n\r\ndef evaluate():\r\n\ttry:\r\n\t\tif variable.get() and int(entry2.get()):\r\n\t\t\t# The result of a \"cursor.execute\" can be iterated over by row\r\n\t\t\tconn = engine.connect()\r\n\t\t\tfor row in conn.execute('SELECT * FROM users;'):\r\n\t\t\t\tglobal x\r\n\t\t\t\tx = row[1] in variable.get()\r\n\t\t\tif x :\r\n\t\t\t\tqr = qrcode.QRCode(\r\n\r\n\t\t\t version=1,\r\n\t\t\t error_correction=qrcode.constants.ERROR_CORRECT_L,\r\n\t\t\t box_size=20,\r\n\t\t\t border=4,\r\n\t\t\t )\r\n\t\t\t\tnumber=int(entry2.get())+int(row[2])\r\n\t\t\t\tqr.add_data(\"اسم القطعة :\"+variable.get()+\"\\n عدد القطع :\"+str(number)+\"\\n الرمز:: \"+entry3.get())\r\n\t\t\t\tqr.make(fit=True)\r\n\t\t\t\tprint(row[0])\r\n\t\t\t\timg = qr.make_image(fill_color=\"black\", back_color=\"white\")\r\n\t\t\t\tstmt = users.update().\\\r\n\t\t\t\t values(number=number).\\\r\n\t\t\t\t where(users.c.id == row[0])\r\n\t\t\t\tconn.execute(stmt)\r\n\t\t\t\tconn.close()\r\n\t\t\t\tres.configure(text =\"تم ادخال البيانات\" )\r\n\t\t\t\timg.show()\r\n\t\t\t\twith open('QR_images/'+entry3.get()+'.png', 'wb') as f:\r\n\t\t\t\t\timg.save(f)\r\n\t\t\telse:\r\n\t\t\t\tqr = qrcode.QRCode(\r\n\r\n\t\t\t version=1,\r\n\t\t\t error_correction=qrcode.constants.ERROR_CORRECT_L,\r\n\t\t\t box_size=20,\r\n\t\t\t border=4,\r\n\t\t\t )\r\n\t\t\t\tqr.add_data(\"اسم القطعة :\"+str(variable.get())+\"\\n عدد القطع :\"+str(entry2.get()))\r\n\t\t\t\tqr.make(fit=True)\r\n\t\t\t\timg = qr.make_image(fill_color=\"black\", back_color=\"white\")\r\n\t\t\t\tprint(conn.execute(ins, { \"name\":str(variable.get()), \"number\": entry2.get() , \"code\": str(entry3.get())}))\r\n\t\t\t\tconn.close()\r\n\t\t\t\tres.configure(text =\"تم ادخال البيانات\" )\r\n\t\t\t\timg.show()\r\n\t\t\t\twith open('QR_images/'+entry3.get()+'.png', 'wb') as f:\r\n\t\t\t\t\timg.save(f)\r\n\texcept Exception as e:\r\n\t print(e)\r\n\t res.configure(text = \"تاكد من ادخال البيانات\")\r\n\r\ndef helloCallBack():\r\n\r\n\tconn = engine.connect()\r\n\r\n\t# The result of a \"cursor.execute\" can be iterated over by row\r\n\tf = open(\"db.txt\", \"w\")\r\n\tfor row in conn.execute('SELECT * FROM users;'):\r\n\t\tf.write(str(row)+\"\\n\")\r\n\tf.close()\r\n\r\n\t# Be sure to close the connection\r\n\tconn.close()\r\n\tres.configure(text = \"تم استخراج البيانات \")\r\n \r\nw = tk.Tk()\r\n#w.geometry(\"200x350\")\r\nw.title(\"كلية الحكمة الجامعة \")\r\ntk.Label(w, text=\"اسم المادة\").grid()\r\nvariable = tk.StringVar(w)\r\nvariable.set(\"حاسبات\" )\r\nr = tk.OptionMenu(w, variable,\"حاسبات\",\"طابعة\",\"راوتر\" )\r\nr.grid()\r\ntk.Label(w, text=\"العدد\").grid()\r\nentry2 = tk.Entry(w,width=30)\r\nentry2.bind(\"\", evaluate)\r\nentry2.grid()\r\ntk.Label(w, 
text=\"الرمز\").grid()\r\nentry3 = tk.Entry(w,width=30)\r\nentry3.bind(\"\", evaluate)\r\nentry3.grid()\r\ntk.Label(w, text=\" \").grid()\r\nB = tk.Button(w, text =\"ادخال\", command = evaluate)\r\nB.grid()\r\ntk.Label(w,text=\"\").grid()\r\no = tk.Button(w, text =\"Excel\", command = aa2)\r\no.grid()\r\ntk.Label(w,text=\"\").grid()\r\nc = tk.Button(w, text =\"Text\", command = helloCallBack)\r\nc.grid()\r\nres = tk.Label(w)\r\nres.grid()\r\nload = Image.open(\"b.png\")\r\nrender = ImageTk.PhotoImage(load)\r\nlabel1 = tk.Label(image=render)\r\nlabel1.image = render\r\nlabel1.grid()\r\n\r\nw.mainloop()","sub_path":"QR.py","file_name":"QR.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"434997970","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n\n\nsales_per_day_col_no = 1\ncountries = ['Denmark','Columbia','Belgium','Argentina','England','Finland']\n################## PART-0: AUXILLARY FUNCTIONS #########################\n\ndef num_days_month(m,y):\n if m==1 or m==3 or m==5 or m==7 or m==8 or m==10 or m==12:\n return 31\n if m==4 or m==6 or m==9 or m==11:\n return 30\n if m==2 and y%4==0:\n return 29\n else:\n return 28\n\n\ndef startDate(year, week):\n d = str(year) + '-W' + str(week)\n r = datetime.datetime.strptime(d + '-0', \"%Y-W%W-%w\")\n r = str(r)\n start_date = int(r.split('-')[2].split(' ')[0])\n month = int(r.split('-')[1])\n yr = int(r.split('-')[0])\n return start_date, month,yr\n \n# Number of days in each week by month and year\ndaysByWeek = []\nfor y in range(2013,2018):\n for week in range(52):\n d_prev_week = -100\n d_week = -100\n ytemp = -100\n wtemp = -100\n mtemp = -100\n start_date,month,year = startDate(y,week)\n if start_date<=7:\n d_prev_week = start_date-1 \n if(week==0):\n wtemp = 53\n mtemp = 12\n ytemp = year-1\n else:\n wtemp = week - 1\n mtemp = month\n ytemp = year\n \n if(startDate(y,week)[0]>startDate(ytemp,wtemp)[0]):\n if(d_week==-100):\n d_week = 7\n else:\n d_prev_week = 7 - startDate(y,week)[0] + 1\n d_week = startDate(y,week)[0] - 1\n temp1 = [y,month, week+1, d_week]\n if(d_prev_week!=-100):\n if(month==1):\n month = 12\n ytemp = y-1\n week = 52\n else:\n month -= 1\n ytemp = y\n temp = [ytemp,month, week+1, d_prev_week]\n daysByWeek.append(temp)\n if(d_week!=0):\n daysByWeek.append(temp1)\n \n\ndef unique(list,index):\n unique_list = []\n for row in list:\n if row[index] not in unique_list:\n unique_list.append(row[index])\n return unique_list\n\n\n\n\n############## PART-1: PREPROCESSING #################################\n \n\n\n\n# HOLIDAYS #\n \nholidays_csv = pd.read_csv('holidays.csv', encoding = \"ISO-8859-1\")\ndataset = holidays_csv.iloc[:406,:3].values\n\ndef getHolidaysByCountry(country):\n holidaysByWeek = []\n for holiday in dataset:\n if(str(holiday[1])==country):\n h = []\n temp = str(holiday[0]).split(',')\n week_no=(datetime.date(int((temp[0])), int(temp[1]), int(temp[2])).isocalendar()[1])\n h_name = holiday[2]\n h.append(temp[0])\n h.append(week_no)\n h.append(h_name)\n holidaysByWeek.append(h)\n return np.array(holidaysByWeek)\n\nholiday = dataset[0]\nprint(holiday)\nA_holidays = getHolidaysByCountry(countries[0])\nB_holidays = getHolidaysByCountry(countries[1])\nC_holidays = getHolidaysByCountry(countries[2])\nD_holidays = getHolidaysByCountry(countries[3])\nE_holidays = getHolidaysByCountry(countries[4])\nF_holidays = 
getHolidaysByCountry(countries[5])\n\n\n########### ARGENTINA ################\n\nLE_A_holidays = LabelEncoder()\nA_holidays[:,2] = LE_A_holidays.fit_transform(A_holidays[:,2])\nA_oneHotEncoder = OneHotEncoder(categorical_features=[2])\nA_holidays = A_oneHotEncoder.fit_transform(A_holidays).toarray()\n\nprint(len(A_holidays[0]))\n\nAHolByWeek = []\nunique_A_2 = unique(A_holidays,-2)\nfor i in unique(A_holidays,-1):\n for year in unique(A_holidays,-2):\n temp = [0 for _ in range(len(A_holidays[0])-2)]\n add = 0\n for A_h in A_holidays:\n if(A_h[-1]==i and A_h[-2]==year):\n add = 1\n for j in range(len(A_holidays[0])-2):\n temp[j] += A_h[j]\n if(add==1):\n temp.append(year)\n temp.append(i)\n AHolByWeek.append(temp)\n \n\nAHolByTime = []\nfor i in range(1,len(daysByWeek)):\n for row in AHolByWeek:\n if((daysByWeek[i][0]==row[-2]) and (daysByWeek[i][2]==row[-1])):\n temp = [i]\n for j in range(len(row)-2):\n temp.append(row[j])\n AHolByTime.append(temp)\n\n\n# TRAIN #\n \ntrain_csv = pd.read_csv('yds_train2018.csv')\nA_train = []\nB_train = []\nC_train = []\nD_train = []\nE_train = []\nF_train = []\n\ntrain_df = train_csv.iloc[:,:].values\n\nfor row in train_df:\n if row[4]==countries[0]:\n A_train.append(row)\n elif row[4]==countries[1]:\n B_train.append(row)\n elif row[4]==countries[2]:\n C_train.append(row)\n elif row[4]==countries[3]:\n D_train.append(row)\n elif row[4]==countries[4]:\n E_train.append(row)\n elif row[4]==countries[5]:\n F_train.append(row)\n\nprint(A_train[0])\n \n\n\n# MAPPING YEAR and WEEK NUMBER TO TIMELINE INDECES #\n\nA_timeline = []\n#[timestamp, product ID, sales, number of days in that timestamp]\nfor i in range(1,len(daysByWeek)):\n for row in A_train:\n if((daysByWeek[i][0]==row[0]) and (daysByWeek[i][2]==row[2])):\n temp = [i,row[3],row[5],daysByWeek[i][3]]\n A_timeline.append(temp)\n\n\nA_compact_timeline = []\n# [timestamp, product ID, Total Sales, Number of Days]\nfor row in A_timeline:\n added = 0\n for i in range(len(A_compact_timeline)):\n if(row[0]==A_compact_timeline[i][0] and row[1]==A_compact_timeline[i][1]):\n A_compact_timeline[i][2] += int(row[2])\n added = 1\n if(added==0):\n A_compact_timeline.append(row)\n \n\n############### PROMOTIONS ################################\n\npromotions_csv = pd.read_csv('promotional_expense.csv')\n\npromo_df = promotions_csv.iloc[:,:5].values\n\ndef Time2Date(timestamp):\n return daysByWeek[timestamp][0], daysByWeek[timestamp][1], daysByWeek[timestamp][2]\n\ndef Date2Time(year, month):\n list=[]\n for i in range(1,len(daysByWeek)):\n if daysByWeek[i][0]==year and daysByWeek[i][1]==month:\n list.append(i)\n return list\n\npromoByTime = []\nfor row in promo_df:\n if row[2] == countries[0]:\n month = row[1]\n year = row[0]\n days = num_days_month(month,year)\n promoPerDay = row[4]/days\n pID = row[3]\n times = Date2Time(year,month)\n for time in times:\n for i in range(len(A_compact_timeline)):\n if A_compact_timeline[i][0] == time and A_compact_timeline[i][1]==row[3]:\n A_compact_timeline[i].append(promoPerDay)\n temp = [time,pID, promoPerDay]\n promoByTime.append(temp)\n break\n \nfor row in A_compact_timeline:\n if(len(row)==4):\n row.append(0)\n\n#Getting Sales per day\nfor i in range(len(A_compact_timeline)):\n A_compact_timeline[i][2] = A_compact_timeline[i][2] / A_compact_timeline[i][3]\n\nfor i in range(len(A_compact_timeline)):\n for hol in AHolByTime:\n if(A_compact_timeline[i][0]==hol[0]):\n for j in range(1,len(hol)):\n A_compact_timeline[i].append(hol[j])\n 
\n#-----------------------------------------------------------------\nn_features = 16\nfor row in A_compact_timeline:\n if(len(row) may be bad in some cases...?\n current = list(self)\n fields = self._fields(self.parent.name)\n primary = self._get_primary(self.parent.name)\n i = fields.index(primary)\n value[i] = current[i]\n return value\n\n def _handle_element(self, key, value):\n # add a check to ensure the data types match up?\n current = list(self)\n current[key] = value\n return current\n\n def __setitem__(self, key, value):\n if type(value) == list: # this needs some work. Too brittle.\n values = self._handle_list(value)\n elif type(value) == SQLRow:\n values = self._handle_list(list(value)) # cast it to list just to be safe. Check this.\n else:\n values = self._handle_element(key, value)\n super().__setitem__(key, values)\n self._update_row(self.parent.name, self.name, values)\n self._commit()\n\n def __getitem__(self, item):\n # anything here? could just delete... There was something in the old version.\n return super().__getitem__(item)","sub_path":"somnia/sql_row.py","file_name":"sql_row.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"170771178","text":"import tensorflow as tf\n\ndef func():\n a = tf.constant([[10,10],[11.,1.]])\n b = tf.constant([[1.,0.],[0.,1.]])\n c = tf.Variable(12.)\n d = tf.matmul(a, b) + c\n return d\n\nprint(func().numpy())\n\n","sub_path":"books/Machine Learning/CompanionFiles/code/appendixb-tf2/tf2_simple_function.py","file_name":"tf2_simple_function.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"360931012","text":"import os\r\nimport cv2\r\nimport csv\r\nimport shutil\r\nimport numpy as np\r\nimport random as rnd\r\nimport cloudpickle as cpickle\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nSIZE = 1024\r\nSTEP = 600\r\nLENGTH = 375000\r\nMARGIN = 50\r\nVAL = 127\r\nCOUNT = 6000\r\n\r\nboxes = [(175, 175), (175, 250), (200, 225), (200, 400), (175, 325),\r\n (225, 525), (100, 125), (250, 600), (250, 700), (325, 800)]\r\n\r\nPATH_TRUE = \"/kaggle/input/learning-images/learning/true/\"\r\nPATH_FALSE = \"/kaggle/input/learning-images/learning/false/\"\r\n\r\ndef transform(img):\r\n bad_data = np.array(img)\r\n x = len(bad_data)\r\n y = len(bad_data[0])\r\n bad_data = bad_data.reshape((x * y, 3))\r\n data = np.zeros(x * y, int)\r\n for i in range(x * y):\r\n data[i] = bad_data[i][0]\r\n return data\r\n\r\n\r\nclf = RandomForestClassifier(n_estimators=0, max_depth=7, warm_start=True, n_jobs=-1)\r\n\r\nkey_true = []\r\nkey_false = []\r\n\r\n\r\ndef expend(arr):\r\n data = np.full(LENGTH, VAL, int)\r\n got = LENGTH - len(arr)\r\n for i in range(got // 2 + 1, got // 2 + 1 + len(arr)):\r\n data[i] = arr[i - got // 2 - 1]\r\n return data\r\n\r\n\r\ndef fit():\r\n key = []\r\n value = []\r\n for i in range(STEP):\r\n key.append(expend(key_true.pop()))\r\n key.append(expend(key_false.pop()))\r\n value.append(1)\r\n value.append(0)\r\n\r\n print(\"I've got something new\")\r\n clf.n_estimators += STEP * 4\r\n clf.fit(key, value)\r\n print(\"I've learnt something new\")\r\n\r\n\r\ncnt_true = 0\r\ncnt_false = 0\r\n\r\nwith open(\"/kaggle/input/train-labels/stage_1_train_labels.csv\", \"r\") as file:\r\n reader = csv.reader(file)\r\n for row in reader:\r\n if row[5] == '1':\r\n if cnt_true == COUNT:\r\n continue\r\n cnt_true += 1\r\n top_x = 
int(row[1])\r\n top_y = int(row[2])\r\n width = int(row[3])\r\n height = int(row[4])\r\n img = cv2.imread(PATH_TRUE + row[0] + '.jpg')[top_y:top_y + height, top_x:top_x + width]\r\n key_true.append(transform(img))\r\n elif row[5] == '0':\r\n if cnt_false == COUNT:\r\n continue\r\n cnt_false += 1\r\n num = rnd.randint(0, len(boxes) - 1)\r\n width = boxes[num][0]\r\n height = boxes[num][1]\r\n top_x = rnd.randint(0, SIZE - width - MARGIN)\r\n top_y = rnd.randint(0, SIZE - height - MARGIN)\r\n img = cv2.imread(PATH_FALSE + row[0] + '.jpg')[top_y:top_y + height, top_x:top_x + width]\r\n key_false.append(transform(img))\r\n\r\n if (len(key_true) >= STEP) and (len(key_false) >= STEP):\r\n fit()\r\n if (cnt_false + cnt_true) % 100 == 0:\r\n print(str(cnt_false + cnt_true) + '/' + str(2 * COUNT))\r\n if cnt_true + cnt_false == 2 * COUNT:\r\n break\r\n\r\n\r\ncpickle.dump(clf, open(\"/kaggle/working/model.pickle\", \"wb\"))\r\n","sub_path":"Random Forest/fitting-a-model.py","file_name":"fitting-a-model.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"518017155","text":"# -*- coding: utf-8 -*-\nimport logging\nlog = logging.getLogger('core.admin')\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.contrib.admin.sites import AlreadyRegistered, AdminSite\nfrom models import VisibleContentType, TextSnippet, AdminComment\nfrom forms import AdminCommentForm\nfrom cc.conf import coresettings\n#\n# set up the client admins\n#\nsimpleadmin = AdminSite()\nsimpleadmin.root_path = '/admin/'\n# TODO : simpleadmin.index_template=\"supersweetindex.html\"\nadvancedadmin = AdminSite()\nadvancedadmin.root_path = '/admin/'\nadmin.root_path = '/designcc/'\n\n#\n# local admin models\n#\nclass BaseAdmin(admin.ModelAdmin):\n actions = ['make_visible', 'make_hidden', ]\n date_hierarchy = 'created'\n save_on_top = True\n list_per_page = 20\n list_filter = ['created', 'modified', 'visible']\n fieldsets = (\n ('Misc Information', {\n 'fields' : ('created', 'modified', 'visible', 'order'),\n 'classes' : ('collapse',)\n }),\n )\n \n def make_visible(self, request, queryset):\n rows_updated = queryset.update(visible=1)\n if rows_updated == 1:\n message_bit = \"1 item was\"\n else:\n message_bit = \"%s items were\" % rows_updated\n self.message_user(request, \"%s successfully changed to visible.\" % message_bit)\n make_visible.short_description = \"Mark selected items as visible\"\n \n def make_hidden(self, request, queryset):\n rows_updated = queryset.update(visible=0)\n if rows_updated == 1:\n message_bit = \"1 item was\"\n else:\n message_bit = \"%s items were\" % rows_updated\n self.message_user(request, \"%s successfully changed to hidden\" % message_bit)\n make_hidden.short_description = \"Mark selected items as hidden\"\n \n @staticmethod\n def register(*args, **kwargs):\n # get the model class\n model = False\n try:\n model = args[0]\n except IndexError:\n raise Exception('BaseAdmin.register requires the model class it\\'s first argument')\n # get the admin class\n adminclass = False\n try:\n adminclass = args[1]\n except IndexError:\n pass\n # and finally register them\n try:\n if model.__name__ not in coresettings.ADMIN_IGNORES:\n if kwargs.get('simple', True):\n simpleadmin.register(model, adminclass)\n if kwargs.get('advanced', True):\n advancedadmin.register(model, adminclass)\n if kwargs.get('designcc', True):\n admin.site.register(model, adminclass)\n except 
admin.sites.AlreadyRegistered:\n pass\n \n\nclass VisibleContentTypeAdmin(admin.ModelAdmin):\n pass\n \n\nclass TextSnippetAdmin(BaseAdmin):\n list_display = list_display_links = ['name', 'value', 'created']\n fieldsets = (\n ('Essential Information', {\n 'fields' : ('name', 'value', 'created'),\n }),\n )\n\nclass TextSnippetCCAdmin(BaseAdmin):\n list_display = list_display_links = ['name', 'locked', 'varname', 'value']\n prepopulated_fields={'varname': ('name',)}\n fieldsets = (\n ('Essential Information', {\n 'fields' : ('name', 'value' ),\n }),\n ('Advanced', {\n 'fields' : ('varname', 'locked',),\n 'classes' : ('collapse',)\n }), \n ('Misc Information', {\n 'fields' : ('created', 'modified'),\n 'classes' : ('collapse',)\n }),\n )\n \n\nclass AdminCommentAdmin(admin.ModelAdmin):\n form = AdminCommentForm\n \n\n#BaseAdmin.register(Message, MessageAdmin, simple=False)\n#BaseAdmin.register(DontSendEntry, DontSendEntryAdmin, simple=False)\n#BaseAdmin.register(MessageLog, MessageLogAdmin, simple=False)\nBaseAdmin.register(TextSnippet, TextSnippetAdmin, simple=False, advanced=True)\nBaseAdmin.register(AdminComment, AdminCommentAdmin, simple=False, advanced=False)\nBaseAdmin.register(VisibleContentType, VisibleContentTypeAdmin, simple=False)\n\n","sub_path":"cc/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"600774739","text":"# Copyright 2015 David Hadka\n#\n# This file is part of the PRIM module.\n#\n# PRIM is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# PRIM is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with PRIM. 
If not, see .\n\nfrom __future__ import absolute_import, division\n\nimport copy\nimport logging\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.gridspec as gridspec\nimport mpldatacursor\nfrom operator import itemgetter\nfrom matplotlib.widgets import Button\nfrom mpl_toolkits.axes_grid1 import host_subplot\nfrom prim.exceptions import PRIMError\nfrom prim import pairs_plotting\nfrom prim import scenario_discovery_util as sdutil\nfrom prim.plotting_util import make_legend\n\ntry:\n import mpld3\nexcept ImportError:\n logging.getLogger(__name__).info(\"mpld3 library not found, some functionality will be disabled\")\n global mpld3\n mpld3 = None\n\ndef _pair_wise_scatter(x,y, box_lim, restricted_dims, grid=None):\n ''' helper function for pair wise scatter plotting\n\n Parameters\n ----------\n x : numpy structured array\n the experiments\n y : numpy array\n the outcome of interest\n box_lim : numpy structured array\n a boxlim\n restricted_dims : list of strings\n list of uncertainties that define the boxlims\n\n '''\n\n restricted_dims = list(restricted_dims)\n combis = [(field1, field2) for field1 in restricted_dims\\\n for field2 in restricted_dims]\n\n if not grid:\n grid = gridspec.GridSpec(len(restricted_dims), len(restricted_dims))\n grid.update(wspace = 0.1,\n hspace = 0.1)\n figure = plt.figure()\n else:\n figure = plt.gcf()\n\n for field1, field2 in combis:\n i = restricted_dims.index(field1)\n j = restricted_dims.index(field2)\n ax = figure.add_subplot(grid[i,j])\n\n # scatter points\n for n in [0,1]:\n x_n = x[y==n]\n x_1 = x_n[field2]\n x_2 = x_n[field1]\n\n if field1 == field2 and not len(restricted_dims) == 1:\n ec = 'white'\n elif n == 0:\n ec = 'b'\n else:\n ec = 'r'\n\n ax.scatter(x_1, x_2, facecolor=ec, edgecolor=ec, s=10)\n\n ax.autoscale(tight=True)\n\n # draw boxlim\n if field1 != field2 or len(restricted_dims) == 1:\n x_1 = box_lim[field2]\n x_2 = box_lim[field1]\n\n for n in [0,1]:\n ax.plot(x_1,\n [x_2[n], x_2[n]], c='k', linewidth=3)\n ax.plot([x_1[n], x_1[n]],\n x_2, c='k', linewidth=3)\n\n# #reuse labeling function from pairs_plotting\n if len(restricted_dims) > 1:\n pairs_plotting.do_text_ticks_labels(ax, i, j, field1, field2, None,\n restricted_dims)\n\n return figure\n\nclass CurEntry(object):\n '''a descriptor for the current entry on the peeling and pasting\n trajectory'''\n\n def __init__(self, name):\n self.name = name\n\n def __get__(self, instance, owner):\n # print instance.peeling_trajectory[self.name]\n return instance.peeling_trajectory[self.name][instance._cur_box]\n\n def __set__(self, instance, value):\n raise PRIMError(\"this property cannot be assigned to\")\n\nclass PrimBox(object):\n '''A class that holds information over a specific box\n\n Attributes\n ----------\n coverage : float\n coverage of currently selected box\n density : float\n density of currently selected box\n mean : float\n mean of currently selected box\n res_dim : int\n number of restricted dimensions of currently selected box\n mass : float\n mass of currently selected box\n peeling_trajectory : pandas dataframe\n stats for each box in peeling trajectory\n box_lims : list\n list of box lims for each box in peeling trajectory\n\n\n by default, the currently selected box is the last box on the peeling\n trajectory, unless this is changed via :meth:`PrimBox.select`.\n\n '''\n\n coverage = CurEntry('coverage')\n density = CurEntry('density')\n mean = CurEntry('mean')\n res_dim = CurEntry('res dim')\n mass = 
CurEntry('mass')\n\n    _frozen = False\n\n    def __init__(self, prim, box_lims, indices):\n        '''init\n\n        Parameters\n        ----------\n        prim : Prim instance\n        box_lims : recarray\n        indices : ndarray\n\n\n        '''\n\n        self.prim = prim\n\n        # peeling and pasting trajectory\n        columns = ['coverage', 'density', 'mean', 'res dim', 'mass']\n        self.peeling_trajectory = pd.DataFrame(columns=columns)\n\n        self.box_lims = []\n        self._cur_box = -1\n\n        # indices of the data in the box\n        self.update(box_lims, indices)\n\n    def __getattr__(self, name):\n        '''\n        used here to give box_lim same behaviour as coverage, density, mean\n        res_dim, and mass. That is, it will return the box lim associated with\n        the currently selected box.\n        '''\n\n        if name == 'box_lim':\n            return self.box_lims[self._cur_box]\n        else:\n            raise AttributeError\n\n    def inspect(self, i=None, style='table'):\n        '''\n\n        Write the stats and box limits of the user specified box to standard\n        out. If i is not provided, the last box will be printed.\n\n        Parameters\n        ----------\n        i : int, optional\n            the index of the box, defaults to currently selected box\n        style : {'table', 'graph'}\n            the style of the visualization\n\n        '''\n        if i is None:\n            i = self._cur_box\n\n        stats = self.peeling_trajectory.iloc[i].to_dict()\n        stats['restricted_dim'] = stats['res dim']\n\n        qp_values = self._calculate_quasi_p(i)\n        uncs = [(key, value) for key, value in qp_values.items()]\n        uncs.sort(key=itemgetter(1))\n        uncs = [uncs[0] for uncs in uncs]\n\n        if style == 'table':\n            return self._inspect_table(i, uncs, qp_values)\n        elif style == 'graph':\n            return self._inspect_graph(i, uncs, qp_values)\n        else:\n            raise ValueError(\"style must be one of graph or table\")\n\n    def _inspect_table(self, i, uncs, qp_values):\n        '''Helper function for visualizing box statistics in\n        table form'''\n        # print the descriptive statistics for the box\n        print(self.peeling_trajectory.iloc[i])\n        print()\n\n        # make the box definition\n        columns = pd.MultiIndex.from_product([['box {}'.format(i)],\n                                              ['min', 'max', 'qp values']])\n        box_lim = pd.DataFrame(np.zeros((len(uncs), 3)),\n                               index=uncs,\n                               columns=columns)\n\n        for unc in uncs:\n            values = self.box_lims[i][unc][:]\n            box_lim.loc[unc] = [values[0], values[1], qp_values[unc]]\n\n        print(box_lim)\n        print()\n\n    def show_box_details(self, fig=None):\n        i = self._cur_box\n        qp_values = self._calculate_quasi_p(i)\n        uncs = [(key, value) for key, value in qp_values.items()]\n        uncs.sort(key=itemgetter(1))\n        uncs = [uncs[0] for uncs in uncs]\n        n = len(uncs)\n\n        if fig is not None:\n            plt.figure(fig.number)\n            plt.clf()\n        else:\n            fig = plt.figure(figsize=(12, 6))\n\n        outer_grid = gridspec.GridSpec(1, 2, wspace=0.1, hspace=0.1)\n\n        ax0 = plt.Subplot(fig, outer_grid[0], frame_on=False)\n        ax0.xaxis.set_visible(False)\n        ax0.yaxis.set_visible(False)\n        ax0.set_title(\"Box Coverage Plot\")\n        fig.add_subplot(ax0)\n\n        inner_grid = gridspec.GridSpecFromSubplotSpec(n, n,\n            subplot_spec=outer_grid[0], wspace=0.1, hspace=0.1)\n\n        self.show_pairs_scatter(grid=inner_grid)\n\n        inner_grid = gridspec.GridSpecFromSubplotSpec(2, 1,\n            subplot_spec=outer_grid[1], wspace=0.0, hspace=0.0)\n\n        ax1 = plt.Subplot(fig, inner_grid[0])\n\n        fig.add_subplot(ax1)\n        self.show_box()\n        ax1.set_title(\"Restricted Dimensions\", y=1.08)\n\n        ax2 = plt.Subplot(fig, inner_grid[1], frame_on=False)\n        ax2.xaxis.set_visible(False)\n        ax2.yaxis.set_visible(False)\n        fig.add_subplot(ax2)\n\n        ax2.add_table(plt.table(cellText=[[\"Coverage\", \"%0.1f%%\" % (100*self.peeling_trajectory['coverage'][i])],\n                                          
[\"Density\", \"%0.1f%%\" % (100*self.peeling_trajectory['density'][i])],\n [\"Mass\", \"%0.1f%%\" % (100*self.peeling_trajectory['mass'][i])],\n [\"Res Dim\", \"%d\" % self.peeling_trajectory['res dim'][i]],\n [\"Mean\", \"%0.2f\" % self.peeling_trajectory['mean'][i]]],\n cellLoc='center',\n colWidths=[0.3, 0.7],\n loc='center'))\n ax2.set_title(\"Statistics\", y=0.7)\n\n def next(event):\n i = (self._cur_box + 1) % self.peeling_trajectory.shape[0]\n self.select(i)\n self.show_box_details(fig=event.canvas.figure)\n\n def prev(event):\n i = (self._cur_box - 1) % self.peeling_trajectory.shape[0]\n self.select(i)\n self.show_box_details(fig=event.canvas.figure)\n\n axprev = plt.axes([0.7, 0.05, 0.1, 0.075])\n axnext = plt.axes([0.81, 0.05, 0.1, 0.075])\n self.bnext = Button(axnext, \"Next\")\n self.bprev = Button(axprev, \"Prev\")\n self.bnext.on_clicked(next)\n self.bprev.on_clicked(prev)\n\n plt.subplots_adjust(top=0.85)\n plt.draw()\n\n return fig\n\n\n def show_box(self, ax=None):\n i = self._cur_box\n qp_values = self._calculate_quasi_p(i)\n uncs = [(key, value) for key, value in qp_values.items()]\n uncs.sort(key=itemgetter(1))\n uncs = [uncs[0] for uncs in uncs]\n\n box_lim_init = self.prim.box_init\n box_lim = self.box_lims[i]\n norm_box_lim = sdutil._normalize(box_lim, box_lim_init, uncs)\n\n left = []\n height = []\n bottom = []\n\n for i, _ in enumerate(uncs):\n left.append(i)\n height.append(norm_box_lim[i][1]-norm_box_lim[i][0])\n bottom.append(norm_box_lim[i][0])\n\n plt.bar(left,\n height,\n width = 0.6,\n bottom = bottom,\n align=\"center\")\n plt.ylim(0, 1)\n plt.xticks(left, uncs)\n plt.tick_params(axis='y',\n which='both',\n right='off',\n left='off',\n labelleft='off')\n\n fig = plt.gcf()\n ax = plt.gca()\n\n for i, _ in enumerate(uncs):\n ax.text(i - 0.15,\n norm_box_lim[i][0], \"%0.2f\" % norm_box_lim[i][0],\n horizontalalignment='center',\n verticalalignment='bottom',\n color='w')\n\n ax.text(i + 0.15,\n norm_box_lim[i][1], \"%0.2f\" % norm_box_lim[i][1],\n horizontalalignment='center',\n verticalalignment='top',\n color='w')\n\n return fig\n\n def _inspect_graph(self, i, uncs, qp_values):\n '''Helper function for visualizing box statistics in\n graph form'''\n\n # normalize the box lims\n # we don't need to show the last box, for this is the\n # box_init, which is visualized by a grey area in this\n # plot.\n box_lim_init = self.prim.box_init\n box_lim = self.box_lims[i]\n norm_box_lim = sdutil._normalize(box_lim, box_lim_init, uncs)\n\n fig, ax = sdutil._setup_figure(uncs)\n\n for j, u in enumerate(uncs):\n # we want to have the most restricted dimension\n # at the top of the figure\n xj = len(uncs) - j - 1\n\n self.prim._plot_unc(box_lim_init, xj, j, 0, norm_box_lim, box_lim,\n u, ax)\n\n # new part\n dtype = box_lim_init[u].dtype\n\n props = {'facecolor':'white',\n 'edgecolor':'white',\n 'alpha':0.25}\n y = xj\n\n\n if dtype == object:\n pass\n elements = sorted(list(box_lim_init[u][0]))\n max_value = (len(elements)-1)\n values = box_lim[u][0]\n x = [elements.index(entry) for entry in\n values]\n x = [entry/max_value for entry in x]\n\n for xi, label in zip(x, values):\n ax.text(xi, y-0.1, label, ha='center', va='center',\n bbox=props, color='blue', fontweight='normal')\n\n else:\n props = {'facecolor':'white',\n 'edgecolor':'white',\n 'alpha':0.25}\n\n # plot limit text labels\n x = norm_box_lim[j][0]\n\n if not np.allclose(x, 0):\n label = \"{: .2g}\".format(self.box_lims[i][u][0])\n ax.text(x-0.01, y, label, ha='right', va='center',\n bbox=props, color='blue', 
fontweight='normal')\n\n x = norm_box_lim[j][1]\n if not np.allclose(x, 1):\n label = \"{: .2g}\".format(self.box_lims[i][u][1])\n ax.text(x+0.01, y, label, ha='left', va='center',\n bbox=props, color='blue', fontweight='normal')\n\n # plot uncertainty space text labels\n x = 0\n label = \"{: .2g}\".format(box_lim_init[u][0])\n ax.text(x-0.01, y, label, ha='right', va='center',\n bbox=props, color='black', fontweight='normal')\n\n x = 1\n label = \"{: .2g}\".format(box_lim_init[u][1])\n ax.text(x+0.01, y, label, ha='left', va='center',\n bbox=props, color='black', fontweight='normal')\n\n # set y labels\n labels = [\"{} ({:.2g})\".format(u, qp_values[u]) for u in uncs]\n labels = labels[::-1]\n ax.set_yticklabels(labels)\n\n # remove x tick labels\n ax.set_xticklabels([])\n\n # add table to the left\n coverage = '{:.3g}'.format(self.peeling_trajectory['coverage'][i])\n density = '{:.3g}'.format(self.peeling_trajectory['density'][i])\n\n ax.table(cellText=[[coverage], [density]],\n colWidths = [0.1]*2,\n rowLabels=['coverage', 'density'],\n colLabels=None,\n loc='right',\n bbox=[1.1, 0.9, 0.1, 0.1])\n\n #plt.tight_layout()\n return fig\n\n def select(self, i):\n '''\n select an entry from the peeling and pasting trajectory and update\n the prim box to this selected box.\n\n Parameters\n ----------\n i : int\n the index of the box to select.\n\n '''\n if self._frozen:\n raise PRIMError(\"\"\"box has been frozen because PRIM has found\n at least one more recent box\"\"\")\n\n indices = sdutil._in_box(self.prim.x[self.prim.yi_remaining],\n self.box_lims[i])\n self.yi = self.prim.yi_remaining[indices]\n self._cur_box = i\n\n def drop_restriction(self, uncertainty):\n '''\n drop the restriction on the specified dimension. That is, replace\n the limits in the chosen box with a new box where for the specified\n uncertainty the limits of the initial box are being used. The resulting\n box is added to the peeling trajectory.\n\n Parameters\n ----------\n uncertainty : str\n\n '''\n\n new_box_lim = copy.deepcopy(self.box_lim)\n new_box_lim[uncertainty][:] = self.box_lims[0][uncertainty][:]\n indices = sdutil._in_box(self.prim.x[self.prim.yi_remaining],\n new_box_lim)\n indices = self.prim.yi_remaining[indices]\n self.update(new_box_lim, indices)\n\n def update(self, box_lims, indices):\n '''\n\n update the box to the provided box limits.\n\n Parameters\n ----------\n box_lims: numpy recarray\n the new box_lims\n indices: ndarray\n the indices of y that are inside the box\n\n '''\n self.yi = indices\n\n y = self.prim.y[self.yi]\n\n self.box_lims.append(box_lims)\n\n coi = self.prim.determine_coi(self.yi)\n\n data = {'coverage':coi/self.prim.t_coi,\n 'density':coi/y.shape[0],\n 'mean':np.mean(y),\n 'res dim':sdutil._determine_nr_restricted_dims(self.box_lims[-1],\n self.prim.box_init),\n 'mass':y.shape[0]/self.prim.n}\n new_row = pd.DataFrame([data])\n self.peeling_trajectory = self.peeling_trajectory.append(new_row,\n ignore_index=True)\n\n self._cur_box = len(self.peeling_trajectory)-1\n\n def show_ppt(self):\n '''show the peeling and pasting trajectory in a figure'''\n\n ax = host_subplot(111)\n ax.set_xlabel(\"peeling and pasting trajectory\")\n\n par = ax.twinx()\n par.set_ylabel(\"nr. 
restricted dimensions\")\n\n ax.plot(self.peeling_trajectory['mean'], label=\"mean\")\n ax.plot(self.peeling_trajectory['mass'], label=\"mass\")\n ax.plot(self.peeling_trajectory['coverage'], label=\"coverage\")\n ax.plot(self.peeling_trajectory['density'], label=\"density\")\n par.plot(self.peeling_trajectory['res dim'], label=\"restricted dims\")\n ax.grid(True, which='both')\n ax.set_ylim(ymin=0,ymax=1)\n\n fig = plt.gcf()\n\n make_legend(['mean', 'mass', 'coverage', 'density', 'restricted_dim'],\n ax, ncol=5, alpha=1)\n return fig\n\n def formatter(self, **kwargs):\n i = kwargs.get(\"ind\")[0]\n data = self.peeling_trajectory.ix[i]\n label = \"Box %d\\nCoverage: %2.1f%%\\nDensity: %2.1f%%\\nMass: %2.1f%%\\nRes Dim: %d\" % (i, 100*data[\"coverage\"], 100*data[\"density\"], 100*data[\"mass\"], data[\"res dim\"])\n return label\n\n def handle_click(self, event):\n #if event.mouseevent.dblclick:\n i = event.ind[0]\n self.select(i)\n\n if event.mouseevent.button == 1:\n self.show_box_details().show()\n\n def show_tradeoff(self):\n '''Visualize the trade off between coverage and density. Color is used\n to denote the number of restricted dimensions.'''\n\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n\n cmap = mpl.cm.YlGnBu_r #@UndefinedVariable\n boundaries = np.arange(-0.5,\n max(self.peeling_trajectory['res dim'])+1.5,\n step=1)\n ncolors = cmap.N\n norm = mpl.colors.BoundaryNorm(boundaries, ncolors)\n\n p = ax.scatter(self.peeling_trajectory['coverage'],\n self.peeling_trajectory['density'],\n c=self.peeling_trajectory['res dim'],\n norm=norm,\n cmap=cmap,\n picker=True)\n\n ax.set_ylabel('density')\n ax.set_xlabel('coverage')\n ax.set_ylim(ymin=0, ymax=1.2)\n ax.set_xlim(xmin=0, xmax=1.2)\n\n mpldatacursor.datacursor(formatter=self.formatter, hover=True)\n fig.canvas.mpl_connect('pick_event', self.handle_click)\n\n ticklocs = np.arange(0,\n max(self.peeling_trajectory['res dim'])+1,\n step=1)\n cb = fig.colorbar(p, spacing='uniform', ticks=ticklocs, drawedges=True)\n cb.set_label(\"nr. of restricted dimensions\")\n\n # make the tooltip tables\n if mpld3:\n # Define some CSS to control our custom labels\n css = \"\"\"\n table\n {\n border-collapse: collapse;\n }\n th\n {\n background-color: rgba(255,255,255,0.95);\n }\n td\n {\n background-color: rgba(255,255,255,0.95);\n }\n table, th, td\n {\n font-family:Tahoma, Tahoma, sans-serif;\n font-size: 16px;\n border: 1px solid black;\n text-align: right;\n }\n \"\"\"\n\n labels = []\n columns_to_include = ['coverage','density', 'mass', 'res dim']\n frmt = lambda x: '{:.2f}'.format( x )\n for i in range(len(self.peeling_trajectory['coverage'])):\n label = self.peeling_trajectory.ix[[i], columns_to_include]\n label.columns = [\"Coverage\", \"Density\", \"Mass\", \"Res. 
Dim.\"]\n label = label.T\n label.columns = [\"Box {0}\".format(i)]\n labels.append(str(label.to_html(float_format=frmt)))\n\n tooltip = mpld3.plugins.PointHTMLTooltip(p, labels, voffset=10,\n hoffset=10, css=css)\n mpld3.plugins.connect(fig, tooltip)\n\n return fig\n\n def show_pairs_scatter(self, grid=None):\n '''\n\n make a pair wise scatter plot of all the restricted dimensions\n with color denoting whether a given point is of interest or not\n and the boxlims superimposed on top.\n\n '''\n fig = _pair_wise_scatter(self.prim.x[self.prim.yi_remaining], self.prim.y[self.prim.yi_remaining], self.box_lim,\n sdutil._determine_restricted_dims(self.box_lim,\n self.prim.box_init),\n grid = grid)\n\n title = \"Box %d\" % self._cur_box\n fig.suptitle(title, fontsize=16)\n fig.canvas.set_window_title(title)\n return fig\n\n def write_ppt_to_stdout(self):\n '''write the peeling and pasting trajectory to stdout'''\n # print(self.peeling_trajectory)\n # print(\"\\n\")\n\n def _calculate_quasi_p(self, i):\n '''helper function for calculating quasi-p values as discussed in\n Bryant and Lempert (2010). This is a one sided binomial test.\n\n Parameters\n ----------\n i : int\n the specific box in the peeling trajectory for which the quasi-p\n values are to be calculated.\n\n '''\n from scipy.stats import binom\n\n box_lim = self.box_lims[i]\n restricted_dims = list(sdutil._determine_restricted_dims(box_lim,\n self.prim.box_init))\n # print restricted_dims\n\n # total nr. of cases in box\n Tbox = self.peeling_trajectory['mass'][i] * self.prim.n\n\n # total nr. of cases of interest in box\n Hbox = self.peeling_trajectory['coverage'][i] * self.prim.t_coi\n\n qp_values = {}\n\n for u in restricted_dims:\n temp_box = copy.deepcopy(box_lim)\n temp_box[u] = self.box_lims[0][u]\n\n indices = sdutil._in_box(self.prim.x[self.prim.yi_remaining],\n temp_box)\n indices = self.prim.yi_remaining[indices]\n\n # total nr. of cases in box with one restriction removed\n Tj = indices.shape[0]\n\n # total nr. 
of cases of interest in box with one restriction\n            # removed\n            Hj = np.sum(self.prim.y[indices])\n\n            p = Hj/Tj\n\n            Hbox = int(Hbox)\n            Tbox = int(Tbox)\n\n            qp = binom.sf(Hbox-1, Tbox, p)\n            qp_values[u] = qp\n\n        return qp_values\n\n    def _format_stats(self, nr, stats):\n        '''helper function for formatting box stats'''\n        row = self.stats_format.format(nr,**stats)\n        return row\n","sub_path":"prim/prim_box.py","file_name":"prim_box.py","file_ext":"py","file_size_in_byte":24706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"35995228","text":"import struct\nfrom collections import namedtuple # used to give each element of an array a name\nfrom obj import Obj, Texture\nimport random\n# this makes it more readable\nV2 = namedtuple('Point2D', ['x', 'y'])\nV3 = namedtuple('Point3D', ['x', 'y', 'z'])\n\n\ndef char(c):\n    return struct.pack('=c', c.encode('ascii'))\n\ndef word(w):\n    # short\n    return struct.pack('=h', w)\n\ndef dword(w):\n    # long\n    return struct.pack('=l', w)\n\n\n\ndef color(r, g, b):\n    return bytes([b, g, r])\n\n\nBLACK = color(0, 0, 0)\nWHITE = color(255, 255, 255)\n\n\n# this bounding box receives the 3 parameters A, B, C\ndef bbox(A, B, C):\n    xs = [A.x, B.x, C.x]\n    xs.sort()\n    ys = [A.y, B.y, C.y]\n    ys.sort()\n    # index -1 is used to get back the last value of the array\n    return V2(xs[0], ys[0]), V2(xs[-1], ys[-1])\n\n\ndef cross(v0, v1):\n    # the cross product of the two vectors is computed component by component\n    cx = v0.y * v1.z - v0.z * v1.y\n    cy = v0.z * v1.x - v0.x * v1.z\n    cz = v0.x * v1.y - v0.y * v1.x\n    return V3(cx, cy, cz)\n\ndef barycentric(A, B, C, P):\n    # compute the cross product of two vectors to obtain the 3 barycentric variables.\n    cy, cx, cz = cross(\n        V3(C.x - A.x, B.x - A.x, A.x - P.x), \n        V3(C.y - A.y, B.y - A.y, A.y - P.y)\n    )\n\n    if abs(cz) < 1:\n        return -1, -1, -1 # this avoids division by zero\n\n    # to force one of them to be 1, all of them have to be divided by cz\n    w = 1 - (cy + cx) / cz\n    v = cx / cz\n    u = cy / cz # whenever a division appears, cz could be 0, which means the triangle is just a line\n\n    # note: if we already have the tooling in place, prioritize above all by client value which class or methods to work on first; 
it has to be considered for refactoring\n    # and which testing framework will be used.\n\n    return w, v, u\n\n\ndef sub(v0, v1):\n    return V3(\n        v0.x - v1.x,\n        v0.y - v1.y,\n        v0.z - v1.z,\n    )\ndef length(v0):\n    return (v0.x**2 + v0.y**2 + v0.z**2) ** 0.5\n\ndef norm(v0):\n    l = length(v0)\n    if l == 0:\n        return V3(0, 0, 0)\n\n    return V3(\n        v0.x / l,\n        v0.y / l,\n        v0.z / l\n    )\n\ndef dot(v0, v1):\n    return v0.x * v1.x + v0.y * v1.y + v0.z * v1.z\n\nclass Renderer(object):\n    def glinit():\n        r = Renderer(1024, 768)\n\n    def __init__(self, width, height):\n        self.width = width\n        self.height = height\n        \n\n        # This variable gives the point its color\n        self.current_color = WHITE\n\n        self.clear()\n\n    def clear(self):\n        self.framebuffer = [\n            [BLACK for x in range(self.width)]\n            for y in range(self.height)\n        ]\n\n        # a value has to be computed for every pixel, to see which z coordinate it corresponds to\n\n\n        self.zbuffer = [\n            [-99999 for x in range(self.width)]\n            for y in range(self.height)\n        ]\n        \n    def write(self, filename):\n        f = open(filename, 'bw')\n\n        # File header (14)\n        f.write(char('B'))\n        f.write(char('M'))\n        f.write(dword(14 + 40 + 3*(self.width*self.height)))\n        f.write(dword(0))\n        f.write(dword(14 + 40))\n\n        # Info header (40)\n        f.write(dword(40))\n        f.write(dword(self.width))\n        f.write(dword(self.height))\n        f.write(word(1))\n        f.write(word(24))\n        f.write(dword(0))\n        f.write(dword(3*(self.width*self.height)))\n        f.write(dword(0))\n        f.write(dword(0))\n        f.write(dword(0))\n        f.write(dword(0))\n\n        # Bitmap (walk the entire framebuffer and write the bytes out)\n        for y in range(self.height):\n            for x in range(self.width):\n                try:\n                    f.write(self.framebuffer[y][x])\n                except:\n                    pass\n        f.close()\n\n\n    \n\n    def render(self):\n        self.write('a.bmp')\n\n    # Draws a single point\n    def point(self, x, y, color = None):\n        try:\n\n            self.framebuffer[y][x] = color or self.current_color\n        except:\n            pass\n\n# instead of using a texture, the color will be the product of the shader function\n    def shader(self, A, B, C): # the parameters are used to build conditionals\n        # if A.y > 100 and A.y < 200: # this can be used\n        #     return color(200, 50, 200)\n        # # if A is greater than 200, a color is returned\n        # elif A.y > 200:\n        #     return color(255, 0, 200)\n\n        # TODO: LOOK UP HOW TO CHECK POINT-INSIDE-CIRCLE IN PYTHON!!!\n        if A.x > (300 + random.randint(0, 50)): # to blur the edge\n            return color(255, 0, 200)\n        else: \n            return color(200, 0, 255)\n    \n    # this is a triangle wireframe\n    def triangle_wireframe(self, A, B, C):\n        self.line(A, B)\n        self.line(B, C)\n        self.line(C, A)\n# FOR THE EARTH LAB THE TEXTURE IS IGNORED\n    # function that receives 3 vertices and draws a triangle\n    def triangle(self, A, B, C, color1= None, textureC=None, intensity=1):\n        bbox_min, bbox_max = bbox(A, B, C)\n\n        # find the smallest enclosing rectangle\n        # and mark one point at a time\n        for x in range(bbox_min.x, bbox_max.x + 1):\n            for y in range(bbox_min.y, bbox_max.y + 1):\n                # take the triangle's 3 coordinates and the point P, which is (x, y)\n                P = V2(x,y)\n                w, v, u = barycentric(A, B, C, P)\n                # if any of the 3 is negative, the point is outside the triangle\n                if w < 0 or v < 0 or u < 0:\n                    continue # nothing after continue is executed for this pixel\n                \n                # only if there is a texture \n                if self.texture:\n                    vtA, vtB, vtC = textureC\n                    # one triangle is interpolated inside the other\n                    tx = vtA.x * w + vtB.x * v + vtC.x * u # these are the coordinates corresponding to x, y of this triangle\n                    ty = vtA.y * w + vtB.y * v + vtC.y * u\n\n                    temp_color = 
self.texture.get_color(tx, ty)\n                    b, g, r = [round(c * intensity) if intensity > 0 else 0 for c in temp_color]\n                    color1 = self.shader(A, B, C)\n                    \n                # this pulls the colors out of the texture file\n\n                z = A.z * w + B.z * v + C.z * u # TODO: CONTINUE HERE!\n                if x < 0 or y < 0:\n                    continue\n                # FOR LAB 2 every point painted in the scene should be rendered \n                if x < len(self.zbuffer) and y < len(self.zbuffer[x]) and z > self.zbuffer[x][y]:\n                    self.point(x, y, color1)\n                    self.zbuffer[x][y] = z\n\n    # this is a function that receives a vertex as a parameter and transforms it in X and Y\n    def transform(self, v, translate=(0, 0, 0), scale=(1, 1, 1)):\n        \n        return V3(\n            round((v[0] + translate[0]) * scale[0]),\n            round((v[1] + translate[1]) * scale[1]),\n            round((v[2] + translate[2]) * scale[2])\n        )\n\n    \n    # --------------- LINE ---------------\n\n    # SEE BRESENHAM'S LINE ALGORITHM \n    \n    def line(self, A, B, color=None):\n        x0 = A.x\n        x1 = B.x\n        y0 = A.y\n        y1 = B.y\n\n\n        dy = abs(y1 - y0)\n        dx = abs(x1 - x0)\n        # if the line would not work as-is, steep swaps the y and x values. It flips it around\n        steep = dy > dx\n        if steep:\n            x0, y0 = y0, x0\n            x1, y1 = y1, x1\n\n        # this is to flip it\n        if x0 > x1:\n            x0, x1 = x1, x0\n            y0, y1 = y1, y0\n\n        # now the slope is recomputed.\n        dy = abs(y1 - y0)\n        dx = abs(x1 - x0)\n        \n        offset = 0 # offset = 0 * 2 * dx\n        \n        threshold = dx # threshold = 0.5 * 2 * dx\n        \n        y = y0\n        points = []\n        for x in range(int(x0), int(x1) + 1):\n            # added so the points go in the correct direction\n            if steep:\n                points.append((y, x, color))\n            else:\n                points.append((x, y, color))\n\n\n            offset += 2 * dy # offset += (dy/dx) * 2 * dx\n            \n            if offset >= threshold:\n                y += 1 if y0 < y1 else -1\n                threshold += 2 * dx # threshold += 1 * 2 * dx\n\n        for point in points:\n            self.point(*point)\n\n    # --------------- LINE ---------------\n\n    # This function loads and renders an obj\n    def load(self, filename, translate=(0, 0, 0), scale=(1, 1, 1)): # load is now told how far to shift the model sideways and down\n        # a texture will be given to load so it can be applied to the model \n        \n        model = Obj(filename)\n        light = V3(0, 0, 1)\n        # the faces have to be traversed, taking each of the indices and painting each vertex\n        \n        for face in model.faces:\n            # to know whether it makes triangles or quads\n            vcount = len(face)\n\n            if vcount == 3:\n                # take array 0 at position 0\n                f1 = face[0][0] -1\n                f2 = face[1][0] -1\n                f3 = face[2][0] -1 \n\n                A = self.transform(model.vertex[f1], translate, scale)\n                B = self.transform(model.vertex[f2], translate, scale)\n                C = self.transform(model.vertex[f3], translate, scale)\n\n                # normalize a vector: u = v/|v|\n                normal = norm(cross(\n                    sub(B, A),\n                    sub(C, A)\n                ))\n\n                intensity = dot(normal, light)\n\n                # if there is no texture, color with flat shading\n                if not self.texture:\n                    # in this case intensity will be 1 when facing the camera\n                    # and 0 when edge-on \n\n                    grey = round(250 * intensity)\n\n                    if grey < 0:\n                        continue # if the intensity is below 0, skip to the next face\n                    # this was to give it some random color\n                    # self.triangle(A, B, C, color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n\n                    self.triangle(A, B, C, color(grey, grey, grey))\n                # we need the triangle to take the color that comes from the texture\n                else:\n                    f1 = face[0][1] - 1\n                    
f2 = face[1][1] - 1\n                    f3 = face[2][1] - 1\n                    # this is how the texture vertices are extracted\n                    \n                    vtA = V3(*model.tvertex[f1])\n                    vtB = V3(*model.tvertex[f2])\n                    vtC = V3(*model.tvertex[f3])\n\n                    # r.line(vtA, vtB)\n                    # r.line(vtB, vtC)\n                    # r.line(vtC, vtA)\n\n                    self.triangle(A, B, C, textureC = (vtA, vtB, vtC), intensity=intensity)\n\n            elif vcount == 4: # for quads\n                # take array 0 at position 0\n                f1 = face[0][0] - 1\n                f2 = face[1][0] - 1\n                f3 = face[2][0] - 1\n                f4 = face[3][0] - 1\n\n                A = self.transform(model.vertex[f1], translate, scale)\n                B = self.transform(model.vertex[f2], translate, scale)\n                C = self.transform(model.vertex[f3], translate, scale)\n                D = self.transform(model.vertex[f4], translate, scale)\n                normal = norm(cross(\n                    sub(B, A),\n                    sub(C, A)\n                ))\n                intensity = dot(normal, light)\n                grey = round(255 * intensity)\n\n                if not self.texture:\n\n                    grey = round(250 * intensity)\n                    if grey < 0:\n                        continue\n                    self.triangle(A, B, C, color(grey, grey, grey))\n                    self.triangle(A, C, D, color(grey, grey, grey))\n                else:\n                    f1 = face[0][1] - 1\n                    f2 = face[1][1] - 1\n                    f3 = face[2][1] - 1\n                    f4 = face[3][1] - 1\n\n                    vtA = V3(*model.tvertex[f1])\n                    vtB = V3(*model.tvertex[f2])\n                    vtC = V3(*model.tvertex[f3])\n                    vtD = V3(*model.tvertex[f4])\n\n                    # r.line(vtA, vtB)\n                    # r.line(vtB, vtC)\n                    # r.line(vtC, vtA)\n\n                    # r.line(vtA, vtC)\n                    # r.line(vtC, vtD)\n                    # r.line(vtD, vtA)\n                    self.triangle(A, B, C, textureC = (vtA, vtB, vtC), intensity=intensity)\n                    self.triangle(A, C, D, textureC = (vtA, vtC, vtD), intensity=intensity)\n\n        \n\nr = Renderer(800, 600)\n# t = Texture('./textures/earth.bmp')\n# r.texture = t # the texture has to be loaded before the model\nr.texture = None\n# r.load('./models/earth1.obj', (800, 600, 0), (0.5, 0.5, 1))\nr.load('./models/model.obj', (1, 1, 1), (300, 300, 300))\n\nr.write('a.bmp')\n","sub_path":"23-08/gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":13295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"326424598","text":"from __future__ import print_function\nimport lsst.sims.maf.slicers as slicers\nimport lsst.sims.maf.db as db\n\n# Connect to opsim\ndbAddress = 'sqlite:///ops1_1140_sqlite.db'\noo = db.OpsimDatabase(dbAddress)\ncolnames = ['expMJD', 'fieldRA', 'fieldDec']\nsqlconstraint ='filter=\"r\"'\n# Get opsim simulation data\nsimdata = oo.fetchMetricData(colnames, sqlconstraint)\n# Init the slicer, set 2 points\nslicer = slicers.UserPointsSlicer(ra=[0., .1], dec=[0., -.1])\n# Setup slicer (builds kdTree)\nslicer.setupSlicer(simdata)\n# Slice Point for index zero\nind = slicer._sliceSimData(0)\nexpMJDs = simdata[ind['idxs']]['expMJD']\nprint('mjd for the 1st user defined point', expMJDs)\n# Find the expMJDs for the 2nd point\nind = slicer._sliceSimData(1)\nexpMJDs = simdata[ind['idxs']]['expMJD']\nprint('mjd for the 2nd user defined point', expMJDs)\n","sub_path":"examples/pythonScripts/example_getPoints.py","file_name":"example_getPoints.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"583991994","text":"\"\"\"Functional Data Boxplot Module.\n\nThis module contains the classes to construct the functional data boxplot and\nvisualize it.\n\n\"\"\"\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport math\n\nimport numpy as np\n\nfrom skfda.exploratory.depth import modified_band_depth\nfrom ... 
import FDataGrid\nfrom io import BytesIO\nfrom abc import ABC, abstractmethod\n\n__author__ = \"Amanda Hernando Bernabé\"\n__email__ = \"amanda.hernando@estudiante.uam.es\"\n\nclass FDataBoxplot(ABC):\n    \"\"\"Abstract class inherited by the Boxplot and SurfaceBoxplot classes.\n\n    It holds the data of the functional boxplot or surface boxplot of a FDataGrid object,\n    depending on the dimensions of the domain, 1 or 2 respectively.\n\n    It forces both classes, Boxplot and SurfaceBoxplot, to contain at least the median,\n    central and outlying envelopes and a colormap for their graphical representation,\n    obtained by calling the plot method.\n\n    \"\"\"\n    @abstractmethod\n    def __init__(self, factor=1.5):\n        if factor < 0:\n            raise ValueError(\n                \"The number used to calculate the outlying envelope must be positive.\")\n        self._factor = factor\n\n    @property\n    def factor(self):\n        return self._factor\n\n    @property\n    def fdatagrid(self):\n        pass\n\n    @property\n    def median(self):\n        pass\n\n    @property\n    def central_envelope(self):\n        pass\n\n    @property\n    def outlying_envelope(self):\n        pass\n\n    @property\n    def colormap(self):\n        return self._colormap\n\n    @colormap.setter\n    def colormap(self, value):\n        if not isinstance(value, matplotlib.colors.LinearSegmentedColormap):\n            raise ValueError(\n                \"colormap must be of type matplotlib.colors.LinearSegmentedColormap\")\n        self._colormap = value\n\n    @abstractmethod\n    def plot(self, fig=None, ax=None, nrows=None, ncols=None):\n        pass\n\n    def _repr_svg_(self):\n        plt.figure()\n        fig, _ = self.plot()\n        output = BytesIO()\n        fig.savefig(output, format='svg')\n        data = output.getvalue()\n        plt.close(fig)\n        return data.decode('utf-8')\n\n\nclass Boxplot(FDataBoxplot):\n    r\"\"\"Representation of the functional boxplot.\n\n    Class implementing the functional boxplot, which is an informative exploratory\n    tool for visualizing functional data, as well as its generalization, the\n    enhanced functional boxplot. Only supports 1 dimensional domain functional data.\n\n    Based on the center outward ordering induced by a :ref:`depth measure `\n    for functional data, the descriptive statistics of a functional boxplot are: the\n    envelope of the 50% central region, the median curve, and the maximum non-outlying envelope.\n    In addition, outliers can be detected in a functional boxplot by the 1.5 times the 50%\n    central region empirical rule, analogous to the rule for classical boxplots.\n\n    Attributes:\n        fdatagrid (FDataGrid): Object containing the data.\n        median (array, (fdatagrid.ndim_image, nsample_points)): contains\n            the median/s.\n        central_envelope (array, (fdatagrid.ndim_image, 2, nsample_points)):\n            contains the central envelope/s.\n        outlying_envelope (array, (fdatagrid.ndim_image, 2, nsample_points)):\n            contains the outlying envelope/s.\n        colormap (matplotlib.colors.LinearSegmentedColormap): Colormap from\n            which the colors to represent the central regions are selected.\n        central_regions (array, (fdatagrid.ndim_image * ncentral_regions, 2,\n            nsample_points)): contains the central regions.\n        outliers (array, (fdatagrid.ndim_image, fdatagrid.nsamples)):\n            contains the outliers.\n        barcol (string): Color of the envelopes and vertical lines.\n        outliercol (string): Color of the outliers.\n        mediancol (string): Color of the median.\n        show_full_outliers (boolean): If False (the default) then only the part\n            outside the box is plotted. 
If True, complete outling curves are plotted\n\n Example:\n Function :math:`f : \\mathbb{R}\\longmapsto\\mathbb{R}`.\n\n >>> data_matrix = [[1, 1, 2, 3, 2.5, 2], [0.5, 0.5, 1, 2, 1.5, 1], [-1, -1, -0.5, 1, 1, 0.5],\n ... [-0.5, -0.5, -0.5, -1, -1, -1]]\n >>> sample_points = [0, 2, 4, 6, 8, 10]\n >>> fd = FDataGrid(data_matrix, sample_points, dataset_label=\"dataset\", axes_labels=[\"x_label\", \"y_label\"])\n >>> Boxplot(fd)\n Boxplot(\n FDataGrid=FDataGrid(\n array([[[ 1. ],\n [ 1. ],\n [ 2. ],\n [ 3. ],\n [ 2.5],\n [ 2. ]],\n \n [[ 0.5],\n [ 0.5],\n [ 1. ],\n [ 2. ],\n [ 1.5],\n [ 1. ]],\n \n [[-1. ],\n [-1. ],\n [-0.5],\n [ 1. ],\n [ 1. ],\n [ 0.5]],\n \n [[-0.5],\n [-0.5],\n [-0.5],\n [-1. ],\n [-1. ],\n [-1. ]]]),\n sample_points=[array([ 0, 2, 4, 6, 8, 10])],\n domain_range=array([[ 0, 10]]),\n dataset_label='dataset',\n axes_labels=['x_label', 'y_label'],\n extrapolation=None,\n interpolator=SplineInterpolator(interpolation_order=1, smoothness_parameter=0.0, monotone=False),\n keepdims=False),\n median=array([[ 0.5, 0.5, 1. , 2. , 1.5, 1. ]]),\n central envelope=array([[[ 0.5, 0.5, 1. , 2. , 1.5, 1. ],\n [-1. , -1. , -0.5, 1. , 1. , 0.5]]]),\n outlying envelope=array([[[ 1. , 1. , 2. , 3. , 2.25, 1.75],\n [-1. , -1. , -0.5 , -0.5 , 0.25, -0.25]]]),\n central_regions=array([[[ 0.5, 0.5, 1. , 2. , 1.5, 1. ],\n [-1. , -1. , -0.5, 1. , 1. , 0.5]]]),\n outliers=array([[ 1., 0., 0., 1.]]))\n\n \"\"\"\n\n def __init__(self, fdatagrid, method=modified_band_depth, prob=[0.5],\n factor=1.5):\n \"\"\"Initialization of the Boxplot class.\n\n Args:\n fdatagrid (FDataGrid): Object containing the data.\n method (:ref:`depth measure `, optional): Method\n used to order the data. Defaults to :func:`modified band depth\n `.\n prob (list of float, optional): List with float numbers (in the range\n from 1 to 0) that indicate which central regions to represent.\n Defaults to [0.5] which represents the 50% central region.\n factor (double): Number used to calculate the outlying envelope.\n\n \"\"\"\n FDataBoxplot.__init__(self, factor)\n\n if fdatagrid.ndim_domain != 1:\n raise ValueError(\n \"Function only supports FDataGrid with domain dimension 1.\")\n\n if sorted(prob, reverse=True) != prob:\n raise ValueError(\n \"Probabilities required to be in descending order.\")\n\n if min(prob) < 0 or max(prob) > 1:\n raise ValueError(\"Probabilities must be between 0 and 1.\")\n\n nsample_points = len(fdatagrid.sample_points[0])\n ncentral_regions = len(prob)\n\n self._median = np.ndarray((fdatagrid.ndim_image, nsample_points))\n self._central_envelope = np.ndarray((fdatagrid.ndim_image, 2,\n nsample_points))\n self._outlying_envelope = np.ndarray((fdatagrid.ndim_image, 2,\n nsample_points))\n self._central_regions = np.ndarray(\n (fdatagrid.ndim_image * ncentral_regions,\n 2, nsample_points))\n self._outliers = np.zeros((fdatagrid.ndim_image, fdatagrid.nsamples))\n\n depth = method(fdatagrid)\n indices_descencing_depth = (-depth).argsort(axis=0)\n\n for m in range(fdatagrid.ndim_image):\n\n for i in range(len(prob)):\n\n indices_samples = indices_descencing_depth[:, m][\n :math.ceil(fdatagrid.nsamples * prob[i])]\n samples_used = fdatagrid.data_matrix[indices_samples, :, m]\n max_samples_used = np.amax(samples_used, axis=0)\n min_samples_used = np.amin(samples_used, axis=0)\n\n if prob[i] == 0.5:\n # central envelope\n self._central_envelope[m] = np.asarray(\n [max_samples_used.T, min_samples_used.T])\n\n # outlying envelope\n max_value = np.amax(fdatagrid.data_matrix[:, :, m], axis=0)\n min_value = 
np.amin(fdatagrid.data_matrix[:, :, m], axis=0)\n iqr = np.absolute(max_samples_used - min_samples_used)\n outlying_max_envelope = np.minimum(\n max_samples_used + iqr * factor, max_value)\n outlying_min_envelope = np.maximum(\n min_samples_used - iqr * factor, min_value)\n self._outlying_envelope[m] = np.asarray(\n [outlying_max_envelope.flatten(),\n outlying_min_envelope.flatten()])\n\n # outliers\n for j in list(range(fdatagrid.nsamples)):\n outliers_above = (\n outlying_max_envelope < fdatagrid.data_matrix[\n j, :, m])\n outliers_below = (\n outlying_min_envelope > fdatagrid.data_matrix[\n j, :, m])\n if (\n outliers_above.sum() > 0 or outliers_below.sum() > 0):\n self._outliers[m, j] = 1\n # central regions\n self._central_regions[ncentral_regions * m + i] = np.asarray(\n [max_samples_used.flatten(), min_samples_used.flatten()])\n\n # mean sample\n self._median[m] = fdatagrid.data_matrix[\n indices_descencing_depth[0, m], :, m].T\n\n self._fdatagrid = fdatagrid\n self._prob = prob\n self._colormap = plt.cm.get_cmap('RdPu')\n self.barcol = \"blue\"\n self.outliercol = \"red\"\n self.mediancol = \"black\"\n self._show_full_outliers = False\n\n @property\n def fdatagrid(self):\n return self._fdatagrid\n\n @property\n def median(self):\n return self._median\n\n @property\n def central_envelope(self):\n return self._central_envelope\n\n @property\n def outlying_envelope(self):\n return self._outlying_envelope\n\n @property\n def central_regions(self):\n return self._central_regions\n\n @property\n def outliers(self):\n return self._outliers\n\n @property\n def show_full_outliers(self):\n return self._show_full_outliers\n\n @show_full_outliers.setter\n def show_full_outliers(self, boolean):\n if not isinstance(boolean, bool):\n raise ValueError(\"show_full_outliers must be boolean type\")\n self._show_full_outliers = boolean\n\n def plot(self, fig=None, ax=None, nrows=None, ncols=None):\n \"\"\"Visualization of the functional boxplot of the fdatagrid (ndim_domain=1).\n\n Args:\n fig (figure object, optional): figure over with the graphs are\n plotted in case ax is not specified. If None and ax is also None,\n the figure is initialized.\n ax (list of axis objects, optional): axis over where the graphs are\n plotted. If None, see param fig.\n nrows(int, optional): designates the number of rows of the figure\n to plot the different dimensions of the image. Only specified\n if fig and ax are None.\n ncols(int, optional): designates the number of columns of the figure\n to plot the different dimensions of the image. 
Only specified\n if fig and ax are None.\n\n Returns:\n fig (figure object): figure object in which the graphs are plotted.\n ax (axes object): axes in which the graphs are plotted.\n\n \"\"\"\n\n fig, ax = self.fdatagrid.generic_plotting_checks(fig, ax, nrows,\n ncols)\n tones = np.linspace(0.1, 1.0, len(self._prob) + 1, endpoint=False)[1:]\n color = self.colormap(tones)\n\n if self.show_full_outliers:\n var_zorder = 1\n else:\n var_zorder = 4\n\n for m in range(self.fdatagrid.ndim_image):\n\n # outliers\n for j in list(range(self.fdatagrid.nsamples)):\n if self.outliers[m, j]:\n ax[m].plot(self.fdatagrid.sample_points[0],\n self.fdatagrid.data_matrix[j, :, m],\n color=self.outliercol,\n linestyle='--', zorder=1)\n\n for i in range(len(self._prob)):\n # central regions\n ax[m].fill_between(self.fdatagrid.sample_points[0],\n self.central_regions[\n m * len(self._prob) + i, 0],\n self.central_regions[\n m * len(self._prob) + i, 1],\n facecolor=color[i], zorder=var_zorder)\n\n # outlying envelope\n ax[m].plot(self.fdatagrid.sample_points[0],\n self.outlying_envelope[m, 0],\n self.fdatagrid.sample_points[0],\n self.outlying_envelope[m, 1], color=self.barcol,\n zorder=4)\n\n # central envelope\n ax[m].plot(self.fdatagrid.sample_points[0],\n self.central_envelope[m, 0],\n self.fdatagrid.sample_points[0],\n self.central_envelope[m, 1], color=self.barcol,\n zorder=4)\n\n # vertical lines\n index = math.ceil(self.fdatagrid.ncol / 2)\n x = self.fdatagrid.sample_points[0][index]\n ax[m].plot([x, x], [self.outlying_envelope[m, 0][index],\n self.central_envelope[m, 0][index]],\n color=self.barcol,\n zorder=4)\n ax[m].plot([x, x], [self.outlying_envelope[m, 1][index],\n self.central_envelope[m, 1][index]],\n color=self.barcol, zorder=4)\n\n # median sample\n ax[m].plot(self.fdatagrid.sample_points[0], self.median[m],\n color=self.mediancol, zorder=5)\n\n self.fdatagrid.set_labels(fig, ax)\n\n return fig, ax\n\n def __repr__(self):\n \"\"\"Return repr(self).\"\"\"\n return (f\"Boxplot(\"\n f\"\\nFDataGrid={repr(self.fdatagrid)},\"\n f\"\\nmedian={repr(self.median)},\"\n f\"\\ncentral envelope={repr(self.central_envelope)},\"\n f\"\\noutlying envelope={repr(self.outlying_envelope)},\"\n f\"\\ncentral_regions={repr(self.central_regions)},\"\n f\"\\noutliers={repr(self.outliers)})\").replace('\\n', '\\n ')\n\n\nclass SurfaceBoxplot(FDataBoxplot):\n r\"\"\"Representation of the surface boxplot.\n\n Class implementing the surface boxplot. Analogously to the functional boxplot,\n it is an informative exploratory tool for visualizing functional data with\n domain dimension 2. 
Nevertheless, it does not implement the enhanced\n surface boxplot.\n\n Based on the center outward ordering induced by a :ref:`depth measure `\n for functional data, it represents the envelope of the 50% central region, the median curve,\n and the maximum non-outlying envelope.\n\n Attributes:\n fdatagrid (FDataGrid): Object containing the data.\n median (array, (fdatagrid.ndim_image, lx, ly)): contains\n the median/s.\n central_envelope (array, (fdatagrid.ndim_image, 2, lx, ly)):\n contains the central envelope/s.\n outlying_envelope (array,(fdatagrid.ndim_image, 2, lx, ly)):\n contains the outlying envelope/s.\n colormap (matplotlib.colors.LinearSegmentedColormap): Colormap from\n which the colors to represent the central regions are selected.\n boxcol (string): Color of the box, which includes median and central envelope.\n outcol (string): Color of the outlying envelope.\n\n Example:\n Function :math:`f : \\mathbb{R^2}\\longmapsto\\mathbb{R^2}`.\n\n >>> data_matrix = [[[[1, 4], [0.3, 1.5], [1, 3]], [[2, 8], [0.4, 2], [2, 9]]],\n ... [[[2, 10], [0.5, 3], [2, 10]], [[3, 12], [0.6, 3], [3, 15]]]]\n >>> sample_points = [[2, 4], [3, 6, 8]]\n >>> fd = FDataGrid(data_matrix, sample_points, dataset_label= \"dataset\",\n ... axes_labels=[\"x1_label\", \"x2_label\", \"y1_label\", \"y2_label\"])\n >>> SurfaceBoxplot(fd)\n SurfaceBoxplot(\n FDataGrid=FDataGrid(\n array([[[[ 1. , 4. ],\n [ 0.3, 1.5],\n [ 1. , 3. ]],\n \n [[ 2. , 8. ],\n [ 0.4, 2. ],\n [ 2. , 9. ]]],\n \n \n [[[ 2. , 10. ],\n [ 0.5, 3. ],\n [ 2. , 10. ]],\n \n [[ 3. , 12. ],\n [ 0.6, 3. ],\n [ 3. , 15. ]]]]),\n sample_points=[array([2, 4]), array([3, 6, 8])],\n domain_range=array([[2, 4],\n [3, 8]]),\n dataset_label='dataset',\n axes_labels=['x1_label', 'x2_label', 'y1_label', 'y2_label'],\n extrapolation=None,\n interpolator=SplineInterpolator(interpolation_order=1, smoothness_parameter=0.0, monotone=False),\n keepdims=False),\n median=array([[[ 1. , 0.3, 1. ],\n [ 2. , 0.4, 2. ]],\n \n [[ 4. , 1.5, 3. ],\n [ 8. , 2. , 9. ]]]),\n central envelope=array([[[[ 1. , 0.3, 1. ],\n [ 2. , 0.4, 2. ]],\n \n [[ 1. , 0.3, 1. ],\n [ 2. , 0.4, 2. ]]],\n \n \n [[[ 4. , 1.5, 3. ],\n [ 8. , 2. , 9. ]],\n \n [[ 4. , 1.5, 3. ],\n [ 8. , 2. , 9. ]]]]),\n outlying envelope=array([[[[ 1. , 0.3, 1. ],\n [ 2. , 0.4, 2. ]],\n \n [[ 1. , 0.3, 1. ],\n [ 2. , 0.4, 2. ]]],\n \n \n [[[ 4. , 1.5, 3. ],\n [ 8. , 2. , 9. ]],\n \n [[ 4. , 1.5, 3. ],\n [ 8. , 2. , 9. ]]]]))\n\n\n \"\"\"\n\n def __init__(self, fdatagrid, method=modified_band_depth, factor=1.5):\n \"\"\"Initialization of the functional boxplot.\n\n Args:\n fdatagrid (FDataGrid): Object containing the data.\n method (:ref:`depth measure `, optional): Method\n used to order the data. 
Defaults to :func:`modified band depth\n `.\n prob (list of float, optional): List with float numbers (in the range\n from 1 to 0) that indicate which central regions to represent.\n Defaults to [0.5] which represents the 50% central region.\n factor (double): Number used to calculate the outlying envelope.\n\n \"\"\"\n FDataBoxplot.__init__(self, factor)\n\n if fdatagrid.ndim_domain != 2:\n raise ValueError(\n \"Class only supports FDataGrid with domain dimension 2.\")\n\n lx = len(fdatagrid.sample_points[0])\n ly = len(fdatagrid.sample_points[1])\n\n self._median = np.ndarray((fdatagrid.ndim_image, lx, ly))\n self._central_envelope = np.ndarray((fdatagrid.ndim_image, 2, lx, ly))\n self._outlying_envelope = np.ndarray((fdatagrid.ndim_image, 2, lx, ly))\n\n depth = method(fdatagrid)\n indices_descencing_depth = (-depth).argsort(axis=0)\n\n for m in range(fdatagrid.ndim_image):\n indices_samples = indices_descencing_depth[:, m][\n :math.ceil(fdatagrid.nsamples * 0.5)]\n samples_used = fdatagrid.data_matrix[indices_samples, :, :, m]\n max_samples_used = np.amax(samples_used, axis=0)\n min_samples_used = np.amin(samples_used, axis=0)\n\n # mean sample\n self._median[m] = fdatagrid.data_matrix[\n indices_descencing_depth[0, m], :, :, m]\n\n # central envelope\n self._central_envelope[m] = np.asarray([max_samples_used,\n min_samples_used])\n\n # outlying envelope\n max_value = np.amax(fdatagrid.data_matrix[:, :, :, m], axis=0)\n min_value = np.amin(fdatagrid.data_matrix[:, :, :, m], axis=0)\n iqr = np.absolute(max_samples_used - min_samples_used)\n oulying_max_envelope = np.minimum(max_samples_used + iqr * factor,\n max_value)\n oulying_min_envelope = np.maximum(min_samples_used - iqr * factor,\n min_value)\n self._outlying_envelope[m] = np.asarray([oulying_max_envelope,\n oulying_min_envelope])\n\n self._fdatagrid = fdatagrid\n self.colormap = plt.cm.get_cmap('Greys')\n self._boxcol = 1.0\n self._outcol = 0.7\n\n @property\n def fdatagrid(self):\n return self._fdatagrid\n\n @property\n def median(self):\n return self._median\n\n @property\n def central_envelope(self):\n return self._central_envelope\n\n @property\n def outlying_envelope(self):\n return self._outlying_envelope\n\n @property\n def boxcol(self):\n return self._boxcol\n\n @boxcol.setter\n def boxcol(self, value):\n if value < 0 or value > 1:\n raise ValueError(\n \"boxcol must be a number between 0 and 1.\")\n\n self._boxcol = value\n\n @property\n def outcol(self):\n return self._outcol\n\n @outcol.setter\n def outcol(self, value):\n if value < 0 or value > 1:\n raise ValueError(\n \"outcol must be a number between 0 and 1.\")\n self._outcol = value\n\n def plot(self, fig=None, ax=None, nrows=None, ncols=None):\n \"\"\"Visualization of the surface boxplot of the fdatagrid (ndim_domain=2).\n\n Args:\n fig (figure object, optional): figure over with the graphs are\n plotted in case ax is not specified. If None and ax is also None,\n the figure is initialized.\n ax (list of axis objects, optional): axis over where the graphs are\n plotted. If None, see param fig.\n nrows(int, optional): designates the number of rows of the figure\n to plot the different dimensions of the image. Only specified\n if fig and ax are None.\n ncols(int, optional): designates the number of columns of the figure\n to plot the different dimensions of the image. 
Only specified\n if fig and ax are None.\n\n Returns:\n fig (figure object): figure object in which the graphs are plotted.\n ax (axes object): axes in which the graphs are plotted.\n\n \"\"\"\n fig, ax = self.fdatagrid.generic_plotting_checks(fig, ax, nrows,\n ncols)\n x = self.fdatagrid.sample_points[0]\n lx = len(x)\n y = self.fdatagrid.sample_points[1]\n ly = len(y)\n X, Y = np.meshgrid(x, y)\n\n for m in range(self.fdatagrid.ndim_image):\n\n # mean sample\n ax[m].plot_wireframe(X, Y, np.squeeze(self.median[m]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.boxcol))\n ax[m].plot_surface(X, Y, np.squeeze(self.median[m]).T,\n color=self.colormap(self.boxcol), alpha=0.8)\n\n # central envelope\n ax[m].plot_surface(X, Y, np.squeeze(self.central_envelope[m, 0]).T,\n color=self.colormap(self.boxcol), alpha=0.5)\n ax[m].plot_wireframe(X, Y,\n np.squeeze(self.central_envelope[m, 0]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.boxcol))\n ax[m].plot_surface(X, Y, np.squeeze(self.central_envelope[m, 1]).T,\n color=self.colormap(self.boxcol), alpha=0.5)\n ax[m].plot_wireframe(X, Y,\n np.squeeze(self.central_envelope[m, 1]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.boxcol))\n\n # box vertical lines\n for indices in [(0, 0), (0, ly - 1), (lx - 1, 0),\n (lx - 1, ly - 1)]:\n x_corner = x[indices[0]]\n y_corner = y[indices[1]]\n ax[m].plot([x_corner, x_corner], [y_corner, y_corner],\n [self.central_envelope[\n m, 1, indices[0], indices[1]],\n self.central_envelope[\n m, 0, indices[0], indices[1]]],\n color=self.colormap(self.boxcol))\n\n # outlying envelope\n ax[m].plot_surface(X, Y,\n np.squeeze(self.outlying_envelope[m, 0]).T,\n color=self.colormap(self.outcol), alpha=0.3)\n ax[m].plot_wireframe(X, Y,\n np.squeeze(self.outlying_envelope[m, 0]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.outcol))\n ax[m].plot_surface(X, Y,\n np.squeeze(self.outlying_envelope[m, 1]).T,\n color=self.colormap(self.outcol), alpha=0.3)\n ax[m].plot_wireframe(X, Y,\n np.squeeze(self.outlying_envelope[m, 1]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.outcol))\n\n # vertical lines from central to outlying envelope\n x_index = math.floor(lx / 2)\n x_central = x[x_index]\n y_index = math.floor(ly / 2)\n y_central = y[y_index]\n ax[m].plot([x_central, x_central], [y_central, y_central],\n [self.outlying_envelope[m, 1, x_index, y_index],\n self.central_envelope[m, 1, x_index, y_index]],\n color=self.colormap(self.boxcol))\n ax[m].plot([x_central, x_central], [y_central, y_central],\n [self.outlying_envelope[m, 0, x_index, y_index],\n self.central_envelope[m, 0, x_index, y_index]],\n color=self.colormap(self.boxcol))\n\n self.fdatagrid.set_labels(fig, ax)\n\n return fig, ax\n\n def __repr__(self):\n \"\"\"Return repr(self).\"\"\"\n return (f\"SurfaceBoxplot(\"\n f\"\\nFDataGrid={repr(self.fdatagrid)},\"\n f\"\\nmedian={repr(self.median)},\"\n f\"\\ncentral envelope={repr(self.central_envelope)},\"\n f\"\\noutlying envelope={repr(self.outlying_envelope)})\").replace('\\n', '\\n ')\n","sub_path":"skfda/exploratory/visualization/boxplot.py","file_name":"boxplot.py","file_ext":"py","file_size_in_byte":28658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"109243106","text":"from numpy.core.fromnumeric import resize\nfrom numpy.lib.npyio import BagObj\nimport streamlit as st\nimport cv2\nfrom PIL import Image\nimport anant\nimport tempfile\nimport numpy as np\ndef main_image():\n uploaded_file = st.file_uploader(\"Choose 
a file\", type = ['jpg','jpeg','jfif','png'])\n if uploaded_file is not None:\n image = np.array(Image.open(uploaded_file))\n image = cv2.resize(image,(230,230))\n _,c,_ = st.beta_columns([1,1,1])\n with c:\n st.image(image)\n \n (R , G , B) = anant.diff_channel(image)\n \n col1, col2, col3 = st.beta_columns([1,1,1])\n\n with col1:\n st.image(R, clamp =True)\n with col2:\n st.image(G,clamp = True)\n with col3:\n st.image(B, clamp = True)\n\n\ndef main_video(type_):\n _,c,_ = st.beta_columns([1,1,1])\n c1,c2,c3 = st.beta_columns([1,1,1])\n with c:\n p = st.empty()\n with c1:\n p1 = st.empty()\n with c2:\n p2 = st.empty() \n with c3:\n p3 = st.empty()\n \n video = None\n if(type_ == \"Upload a video file\"):\n uploaded_file = st.file_uploader(\"Upload file\", type = 'mp4')\n if uploaded_file is not None:\n tfile = tempfile.NamedTemporaryFile(delete=False)\n tfile.write(uploaded_file.read())\n video = cv2.VideoCapture(tfile.name)\n else:\n video = cv2.VideoCapture(0)\n \n if video is not None: \n while True:\n ret , frame = video.read()\n if not ret:\n st.error(\"CHECK YOUR CAMERA, ALLOW THE PROGEAM TO RECORD THE LIVE VIDEO\")\n break\n frame = cv2.resize(frame, (230,230))\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n p.image(frame)\n (R,G,B) = anant.diff_channel(frame)\n\n p1.image(R)\n p2.image(G)\n p3.image(B)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n video.release()\n\n","sub_path":"R_B_G_channel.py","file_name":"R_B_G_channel.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"418239483","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, with_statement\n\nimport os\nimport shutil\nimport kaptan\nimport tempfile\nfrom .. 
import config, cli\nfrom ..util import tmux\nfrom .helpers import TestCase\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nTMUXP_DIR = os.path.join(os.path.dirname(__file__), '.tmuxp')\n\n\nclass StartupTest(TestCase):\n\n \"\"\"test startup_cli().\"\"\"\n\n def setUp(self):\n if os.path.isdir(TMUXP_DIR):\n shutil.rmtree(TMUXP_DIR)\n\n def test_creates_config_dir_not_exists(self):\n \"\"\"cli.startup() creates config dir if not exists.\"\"\"\n\n self.assertFalse(os.path.exists(TMUXP_DIR))\n cli.startup(TMUXP_DIR)\n\n self.assertTrue(os.path.exists(TMUXP_DIR))\n\n @classmethod\n def tearDownClass(cls):\n if os.path.isdir(TMUXP_DIR):\n shutil.rmtree(TMUXP_DIR)\n logger.debug('wiped %s' % TMUXP_DIR)\n\n\nclass FindConfigsTest(TestCase):\n\n \"\"\"test in_dir() test.\"\"\"\n\n def setUp(self):\n if os.path.isdir(TMUXP_DIR):\n shutil.rmtree(TMUXP_DIR)\n\n def test_in_dir_from_config_dir(self):\n \"\"\"config.in_dir() finds configs config dir.\"\"\"\n\n cli.startup(TMUXP_DIR)\n config1 = tempfile.NamedTemporaryFile(\n dir=TMUXP_DIR,\n prefix='myconfig',\n suffix='.yaml'\n )\n\n config2 = tempfile.NamedTemporaryFile(\n dir=TMUXP_DIR,\n prefix='myconfig',\n suffix='.json'\n )\n configs_found = config.in_dir(TMUXP_DIR)\n\n self.assertEqual(len(configs_found), 2)\n\n def test_in_dir_from_current_dir(self):\n \"\"\"config.in_dir() find configs config dir.\"\"\"\n\n cli.startup(TMUXP_DIR)\n config1 = tempfile.NamedTemporaryFile(\n dir=TMUXP_DIR,\n prefix='myconfig',\n suffix='.yaml'\n )\n\n config2 = tempfile.NamedTemporaryFile(\n dir=TMUXP_DIR,\n prefix='myconfig',\n suffix='.json'\n )\n configs_found = config.in_dir(TMUXP_DIR)\n\n self.assertEqual(len(configs_found), 2)\n\n def test_ignore_non_configs_from_current_dir(self):\n \"\"\"cli.in_dir() ignore non-config from config dir.\"\"\"\n\n cli.startup(TMUXP_DIR)\n badconfig = tempfile.NamedTemporaryFile(\n dir=TMUXP_DIR,\n prefix='myconfig',\n suffix='.psd'\n )\n\n config1 = tempfile.NamedTemporaryFile(\n dir=TMUXP_DIR,\n prefix='watmyconfig',\n suffix='.json'\n )\n configs_found = config.in_dir(TMUXP_DIR)\n\n self.assertEqual(len(configs_found), 1)\n\n def test_get_configs_cwd(self):\n \"\"\"config.in_cwd() find config in shell current working directory.\"\"\"\n\n current_dir = os.getcwd()\n\n configs_found = config.in_cwd()\n\n # create a temporary folder and change dir into it\n tmp_dir = tempfile.mkdtemp(suffix='tmuxp')\n os.chdir(tmp_dir)\n\n try:\n config1 = open('.tmuxp.json', 'w+b')\n config1.close()\n\n configs_found = config.in_cwd()\n finally:\n os.remove(config1.name)\n\n self.assertEqual(len(configs_found), 1)\n self.assertIn('.tmuxp.json', configs_found)\n\n # clean up\n os.chdir(current_dir)\n if os.path.isdir(tmp_dir):\n shutil.rmtree(tmp_dir)\n\n @classmethod\n def tearDownClass(cls):\n if os.path.isdir(TMUXP_DIR):\n shutil.rmtree(TMUXP_DIR)\n logger.debug('wiped %s' % TMUXP_DIR)\n\nsampleconfigdict = {\n 'session_name': 'sampleconfig',\n 'start_directory': '~',\n 'windows': [\n {\n 'window_name': 'editor',\n 'panes': [\n {\n 'start_directory': '~',\n 'shell_command': ['vim'],\n },\n {\n 'shell_command': ['cowsay \"hey\"']\n },\n ],\n 'layout': 'main-verticle'\n },\n {\n 'window_name': 'logging', 'panes': [\n {\n 'shell_command': ['tail -F /var/log/syslog'],\n 'start_directory':'/var/log'\n }\n ]\n }, {\n 'options': {'automatic_rename': True, },\n 'panes': [\n {\n 'shell_command': ['htop']\n }\n ]\n }\n 
]\n}\n","sub_path":"tmuxp/testsuite/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"263009477","text":"from django.db import models\nfrom .recipe import Recipe\nfrom .ingredient import Ingredient\nfrom .article import Article\nfrom django.db.models import Sum\n\n\nclass ShoppingRecipeList (models.Model): \n recipes = models.ManyToManyField(\n \"Recipe\",\n blank=True,\n )\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n \n contributors = models.ManyToManyField(\n 'auth.User', \n related_name='shop', \n blank=True\n )\n\n def __str__(self):\n return str(self.created)\n\n def generate_random_recipe(number_of_recipe, excluded_ids = []):\n if number_of_recipe == None:\n number_of_recipe = 1\n\n return Recipe.objects.order_by(\"?\").exclude(id__in=excluded_ids)[:int(number_of_recipe)]\n\n def _get_ordered_ingredients_from_reicpes_with_the_sum_of_their_quantity(self):\n return ( \n Ingredient.objects.filter(recipes__in=self.recipes.all())\n .order_by('measure_type')\n .values('article_id', 'measure_type')\n .annotate(quantity=Sum('quantity')) \n )\n\n def generate_shopping_list(self):\n ingredients = self._get_ordered_ingredients_from_reicpes_with_the_sum_of_their_quantity()\n \n for ingredient in ingredients:\n article = Article.objects.get(pk=ingredient['article_id'])\n new_list = ShoppingIngredientList(article=article, shop=self, measure_type=ingredient['measure_type'])\n new_list.total_quantity = ingredient['quantity']\n new_list.save()\n\n\nclass ShoppingIngredientList(models.Model):\n PARTIAL = 'PARTIAL'\n COMPLETE = 'COMPLETE'\n NOTTOUCH = 'NOTTOUCH'\n\n BOUGHT_TYPE = (\n (PARTIAL, PARTIAL),\n (COMPLETE, COMPLETE),\n (NOTTOUCH, COMPLETE),\n )\n\n article = models.ForeignKey(\"Article\", null=True, on_delete=models.CASCADE, related_name=\"shop_list\")\n bought_value = models.IntegerField(default=0)\n bought_status = models.CharField(default=NOTTOUCH, choices=BOUGHT_TYPE, max_length=10)\n measure_type = models.CharField(\"Type de mesure\", choices=Ingredient.MEASURE_TYPE, max_length=20)\n shop = models.ForeignKey(\"ShoppingRecipeList\", null=True, on_delete=models.CASCADE, related_name='list_content')\n total_quantity = models.IntegerField(\"Quantité\", null=True, blank=True)\n\n\n def _update_bought_status(self):\n if (\n self.total_quantity is not None and \n self.bought_status != self.COMPLETE and \n self.bought_value == self.total_quantity\n ):\n self.bought_status = self.COMPLETE\n \n elif ( \n self.bought_status != self.PARTIAL and \n self.bought_value > 0 \n ):\n self.bought_status = self.PARTIAL \n\n elif self.bought_status != self.NOTTOUCH and self.bought_value == 0:\n self.bought_status = self.NOTTOUCH\n\n def save(self, *args, **kwargs):\n self._update_bought_status()\n super(ShoppingIngredientList, self).save(*args, **kwargs)\n","sub_path":"apicook/cookie/models/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"307408277","text":"import json\nimport tweepy\nfrom os import environ\n\nAPIKEY = environ['Consumer_Key']\nAPISECRET = environ['Consumer_Secret']\nOAUTHTOKEN = environ['Access_Key']\nOAUTHTOKENSECRET = environ['Access_Secret']\n\nclass MyStreamListener(tweepy.StreamListener):\n def __init__(self, api):\n self.api = api\n self.me = api.me()\n\n def 
on_status(self, tweet):\n tweetInfo = json.loads(json.dumps(tweet._json))\n # User's tweet contents\n replyContent = \"Hey \" + str(tweetInfo['user']['screen_name']) + \" thanks for asking!\\n\\nHere's your account info:\\n\\n\" +\\\n \"Verified : \" + str(tweetInfo['user']['verified']) + \"\\n\" + \\\n \"Number of account likes : \" + str(tweetInfo['user']['favourites_count']) + \"\\n\" + \\\n \"Number of tweets : \" + str(tweetInfo['user']['statuses_count']) + \"\\n\"\n\n # Like the tweet\n if not tweet.favorited:\n # Mark it as Liked, since we have not done it yet\n try:\n tweet.favorite()\n except Exception as e:\n print(\"Error: \" + str(e))\n\n # Retweet the tweet and send message\n if not tweet.retweeted:\n # Retweet, since we have not retweeted it yet\n try:\n tweet.retweet()\n # Reply to the tweet\n api.update_status(status=replyContent, in_reply_to_status_id=tweetInfo['id'],\n auto_populate_reply_metadata=True)\n except Exception as e:\n print(\"Error: \" + str(e))\n def on_error(self, status):\n print(\"Error detected: \" + str(status))\n\n\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(APIKEY,APISECRET)\nauth.set_access_token(OAUTHTOKEN,OAUTHTOKENSECRET)\n\n# Create API object\napi = tweepy.API(auth, wait_on_rate_limit=True,\n wait_on_rate_limit_notify=True)\ntweets_listener = MyStreamListener(api)\nstream = tweepy.Stream(api.auth, tweets_listener)\nstream.filter(track=[\"#Whatsmyprofilestats\",\"#Whatsmyprofilestatus\"], languages=[\"en\"],is_async=True)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"517833177","text":"import glob\nimport os\n\nimport pandas as pd\n\nfrom numerutils.labels import Labels\n\n\nclass TournamentDataSet:\n \"\"\"A class to collect all the (meta) data of a Numerai competition.\n\n \"\"\"\n lbl = Labels()\n\n def __init__(self, folder_path: str = ''):\n self._folder_path = folder_path\n self._df_train = self._load_data('*training_data.csv')\n self._df_train = (self._df_train\n .assign(**{self.lbl.erano: self._df_train[self.lbl.era].str.slice(3).astype(int)}))\n self._df_tournament = None\n\n def _get_path_to_file(self, name: str) -> str:\n return glob.glob(os.path.join(self._folder_path, name))[0]\n\n def _load_data(self, name: str) -> pd.DataFrame:\n path_to_data = self._get_path_to_file(name)\n print('Loading file \\'{}\\'.'.format(path_to_data))\n df = pd.read_csv(path_to_data)\n print('Success. 
Data has shape {} covering {} time points.'.format(df.shape, df[self.lbl.era].unique().size))\n return df\n\n def get_features_table(self) -> pd.DataFrame:\n feat_names = [col for col in self._df_train if col.startswith(self.lbl.feature)]\n df_feat = pd.DataFrame({self.lbl.feat_name: feat_names})\n df_feat[self.lbl.feat_group] = df_feat[self.lbl.feat_name].str.extract('_(.*?)[0-9]')\n return df_feat\n\n def get_target_name(self) -> str:\n return self._df_train.columns[self._df_train.columns.str.contains('target')][0]\n\n def get_prediction_name(self) -> str:\n return self.get_target_name().replace('target', 'prediction')\n\n def _describe_df_train(self) -> None:\n print('- Prediction target column = \\'{}\\'.'.format(self.get_target_name()))\n\n print(self.get_df_train()[self.lbl.erano]\n .agg(lambda x: '- \\'train\\' covers eras {} to {}'.format(x.min(), x.max())))\n\n n_null_in_target = self._df_train[self.get_target_name()].isnull().sum()\n if n_null_in_target == 0:\n print('- No missing values in target column detected.')\n else:\n print('- {} missing values in target column detected.'.format(n_null_in_target))\n\n df_feat = self.get_features_table()\n value_counts = self._df_train[df_feat[self.lbl.feat_name]].round(2).agg(lambda x: len(x.unique())).value_counts()\n for value_count, num_features in value_counts.items():\n print('- {} feature(s) with {} distinct values each.'.format(num_features, value_count))\n\n feat_group_size = self.get_features_table().groupby(self.lbl.feat_group).size()\n print('- Features are grouped:')\n for feat, size in feat_group_size.items():\n print(' {} features in group \\'{}\\'.'.format(size, feat))\n\n avg_prop_na = self._df_train[df_feat[self.lbl.feat_name]].isnull().mean().mean()\n if avg_prop_na == 0:\n print('- No missing values in feature columns detected.')\n else:\n print('- Average proportion of missing values in feature scolumns = {:.2f}.'.format(avg_prop_na))\n\n def _describe_df_tournament(self):\n print('- Target column name = \\'{}\\'.'.format(self.get_target_name()))\n\n print('- Prediction column name = \\'{}\\'.'.format(self.get_prediction_name()))\n\n print(self.get_df_val()[self.lbl.erano]\n .agg(lambda x: '- \\'validation\\' covers eras {} to {}'.format(x.min(), x.max())))\n\n def get_df_train(self) -> pd.DataFrame:\n \"\"\"\n This is the standard train subset as suggested by 'data_type' in the raw table.\n \"\"\"\n return self._df_train.copy(deep=True)\n\n def get_df_tournament(self) -> pd.DataFrame:\n \"\"\"\n Contains standard 'validation', 'test' and 'live' data. Rewards are evaluated on 'live' data only. 
Submission\n must cover all ids of df_tournament.\n \"\"\"\n if self._df_tournament is None:\n self._df_tournament = self._load_data('*tournament_data.csv')\n return self._df_tournament.copy(deep=True)\n\n def get_df_val(self) -> pd.DataFrame:\n \"\"\"\n This is the standard validation subset as suggested by 'data_type' in the raw table.\n \"\"\"\n df_val = self.get_df_tournament()[self.get_df_tournament()[self.lbl.data_type] == self.lbl.validation]\n return df_val.assign(**{self.lbl.erano: df_val[self.lbl.era].str.slice(3).astype(int)}).reset_index(drop=True)\n\n def get_df_insample(self) -> pd.DataFrame:\n return pd.concat([self.get_df_train(), self.get_df_val()]).reset_index(drop=True)\n\n\nclass KazutsugiDataSet(TournamentDataSet):\n \"\"\"\n Some Kazutsugi specific tweaks and checks.\n \"\"\"\n def __init__(self, folder_path: str = ''):\n super().__init__(folder_path)\n\n self._df_train = self._clean_df(self._df_train)\n self._describe_df_train()\n self._df_tournament = self.get_df_tournament()\n self._df_tournament = self._clean_df(self._df_tournament)\n self._describe_df_tournament()\n\n def _clean_df(self, df: pd.DataFrame) -> pd.DataFrame:\n feature_cols = self.get_features_table()[self.lbl.feat_name]\n df[feature_cols] = df[feature_cols].round(2)\n return df\n","sub_path":"numerutils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"580119366","text":"if __name__ == \"__main__\":\n\n import sys\n sys.path.append('.')\n\nimport math\nimport itertools\n\nimport ctrl.block as block\n\nimport rc\nfrom ctrl.rc import clock as clk\n\nclass Raw(block.BufferBlock):\n \n def __init__(self, \n clock = clk.Clock(), # is a singleton\n **kwargs):\n\n # make sure clock is Clock\n assert isinstance(clock, clk.Clock)\n self.clock = clock\n\n # call super\n super().__init__(**kwargs)\n\n def read(self):\n\n #print('> read')\n if self.enabled:\n\n # read imu\n data = self.clock.get_imu()\n\n # units (m/s^2) and (rad/s)\n self.buffer = itertools.chain(data['accel'],\n data['gyro'])\n \n #print('< read')\n return self.buffer\n\nclass Inclinometer(Raw):\n\n def __init__(self, **kwargs):\n\n # turns initialization\n self.turns = 0\n self.theta = 0\n self.threshold = 0.25\n\n # call super\n super().__init__(**kwargs)\n\n def reset(self):\n\n self.turns = 0\n \n def read(self):\n\n #print('> read')\n if self.enabled:\n\n # read imu\n data = self.clock.get_imu()\n \n # read IMU\n ax, ay, az = data['accel']\n gx, gy, gz = data['gyro']\n\n # compensate for turns\n theta = -math.atan2(az, ay) / (2 * math.pi)\n if (theta < 0 and self.theta > 0):\n if (self.theta - theta > self.threshold):\n self.turns += 1\n elif (theta > 0 and self.theta < 0):\n if (theta - self.theta > self.threshold):\n self.turns -= 1\n self.theta = theta\n\n # units (turns) and (1/s)\n self.buffer = (self.turns + theta, gx / 360)\n \n #print('< read')\n return self.buffer\n\nif __name__ == \"__main__\":\n\n import time, math\n from time import perf_counter\n\n import rc\n rc.set_state(rc.RUNNING)\n \n Ts = 0.01\n\n print(\"\\n> Testing Raw\")\n \n raw = Raw()\n raw.set_enabled(enabled = True)\n\n N = int(5/Ts)\n for k in range(N):\n \n # read inclinometer\n raw.clock.read()\n (ax, ay, az, gx, gy, gz) = raw.read()\n\n print('\\r> accel = {:+05.3f} {:+05.3f} {:+05.3f} m/s^2 giro = {:+05.3f} {:+05.3f} {:+05.3f} deg/s'.format(ax,ay,az,gx,gy,gz), end='')\n\n print(\"\\n> Testing Inclinometer\")\n \n giro = 
Inclinometer()\n giro.set_enabled(enabled = True)\n \n N = 1000\n for k in range(N):\n \n # read inclinometer\n giro.clock.read()\n (theta, thetadot) = giro.read()\n\n print('\\r> theta = {:+05.3f} theta dot = {:+05.3f} 1/s'.format(theta, thetadot), end='')\n \n","sub_path":"ctrl/rc/imu.py","file_name":"imu.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"648620234","text":"import os\nimport sys\nimport gzip\nimport imp\nfrom itertools import product\nfrom setuptools import setup, find_packages\n\ntry:\n from urllib.request import urlretrieve\nexcept ImportError:\n from urllib import urlretrieve\n\nmodel_dir = os.path.join('openl3', 'models')\nmodalities = ['audio', 'image']\ninput_reprs = ['linear', 'mel128', 'mel256']\ncontent_type = ['music', 'env']\nweight_files = ['openl3_{}_{}_{}.h5'.format(*tup)\n for tup in product(modalities, input_reprs, content_type)]\nbase_url = 'https://github.com/marl/openl3/raw/models/'\n\nif len(sys.argv) > 1 and sys.argv[1] == 'sdist':\n # exclude the weight files in sdist\n weight_files = []\nelse:\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n # in all other cases, decompress the weights file if necessary\n for weight_file in weight_files:\n weight_path = os.path.join(model_dir, weight_file)\n if not os.path.isfile(weight_path):\n compressed_file = weight_file + '.gz'\n compressed_path = os.path.join(model_dir, compressed_file)\n if not os.path.isfile(compressed_file):\n print('Downloading weight file {} ...'.format(compressed_file))\n urlretrieve(base_url + compressed_file, compressed_path)\n print('Decompressing ...')\n with gzip.open(compressed_path, 'rb') as source:\n with open(weight_path, 'wb') as target:\n target.write(source.read())\n print('Decompression complete')\n os.remove(compressed_path)\n print('Removing compressed file')\n\nversion = imp.load_source('openl3.version', os.path.join('openl3', 'version.py'))\n\nwith open('README.md') as file:\n long_description = file.read()\n\nsetup(\n name='openl3',\n version=version.version,\n description='Deep audio and image embeddings, based on Look, Listen, and Learn approach',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/marl/openl3',\n author='Jason Cramer, Ho-Hsiang Wu, and Justin Salamon',\n author_email='jtcramer@nyu.edu',\n packages=find_packages(),\n entry_points={\n 'console_scripts': ['openl3=openl3.cli:main'],\n },\n license='MIT',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='tfrecord',\n project_urls={\n 'Source': 'https://github.com/marl/openl3',\n 'Tracker': 'https://github.com/marl/openl3/issues'\n },\n install_requires=[\n 'keras==2.0.9',\n 'numpy>=1.13.0',\n 'scipy>=0.19.1',\n 'kapre>=0.1.3.1',\n 'PySoundFile>=0.9.0.post1',\n 'resampy>=0.2.0,<0.3.0',\n 'h5py>=2.7.0,<3.0.0',\n ],\n extras_require={\n 'docs': [\n 'sphinx==1.2.3', # autodoc was broken in 1.3.1\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'numpydoc',\n ],\n 'tests': []\n },\n package_data={\n 'openl3': [os.path.join('models', fname)\n for fname in weight_files]\n 
},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"83225315","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('statistics/', views.statistics, name='statistics'),\n path('/', views.news_view, name='news'),\n path('/', views.tag_news, name='tag_news'),\n path('', views.index, name='index'),\n]\n","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"421321268","text":"# Carlson Sharpless and Daniel Foster\n# 09/17/2018\n# Draws a turtle running for Transportation Secretary.\n\nimport turtle\nimport math\n\n# Initializes a turtle named Steve.\nsteve = turtle.Turtle()\n\n# draw_ellipse: draws an ellipse with (xCenter, yCenter) as the center and the given radii.\n# angle describes the angle between the horizontal radius and the traditional x-axis.\ndef draw_ellipse(xRadius, yRadius, angle, xCenter, yCenter):\n\n # Raise the pen so a line isn't drawn while moving to the first point.\n steve.penup()\n # Convert the angle to radians so the sine and cosine functions work properly.\n angle = math.radians(angle)\n\n for theta in range(0, 365, 5):\n\n # If there's been more than one move, then Steve is correctly placed and can start drawing.\n if theta > 0:\n steve.pendown()\n\n # Convert theta to radians (it will be reset to degrees before the value is increased)\n theta = math.radians(theta)\n\n # The polar equation for an ellipse, with adjustment for the value of angle.\n r = xRadius * yRadius / math.sqrt(((yRadius * math.cos(theta - angle)) ** 2) + ((xRadius * math.sin(theta - angle)) ** 2))\n\n # Convert the polar result to cartesian format and move.\n x = int(r * math.cos(theta)) + xCenter\n y = int(r * math.sin(theta)) + yCenter\n steve.goto(x, y)\n\n# Initialize the screen, the pen color, and the drawing speed.\nturtle.screensize(1100, 625)\nturtle.setup(1100, 625)\nsteve.color('black')\nsteve.speed(0)\n\n# Move Steve to his initial location.\nsteve.penup()\nsteve.setpos(-490, 245)\nsteve.pendown()\nsteve.right(90)\n\n# Draw 13 red and white stripes.\nfor i in range(13):\n steve.begin_fill()\n if i % 2 != 0:\n steve.fillcolor(\"#FFFFFF\")\n else:\n steve.fillcolor(\"#FFAFAE\")\n for j in range(2):\n steve.forward(40)\n steve.left(90)\n steve.forward(1000)\n steve.left(90)\n steve.forward(40)\n steve.end_fill()\n\n# Draws star box in top left.\nsteve.penup()\nsteve.setpos(-490, 245)\nsteve.pendown()\nsteve.fillcolor(\"#DAE3F4\")\n\nsteve.left(90)\nsteve.begin_fill()\nfor i in range(2):\n steve.forward(400)\n steve.right(90)\n steve.forward(280)\n steve.right(90)\nsteve.end_fill()\n\nsteve.penup()\nsteve.setpos(-470, 220)\nsteve.pendown()\n\n# Draws fifty white stars.\nsteve.color(\"#FFFFFF\")\ncolumnNumber = 5\nfor i in range(11):\n for j in range(columnNumber):\n steve.begin_fill()\n for k in range(5):\n steve.forward(20)\n steve.right(144)\n steve.end_fill()\n \n steve.penup()\n steve.right(90) \n steve.forward(55)\n steve.left(90)\n steve.pendown()\n \n steve.penup()\n if columnNumber == 5:\n steve.setpos(-470 + (33 * (i+1)), 187)\n columnNumber = 4\n elif columnNumber == 4:\n steve.setpos(-470 + (33 * (i+1)), 220)\n columnNumber = 5\n steve.pendown()\n\n# Draw the head.\nsteve.color('black')\nsteve.fillcolor(\"#A9D18E\")\nsteve.begin_fill()\ndraw_ellipse(95, 74, 0, -20, 
-20)\nsteve.end_fill()\n\n# Add the nostrils.\nsteve.penup()\nsteve.goto(45, 0)\nsteve.pendown()\nsteve.goto(60, -7)\nsteve.penup()\nsteve.goto(45, -30)\nsteve.pendown()\nsteve.goto(60, -23)\n\n# Add the eyes.\nsteve.penup()\nsteve.goto(25, 10)\nsteve.dot()\nsteve.goto(25, -35)\nsteve.dot()\n\n# Draw the front left leg.\nsteve.begin_fill()\ndraw_ellipse(101, 41, 15, -55, 120)\nsteve.end_fill()\n\n# Draw the back left leg.\nsteve.begin_fill()\ndraw_ellipse(101, 41, -15, -385, 120)\nsteve.end_fill()\n\n# Draw the tail.\nsteve.penup()\nsteve.goto(-385, 5)\nsteve.pendown()\nsteve.begin_fill()\nsteve.setheading(195)\nsteve.forward(80)\nsteve.setheading(225)\nsteve.forward(35)\nsteve.setheading(315)\nsteve.forward(35)\nsteve.setheading(355)\nsteve.forward(120)\nsteve.setheading(steve.towards(-385, 5))\nsteve.goto(-385, 5)\nsteve.end_fill()\n\n# Draw the back right leg.\nsteve.begin_fill()\ndraw_ellipse(101, 41, 15, -385, -160)\nsteve.end_fill()\n\n# Draw the back left leg.\nsteve.begin_fill()\ndraw_ellipse(101, 41, -15, -55, -160)\nsteve.end_fill()\n\n# Draw the shell.\nsteve.fillcolor(\"#70AD47\")\nsteve.begin_fill()\ndraw_ellipse(195, 166, 0, -220, -20)\nsteve.end_fill()\n\n# Draw three lines that make up the outside plates.\nsteve.penup()\nsteve.goto(-25, -20)\nsteve.setheading(180)\nsteve.pendown()\nsteve.forward(390)\n\nsteve.penup()\nsteve.goto(-132, 126)\nsteve.setheading(240)\nsteve.pendown()\nsteve.forward(342)\n\nsteve.penup()\nsteve.goto(-304, 129)\nsteve.setheading(300)\nsteve.pendown()\nsteve.forward(342)\n\n# Draw the center plate.\nsteve.penup()\nsteve.goto(-268, -107)\nsteve.fillcolor(\"#548235\")\nsteve.pendown()\nsteve.begin_fill()\nfor i in range(0, 6):\n steve.setheading(60 * i)\n steve.forward(100)\nsteve.end_fill()\n\n# Now time for the words!\nsteve.penup()\nsteve.goto(-35, 200)\nsteve.pendown()\nsteve.write(\"Slow and steady will win this race!\", font=(\"Bradley Hand ITC\", 25, \"normal\"))\n\nsteve.penup()\nsteve.goto(230, 47)\nsteve.pendown()\nsteve.write(\"Vote\", font=(\"Helvetica\", 18, \"normal\"))\n\nsteve.penup()\nsteve.goto(120, -65)\nsteve.write(\"Turtle\", font=(\"Garamond\", 115, \"normal\"))\nsteve.pendown()\n\nsteve.penup()\nsteve.goto(265, -76)\nsteve.write(\"2018\", font=(\"Helvetica\", 28, \"normal\"))\nsteve.pendown()\n\nsteve.penup()\nsteve.goto(107, -114)\nsteve.pendown()\nsteve.write(\"Secretary of Transportation\", font=(\"Garamond\", 28, \"normal\"))\n\nsteve.penup()\nsteve.goto(-130, -270)\nsteve.pendown()\nsteve.write(\"Paid for by Turtles for America\", font=(\"Impact\", 16, \"normal\"))\n\n# Draw a box around the disclosure.\nsteve.penup()\nsteve.goto(-135, -270)\nsteve.pendown()\nsteve.setheading(0)\nsteve.forward(267)\nsteve.setheading(90)\nsteve.forward(28)\nsteve.setheading(180)\nsteve.forward(267)\nsteve.setheading(270)\nsteve.forward(28)\n\n# The end! 
Hide Steve and signal the end of the program.\nsteve.hideturtle()\nturtle.done()\n","sub_path":"Friday Problems/2 turtle/coolTurtle.py","file_name":"coolTurtle.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"309449015","text":"# Q3 Ivan Kahl, Python 2.7\n\n# A function to check if an integer is ordered\ndef isOrdered(x):\n    # Convert the integer to a string so we can get each digit\n    y = str(x)\n    # Create a variable to store the previous digit\n    prev = 0\n    # Create a variable to store the current digit\n    curr = 0\n    # Create a variable to say whether the number is ordered or not\n    ordered = True\n    # Loop through each digit in the integer\n    for i in range(0, len(y)):\n        # Set the curr variable to the current digit in the integer\n        curr = int(y[i])\n        # Check if the current digit is greater than or equal to the previous integer\n        if curr < prev:\n            # If it is not greater than or equal, set ordered to False\n            ordered = False\n        # Set the prev variable to the current digit\n        prev = curr\n    # Return the ordered variable\n    return ordered\n\n# Get the upper limit that the program must generate values up to\nv = int(raw_input(\"Upper limit: \"))\n# Create a list to store all the squares that are ordered below the upper limit\nsquares = []\n# A variable to store the current square number\nsquare = 1\n# A variable to store the current number being squared\ni = 1\n\n# Keep adding ordered squares while the square is less than the upper limit\nwhile square < v:\n    if isOrdered(square):\n        squares.append(square)\n    i += 1\n    square = i**2\n\nprint(\" \".join(map(str, squares)))","sub_path":"2014/First Round/Q3 Ivan Kahl.py","file_name":"Q3 Ivan Kahl.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"240306898","text":"import random\r\n\r\n\r\n\"\"\"卡池内容(未设置四星up角色)\"\"\"\r\nup = '魈'\r\nst5 = [up, up, up, up, up, '刻晴', '莫娜', '七七', '迪卢克', '琴']\r\ncha_4 = ['安柏', '丽莎', '凯亚', '芭芭拉', '雷泽', '菲谢尔', '班尼特', '诺艾尔', '菲谢尔', '砂糖',\r\n         '迪奥娜', '北斗', '凝光', '香菱', '行秋', '重云', '辛焱']\r\nweapon_4 = ['弓藏', '祭礼弓', '绝弦', '西风猎弓', '昭心', '祭礼残章', '流浪乐章', '西风秘典', '西风长枪',\r\n            '雨裁', '匣里灭辰', '祭礼大剑', '钟剑', '西风大剑', '匣里龙吟', '祭礼剑', '笛剑', '西风剑', '岩盔丘丘王']\r\nst4 = weapon_4 + cha_4\r\nget = []\r\nhave = []\r\n\r\n\r\nclass Stats:\r\n    \"\"\"跟踪游戏统计信息\"\"\"\r\n    def __init__(self):\r\n        self.total = 0\r\n        self.up_num = 0\r\n        self.num_4 = 0\r\n        self.num_5 = 0\r\n\r\n\r\ndef single():\r\n    \"\"\"不保底时的抽奖\"\"\"\r\n    i = random.randint(1, 10001)\r\n    if i in range(1, 61):\r\n        a = random.randint(0, len(st5) - 1)\r\n        star = st5[a]\r\n        stat.num_5 = 0\r\n    elif i in range(61, 316):\r\n        cha = random.randint(0, len(cha_4) - 1)\r\n        star = cha_4[cha]\r\n        stat.num_4 = 0\r\n    elif i in range(316, 571):\r\n        wea = random.randint(0, len(weapon_4) - 1)\r\n        star = weapon_4[wea]\r\n        stat.num_4 = 0\r\n    elif i in range(571, 10001):\r\n        star = '三星'\r\n    else:\r\n        return None\r\n    if star == up:\r\n        stat.num_5 = 0\r\n        stat.up_num = 0\r\n    add(star)\r\n\r\n\r\ndef add(star):\r\n    \"\"\"每次抽卡完毕的常规操作\"\"\"\r\n    record(star)\r\n    get.append(star)\r\n    have.append(star)\r\n    stat.total += 1\r\n\r\n\r\ndef check_up():\r\n    \"\"\"检查保底\"\"\"\r\n    if stat.up_num < 179:\r\n        if stat.num_5 < 89:\r\n            if stat.num_4 < 9:\r\n                single()\r\n            else:\r\n                o_4 = random.randint(0, len(st4) - 1)\r\n                star = st4[o_4]\r\n                add(star)\r\n                stat.num_4 = 0\r\n        else:\r\n            o_5 = random.randint(0, len(st5) - 1)\r\n            star = st5[o_5]\r\n            add(star)\r\n            
stat.num_5 = 0\r\n else:\r\n star = up\r\n add(star)\r\n stat.up_num = 0\r\n\r\n\r\ndef record(star):\r\n \"\"\"记录数据变化\"\"\"\r\n if star != '魈':\r\n stat.up_num += 1\r\n if star not in st4:\r\n stat.num_4 += 1\r\n if star not in st5:\r\n stat.num_5 += 1\r\n\r\n\r\ndef extract():\r\n \"\"\"单抽\"\"\"\r\n del get[:]\r\n check_up()\r\n print(get)\r\n\r\n\r\ndef ten():\r\n \"\"\"十连函数\"\"\"\r\n del get[:]\r\n for num in range(0, 10):\r\n check_up()\r\n print(get)\r\n\r\n\r\ndef remember():\r\n value_cnt = {}\r\n for h in have:\r\n value_cnt[h] = value_cnt.get(h, 0) + 1\r\n print(value_cnt)\r\n\r\n\r\nstat = Stats()\r\n","sub_path":"抽卡/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"355346356","text":"from django.db import models\nfrom .fields import ForeignKeyField, ManyToManyField\n\n\nclass JsonModel(models.Model):\n field_types = None\n pk_name = None\n strict = True\n\n class Meta:\n abstract = True\n\n @classmethod\n def init_model(cls, base=None):\n if not cls._meta.abstract:\n\n cls.field_types = dict()\n\n for field_name, field in cls._meta._forward_fields_map.items():\n rel_model = None\n if field.__class__ == ForeignKeyField or field.__class__ == ManyToManyField:\n rel_model = field.remote_field.model\n\n cls.field_types[field_name] = (field.__class__, rel_model)\n\n if cls._meta.pk.__class__ == models.fields.related.OneToOneField:\n cls.pk_name = base.pk_name\n else:\n cls.pk_name = cls._meta.pk.attname\n\n def overwrite_many_to_many_field(self, key, entries):\n cls = self.field_types[key][1]\n field = getattr(self, key)\n field.clear()\n item_ids = set([item for item in entries])\n for item in item_ids:\n try:\n field.add(cls.objects.get(pk=item))\n except models.ObjectDoesNotExist:\n continue\n\n def put(self, data):\n for key, value in data.items():\n if key != self.pk_name and key in self.field_types:\n try:\n field_class = self.field_types[key][0]\n except KeyError:\n if JsonModel.strict:\n raise AttributeError(\"Unexpected {} field for object of class {}\"\n .format(key, self.__class__.__name__))\n continue\n\n if field_class == models.fields.AutoField:\n if JsonModel.strict:\n raise AttributeError(\"Cant set Autofield {} field for object of class {}\"\n .format(key, self.__class__.__name__))\n continue\n\n if field_class == ManyToManyField:\n self.overwrite_many_to_many_field(key, value)\n else:\n if field_class == ForeignKeyField:\n key = key + \"_id\"\n try:\n setattr(self, key, value)\n except AttributeError:\n if JsonModel.strict:\n raise AttributeError(\"Unexpected {} field for object of class {}\"\n .format(key, self.__class__.__name__))\n self.clean_fields()\n self.save()\n\n @classmethod\n def create(cls, data):\n new_data = {}\n many_to_many_fields = []\n for key, value in data.items():\n\n try:\n field_class = cls.field_types[key][0]\n except KeyError:\n if JsonModel.strict:\n raise AttributeError(\"Unexpected {} field for new object of class {}\"\n .format(key, cls.__name__))\n continue\n\n if field_class == models.fields.AutoField:\n if JsonModel.strict:\n raise AttributeError(\"Cant set Autofield {} field for new object of class {}\"\n .format(key, cls.__name__))\n continue\n\n if field_class == ManyToManyField:\n many_to_many_fields.append((key, value))\n else:\n if field_class == ForeignKeyField:\n key = key + \"_id\"\n\n new_data[key] = value\n\n obj = cls(**new_data)\n\n if many_to_many_fields:\n models.Model.save(obj)\n for key, value 
in many_to_many_fields:\n obj.overwrite_many_to_many_field(key, value)\n obj.clean_fields()\n obj.save()\n return obj\n\n","sub_path":"backend/django_pubsub/models/json_model.py","file_name":"json_model.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"591735318","text":"from Hamming import H\n\ndef insert(I,par,s,rid): #insert R[rid] to I[s] (group of length s)\n\n I.setdefault(s,{})\n m=len(par)\n\n for i in range(m):\n I[s].setdefault(i,{})\n for i in par: #for every element index of segament in Par\n I[s][i].setdefault(tuple(par[i]),[]).append(rid) #I[s][i][Par[i]] append rid\n\n","sub_path":"src/setjoin/other/simple/insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"613029500","text":"import pandas as pd\nimport numpy as np\nimport streamlit as st\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nfrom sklearn.ensemble import GradientBoostingClassifier\nimport pickle \n\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n)\n\n# função para carregar o dataset\n@st.cache\ndef get_data():\n return pd.read_csv(\"dados_baseline_media_1.csv\")\n\n# função para treinar o modelo\n#def train_model():\n# data = get_data()\n# x = data.drop(\"MEDV\",axis=1)\n# y = data[\"MEDV\"]\n# rf_regressor = RandomForestRegressor(n_estimators=200, max_depth=7, max_features=3)\n# rf_regressor.fit(x, y)\n# return rf_regressor\n\n#importando modelo ja treinado\nmodelo = pickle.load(open('modelo_gb.pkl', 'rb'))\n\n# criando um dataframe\ndata = get_data()\n\n# treinando o modelo\n#model = train_model()\n\n# título\nst.title(\"Vou bem no ENEM?\")\n\n# subtítulo\nst.markdown(\"Este é um App para predizer se o aluno irá bem ou não no ENEM com uma solução de Machine Learning.\")\n\n\n# Instruções\nt1 = st.subheader(\"Instruções:\")\nt2 = st.write(\"Preencha os dados e click em >>Realizar Predição<<\")\n#t3 = st.write(\"Abaixo do botão aparecerá o resultado\")\n\ntext = '''\n---\n'''\n\nst.markdown(text)\n\n\n# Legenda\nt4 = st.subheader(\"Legenda para questões de ocupação/trabalho dos pais:\")\n\n# Legenda da questao 3 (Ocupação/trabalho do pai)\nt5 = st.markdown(\"Grupo 1: Lavrador, agricultor sem empregados, bóia fria, criador de animais (gado, porcos, galinhas, ovelhas, cavalos etc.), apicultor, pescador, lenhador, seringueiro, extrativista.\")\nt6 = st.markdown(\"Grupo 2: Diarista, empregado doméstico, cuidador de idosos, babá, cozinheiro (em casas particulares), motorista particular, jardineiro, faxineiro de empresas e prédios, vigilante, porteiro, carteiro, office-boy, vendedor, caixa, atendente de loja, auxiliar administrativo, recepcionista, servente de pedreiro, repositor de mercadoria.\")\nt7 = st.markdown(\"Grupo 3: Padeiro, cozinheiro industrial ou em restaurantes, sapateiro, costureiro, joalheiro, torneiro mecânico, operador de máquinas, soldador, operário de fábrica, trabalhador da mineração, pedreiro, pintor, eletricista, encanador, motorista, caminhoneiro, taxista.\")\nt8 = st.markdown(\"Grupo 4: Professor (de ensino fundamental ou médio, idioma, música, artes etc.), técnico (de enfermagem, contabilidade, eletrônica etc.), policial, militar de baixa patente (soldado, cabo, sargento), corretor de imóveis, supervisor, gerente, mestre de obras, pastor, microempresário (proprietário de empresa com menos de 10 empregados), pequeno comerciante, pequeno 
proprietário de terras, trabalhador autônomo ou por conta própria.\")\nt9 = st.markdown(\"Grupo 5: Médico, engenheiro, dentista, psicólogo, economista, advogado, juiz, promotor, defensor, delegado, tenente, capitão, coronel, professor universitário, diretor em empresas públicas ou privadas, político, proprietário de empresas com mais de 10 empregados.\")\n\n\n# verificando o dataset\n##st.subheader(\"Selecionando apenas um pequeno conjunto de atributos\")\n\n# atributos para serem exibidos por padrão\n##defaultcols = [\"NU_IDADE\",\"TP_LINGUA\",\"Q005\",\"TP_COR_RACA\",\"Q001\",\"Q003\",\"Q004\",\"Q006\",\"Q008\",\"Q019\"]\n\n# defindo atributos a partir do multiselect\n##cols = st.multiselect(\"Atributos\", data.columns.tolist(), default=defaultcols)\n\n# exibindo os top 10 registro do dataframe\n##st.dataframe(data[cols].head(10))\n\n\n#st.subheader(\"Distribuição...\")\n\n# definindo a faixa de valores\n#faixa_valores = st.slider(\"Faixa de preço\", float(data.MEDV.min()), 150., (10.0, 100.0))\n\n# filtrando os dados\n#dados = data[data['MEDV'].between(left=faixa_valores[0],right=faixa_valores[1])]\n\n# plot a distribuição dos dados\n#f = px.histogram(dados, x=\"MEDV\", nbins=100, title=\"Distribuição de Preços\")\n#f.update_xaxes(title=\"MEDV\")\n#f.update_yaxes(title=\"Total Imóveis\")\n#st.plotly_chart(f)\n\nst.sidebar.subheader(\"Dados do aluno\")\n\n#\n# mapeando dados do usuário para cada atributo\n#\n#\n#IDADE\n#v_idade = st.sidebar.number_input(\"Idade:\", value=18, max_value=99, min_value=0)\nv_idade = 0\n#\n#LINGUA ESTRANGEIRA\ntmp_tp_lingua = st.sidebar.selectbox(\"Lingua Estrangeira ?\",(\"Inglês\",\"Espanhol\"))\n# transformando o dado de entrada em valor binário\nv_tp_lingua = 0 if tmp_tp_lingua == \"Ingês\" else 1\n#valores do grafico\ngl = 100 if tmp_tp_lingua == \"Ingês\" else 67\n#\n#Quantas pessoas moram na casa? 
- Q005\nv_Q005 = st.sidebar.number_input(\"Quantas pessoas moram na casa?\", value=1, max_value=20, min_value=0)\n#valores do grafico\nif v_Q005 == 1:\n g5 = 7\nelif v_Q005 == 2:\n g5 = 37\nelif v_Q005 == 3:\n g5 = 80\nelif v_Q005 == 4:\n g5 = 100\nelif v_Q005 == 5:\n g5 = 43\nelif v_Q005 == 6:\n g5 = 14\nelif v_Q005 == 7:\n g5 = 5\nelif v_Q005 == 8:\n g5 = 2\nelse:\n g5 = 1\n \n#\n#TP_COR_RACA\ntmp_TP_COR_RACA = st.sidebar.selectbox(\"Cor/Raça\",(\"Não declarado\",\"Branca\",\"Preta\",\"Parda\",\"Amarela\",\"Indígena\"),1)\n# transformando o dado de entrada em valor numerico\nv_TP_COR_RACA1 = 1 if tmp_TP_COR_RACA == \"Branca\" else 0\nv_TP_COR_RACA2 = 1 if tmp_TP_COR_RACA == \"Preta\" else 0\nv_TP_COR_RACA3 = 1 if tmp_TP_COR_RACA == \"Parda\" else 0\nv_TP_COR_RACA4 = 1 if tmp_TP_COR_RACA == \"Amarela\" else 0\nv_TP_COR_RACA5 = 1 if tmp_TP_COR_RACA == \"Indígena\" else 0\n#valores do grafico\nif v_TP_COR_RACA3 == 1:\n gcor = 100\nelif v_TP_COR_RACA1 == 1:\n gcor = 66\nelif v_TP_COR_RACA2 == 1:\n gcor = 65\nelse:\n gcor = 10\n\n#ESTUDO DO PAI - Q001\ntmp_Q001 = st.sidebar.selectbox(\"Até que série seu pai, ou o homem responsável por você, estudou?\",(\"Nunca estudou.\",\"Não completou a 4ª/5º.\",\"Não completou a 8ª/9º.\",\"Não completou o Ensino Médio.\",\"Completou o Ensino Médio.\",\"Superior Completo\",\"Pós-graduação.\", \"Não sei.\"),5)\nv_Q001_B = 1 if tmp_Q001 == \"Não completou a 4ª/5º.\" else 0\nv_Q001_C = 1 if tmp_Q001 == \"Não completou a 8ª/9º.\" else 0\nv_Q001_D = 1 if tmp_Q001 == \"Não completou o Ensino Médio.\" else 0\nv_Q001_E = 1 if tmp_Q001 == \"Completou o Ensino Médio.\" else 0\nv_Q001_F = 1 if tmp_Q001 == \"Superior Completo\" else 0\nv_Q001_G = 1 if tmp_Q001 == \"Pós-graduação.\" else 0\nv_Q001_H = 1 if tmp_Q001 == \"Não sei.\" else 0\n#valores do grafico\nif v_Q001_B == 1:\n g1 = 100\nelif v_Q001_D == 1:\n g1 = 67\nelse:\n g1 = 10\n#\n#Ocupação/trabalho do pai - Q003\ntmp_Q003 = st.sidebar.selectbox(\"Ocupação/trabalho do pai\",(\"Grupo 1\",\"Grupo 2\",\"Grupo 3\",\"Grupo 4\",\"Grupo 5\"),4)\nv_Q003_B = 1 if tmp_Q003 == \"Grupo 1\" else 0\nv_Q003_C = 1 if tmp_Q003 == \"Grupo 2\" else 0\nv_Q003_D = 1 if tmp_Q003 == \"Grupo 3\" else 0\nv_Q003_E = 1 if tmp_Q003 == \"Grupo 4\" else 0\nv_Q003_F = 1 if tmp_Q003 == \"Grupo 5\" else 0\n#valores do grafico\nif tmp_Q003 == \"Grupo 5\":\n g3 = 100\nelif tmp_Q003 == \"Grupo 2\":\n g3 = 76\nelse:\n g3 = 10\n#\n#Ocupação/trabalho da mãe\ntmp_Q004 = st.sidebar.selectbox(\"Ocupação/trabalho da mãe\",(\"Grupo 1\",\"Grupo 2\",\"Grupo 3\",\"Grupo 4\",\"Grupo 5\"),3)\nv_Q004_B = 1 if tmp_Q004 == \"Grupo 1\" else 0\nv_Q004_C = 1 if tmp_Q004 == \"Grupo 2\" else 0\nv_Q004_D = 1 if tmp_Q004 == \"Grupo 3\" else 0\nv_Q004_E = 1 if tmp_Q004 == \"Grupo 4\" else 0\nv_Q004_F = 1 if tmp_Q004 == \"Grupo 5\" else 0\n#valores do grafico\nif v_Q004_B == 1:\n g4 = 100\nelif v_Q004_F == 1:\n g4 = 45\nelif v_Q004_C == 1:\n g4 = 23\nelse:\n g4 = 10\n#\n#Renda da família\nfaixa_valores = st.sidebar.slider(\"Renda da família\", float(0), 20000., (0., 3000.0))\n#faixa_valores = st.sidebar.slider(\"Renda da família\", float(0), 20000., 3000.0)\nv_Q006_B = 1 if (faixa_valores[1] > 0 and faixa_valores[1] <= 998) else 0\nv_Q006_C = 1 if (faixa_valores[1] > 998 and faixa_valores[1] <= 1497 ) else 0\nv_Q006_D = 1 if (faixa_valores[1] > 1497 and faixa_valores[1] <= 1996 ) else 0\nv_Q006_E = 1 if (faixa_valores[1] > 1996 and faixa_valores[1] <= 2495 ) else 0\nv_Q006_F = 1 if (faixa_valores[1] > 2495 and faixa_valores[1] <= 2994 ) else 0\nv_Q006_G = 1 if 
(faixa_valores[1] > 2994 and faixa_valores[1] <= 3992 ) else 0\nv_Q006_H = 1 if (faixa_valores[1] > 3992 and faixa_valores[1] <= 4990 ) else 0\nv_Q006_I = 1 if (faixa_valores[1] > 4990 and faixa_valores[1] <= 5988 ) else 0\nv_Q006_J = 1 if (faixa_valores[1] > 5988 and faixa_valores[1] <= 6986 ) else 0\nv_Q006_K = 1 if (faixa_valores[1] > 6986 and faixa_valores[1] <= 7984 ) else 0\nv_Q006_L = 1 if (faixa_valores[1] > 7984 and faixa_valores[1] <= 8982 ) else 0\nv_Q006_M = 1 if (faixa_valores[1] > 8982 and faixa_valores[1] <= 9980 ) else 0\nv_Q006_N = 1 if (faixa_valores[1] > 9980 and faixa_valores[1] <= 11976) else 0\nv_Q006_O = 1 if (faixa_valores[1] > 11976 and faixa_valores[1] <= 14970) else 0\nv_Q006_P = 1 if (faixa_valores[1] > 14970 and faixa_valores[1] <= 19960) else 0\nv_Q006_Q = 1 if faixa_valores[1] > 19960 else 0\n#valores do grafico\nif v_Q006_C == 1:\n g6 = 100\nelif v_Q006_D == 1:\n g6 = 53\nelif v_Q006_E == 1:\n g6 = 45\nelif v_Q006_B == 1:\n g6 = 45\nelse:\n g6 = 10\n\n\n#residencia tem banheiro - Q008\ntmp_Q008 = st.sidebar.number_input(\"Quantas banheiros tem a residencia?\", value=1, max_value=10, min_value=0)\nv_Q008_B = 1 if tmp_Q008 == 1 else 0\nv_Q008_C = 1 if tmp_Q008 == 2 else 0\nv_Q008_D = 1 if tmp_Q008 == 3 else 0\nv_Q008_E = 1 if tmp_Q008 >= 4 else 0\n#valores do grafico\nif tmp_Q008 == 1:\n g8 = 100\nelif tmp_Q008 == 2:\n g8 = 20.91\nelif tmp_Q008 == 3:\n g8 = 5.47\nelse:\n g8 = 1\n\n#\n#tem TV - Q019\ntmp_Q019 = st.sidebar.number_input(\"Quantas TV tem a residencia?\", value=1, max_value=10, min_value=0)\nv_Q019_B = 1 if tmp_Q019 == 1 else 0\nv_Q019_C = 1 if tmp_Q019 == 2 else 0\nv_Q019_D = 1 if tmp_Q019 == 3 else 0\nv_Q019_E = 1 if tmp_Q019 >= 4 else 0\n#valores do grafico\nif tmp_Q019 == 1:\n g19 = 100\nelif tmp_Q019 == 2:\n g19 = 63\nelif tmp_Q019 == 3:\n g19 = 55\nelse:\n g19 = 10\n\n\n\n# inserindo um botão na tela\nbtn_predict = st.sidebar.button(\"Realizar Predição\")\n\n# verifica se o botão foi acionado\nif btn_predict:\n v = [v_idade, #34, #NU_IDADE\n v_tp_lingua, #TP_LINGUA\n v_Q005, #Q005\n 0, #'TP_SEXO_M'\n 0, #'TP_ESTADO_CIVIL_1',\n 0, #'TP_ESTADO_CIVIL_2',\n 0, #'TP_ESTADO_CIVIL_3',\n 0, #'TP_ESTADO_CIVIL_4',\n v_TP_COR_RACA1, #'TP_COR_RACA_1',\n v_TP_COR_RACA2, #'TP_COR_RACA_2',\n v_TP_COR_RACA3, #'TP_COR_RACA_3',\n v_TP_COR_RACA4, #'TP_COR_RACA_4',\n v_TP_COR_RACA5, #'TP_COR_RACA_5',\n 0, #'TP_NACIONALIDADE_1',\n 0, #'TP_NACIONALIDADE_2',\n 0, #'TP_NACIONALIDADE_3',\n 0, #'TP_NACIONALIDADE_4',\n 0, #'TP_ST_CONCLUSAO_2',\n 0, #'TP_ST_CONCLUSAO_3',\n 0, #'TP_ST_CONCLUSAO_4',\n 0, #'TP_ENSINO_1.0',\n 0, #'TP_ENSINO_2.0',\n 0, #'TP_DEPENDENCIA_ADM_ESC_1.0',\n 0, #'TP_DEPENDENCIA_ADM_ESC_2.0',\n 0, #'TP_DEPENDENCIA_ADM_ESC_3.0',\n 0, #'TP_DEPENDENCIA_ADM_ESC_4.0',\n v_Q001_B, #'Q001_B',\n v_Q001_C, #'Q001_C',\n v_Q001_D, #'Q001_D',\n v_Q001_E, #'Q001_E',\n v_Q001_F, #'Q001_F',\n v_Q001_G, #'Q001_G',\n v_Q001_H, #'Q001_H',\n 0, #'Q002_B',\n 0, #'Q002_C',\n 0, #'Q002_D',\n 0, #'Q002_E',\n 0, #'Q002_F',\n 0, #'Q002_G',\n 0, #'Q002_H',\n v_Q003_B, #'Q003_B',\n v_Q003_C, #'Q003_C',\n v_Q003_D, #'Q003_D',\n v_Q003_E, #'Q003_E',\n v_Q003_F, #'Q003_F',\n v_Q004_B, #'Q004_B',\n v_Q004_C, #'Q004_C',\n v_Q004_D, #'Q004_D',\n v_Q004_E, #'Q004_E',\n v_Q004_F, #'Q004_F',\n v_Q006_B, #'Q006_B',\n v_Q006_C, #'Q006_C',\n v_Q006_D, #'Q006_D',\n v_Q006_E, #'Q006_E',\n v_Q006_F, #'Q006_F',\n v_Q006_G, #'Q006_G',\n v_Q006_H, #'Q006_H',\n v_Q006_I, #'Q006_I',\n v_Q006_J, #'Q006_J',\n v_Q006_K, #'Q006_K',\n v_Q006_L, #'Q006_L',\n v_Q006_M, #'Q006_M',\n v_Q006_N, 
#'Q006_N',\n v_Q006_O, #'Q006_O',\n v_Q006_P, #'Q006_P',\n v_Q006_Q, #'Q006_Q',\n 0, #'Q007_B',\n 0, #'Q007_C',\n 0, #'Q007_D',\n v_Q008_B, #'Q008_B',\n v_Q008_C, #'Q008_C',\n v_Q008_D, #'Q008_D',\n v_Q008_E, #'Q008_E',\n 0, #'Q009_B',\n 0, #'Q009_C',\n 0, #'Q009_D',\n 0, #'Q009_E',\n 0, #'Q010_B',\n 0, #'Q010_C',\n 0, #'Q010_D',\n 0, #'Q010_E',\n 0, #'Q011_B',\n 0, #'Q011_C',\n 0, #'Q011_D',\n 0, #'Q011_E',\n 0, #'Q012_B',\n 0, #'Q012_C',\n 0, #'Q012_D',\n 0, #'Q012_E',\n 0, #'Q013_B',\n 0, #'Q013_C',\n 0, #'Q013_D',\n 0, #'Q013_E',\n 0, #'Q014_B',\n 0, #'Q014_C',\n 0, #'Q014_D',\n 0, #'Q014_E',\n 0, #'Q015_B',\n 0, #'Q015_C',\n 0, #'Q015_D',\n 0, #'Q015_E',\n 0, #'Q016_B',\n 0, #'Q016_C',\n 0, #'Q016_D',\n 0, #'Q016_E',\n 0, #'Q017_B',\n 0, #'Q017_C',\n 0, #'Q017_D',\n 0, #'Q017_E',\n 0, #'Q018_B',\n v_Q019_B, #'Q019_B',\n v_Q019_C, #'Q019_C',\n v_Q019_D, #'Q019_D',\n v_Q019_E, #'Q019_E',\n 0, #'Q020_B',\n 0, #'Q021_B',\n 0, #'Q022_B',\n 0, #'Q022_C',\n 0, #'Q022_D',\n 0, #'Q022_E',\n 0, #'Q023_B',\n 0, #'Q024_B',\n 0, #'Q024_C',\n 0, #'Q024_D',\n 0, #'Q024_E',\n 0, #'Q025_B',\n 0, #'LE_NO_MUNICIPIO_RESIDENCIA',\n 0, #'LE_NO_MUNICIPIO_NASCIMENTO',\n 0, #'LE_SG_UF_NASCIMENTO',\n 0#, #'LE_NO_MUNICIPIO_ESC',\n #0, #'LE_SG_UF_PROVA'\n ]\n #result = modelo.predict([[crim,indus,chas,nox,rm,ptratio,b,lstat]])\n result = modelo.predict([v])\n r = result[0] #st.write(result[0])\n if r == 1:\n v_resultado = \"Aprovado!\" #if r == 1 else \"erro no calculo1\"\n g_cor = 'g'\n else:\n v_resultado = \"Reprovado!\" # if r == 0 else \"erro no calculo2\"\n g_cor = 'r'\n #st.sidebar.subheader(\"O Aluno foi:\")\n st.subheader(\"O Aluno foi:\")\n #result = \"US $ \"+str(round(result[0]*10,2))\n #v_resultado = str(result)\n st.sidebar.write(v_resultado)\n st.write(v_resultado)\n\n # plot a distribuição dos dados\n #f = px.histogram(dados, x=\"MEDV\", nbins=100, title=\"Distribuição de Preços\")\n #f.update_xaxes(title=\"MEDV\")\n #f.update_yaxes(title=\"Total Imóveis\")\n #st.plotly_chart(f)\n\n caracteristicas = [\"TRABALHO_PAI\", \"TRABALHO_MÃE\", \"ESTUDO_PAI\", \"BANHEIRO\", \"TV\", \"MORADORES\", \"RAÇA\", \"RENDA\", \"IDIOMA\"]\n valor = [g3, g4, g1, g8, g19, g5, gcor, g6 , gl, g3]\n #valor = [45, 53, 15, 61, 57, 45, 30, 30 , 90, 45]\n \n # Initialise the spider plot by setting figure size and polar projection\n fig = plt.figure(figsize=(10, 6))\n plt.subplot(polar=True)\n \n theta = np.linspace(0, 2 * np.pi, len(valor))\n \n # Arrange the grid into number of sales equal parts in degrees\n lines, labels = plt.thetagrids(range(0, 360, int(360/len(caracteristicas))), (caracteristicas))\n \n # Plot actual sales graph\n plt.plot(theta, valor)\n plt.fill(theta, valor, g_cor, alpha=0.2)\n \n # Plot expected sales graph\n #plt.plot(theta, expected)\n \n # Add legend and title for the plot\n #plt.legend(labels=('Actual', 'Expected'), loc=1)\n plt.title(\"Vou bem no ENEM? 
\\n\")\n \n # Dsiplay the plot on the screen\n #plt.show()\n #st.plotly_chart(fig)\n t1.empty()\n #t2.write(\"\")\n #t3.empty()\n t4.empty()\n t5.empty()\n t6.empty()\n t7.empty()\n t8.empty()\n t9.empty()\n st.write(fig)\n #st.write(valor)\n","sub_path":"app_tcc.py","file_name":"app_tcc.py","file_ext":"py","file_size_in_byte":15663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"632737857","text":"import os\nimport shutil\n\n\ndef make_env(config, save_scr=False, copy_dirs=None):\n if copy_dirs is not None:\n try:\n _copy_dirs(config, copy_dirs)\n except:\n print(\"dirs have been exist\")\n\n dirs = [\n config.INPUT,\n config.OUTPUT,\n config.SUBMISSION,\n config.FEATURE,\n config.NOTEBOOKS,\n config.EXP,\n config.PREDS,\n config.COLS,\n config.TRAINED,\n config.REPORTS\n ]\n\n for v in dirs:\n if not os.path.isdir(v):\n print(f\"making {v}\")\n os.makedirs(v)\n\n if save_scr:\n shutil.copy(f\"{config.RUN_NAME}.py\", config.EXP) # save scr\n\n\ndef _copy_dirs(config, dirs):\n exp_dir = config.OUTPUT + f\"/{dirs['RUN_NAME']}\"\n\n if \"FEATURE\" in dirs[\"DIRS\"]:\n source = os.path.join(exp_dir, \"feature\")\n copy_to = config.FEATURE\n shutil.copytree(source, copy_to)\n print(f\"features dir is copied from {dirs['RUN_NAME']}\")\n\n if \"COLS\" in dirs[\"DIRS\"]:\n source = os.path.join(exp_dir, \"cols\")\n copy_to = config.COLS\n shutil.copytree(source, copy_to)\n print(f\"cols dir is copied from {dirs['RUN_NAME']}\")\n\n if \"PREDS\" in dirs[\"DIRS\"]:\n source = os.path.join(exp_dir, \"preds\")\n copy_to = config.PREDS\n shutil.copytree(source, copy_to)\n print(f\"preds dir is copied from {dirs['RUN_NAME']}\")\n","sub_path":"mypipe/exp_env.py","file_name":"exp_env.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"494094176","text":"from get_input import *\nimport random \n\nclass Car:\n\tdef __init__(self):\n\t\tself.available_time = 0\n\t\tself.pos = (0, 0)\n\t\tself.queue = []\n\ndef dist(p1, p2):\n\tx1, y1 = p1\n\tx2, y2 = p2\n\treturn abs(y2 - y1) + abs(x2 - x1)\n\ndef calculate_finish(car, ride):\n\tstart = car.available_time + dist(car.pos, ride.start)\n\tif ride.start_time > start:\n\t\tstart = ride.start_time\n\treturn start + dist(ride.start, ride.end)\n\ndef main():\n\ttotal = 0\n\tmissed = 0\n\tR, C, F, N, B, T, rides = get_input()\n\tcars = [Car() for i in range(F)]\n\tfor car in cars: \n\t\tcar.pos = (random.randint(0, R-1), random.randint(0, C-1))\n\t\tcar.available_time = car.pos[0] + car.pos[1]\n\n\tfor car in cars:\n\t\tclosest = 10**10\n\t\tride_found = False\n\t\tfor ride in rides:\n\t\t\tdistance = dist(car.pos, ride.start)\n\t\t\tif distance < closest and calculate_finish(car, ride) < ride.finish_time:\n\t\t\t\tride_found = True\n\t\t\t\tcar_ride = ride\n\t\t\t\tclosest = distance\n\t\tif \tride_found:\n\t\t\tcar.queue.append(ride.ride_id)\n\t\t\tcar.available_time = calculate_finish(car_ride, ride)\n\t\t\tcar.pos = ride.end\n\t\t\trides.remove(car_ride)\n\t\t\ttotal += ride.ride_time\n\t\telse:\n\t\t\tmissed += 1\n\tprint(total)\n\tprint(missed)\n\tfor car in cars:\n\t\tprint(len(car.queue), *car.queue)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"ciara_no_hurry_reverse.py","file_name":"ciara_no_hurry_reverse.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"256246843","text":"r\"\"\"Configuration 
for language model experiment.\n\nUsage:\n from lmp.config import BaseConfig\n\n config = BaseConfig(...params)\n config.save(path)\n config = config.load(path)\n\"\"\"\n\n# built-in modules\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nimport math\nimport os\n\nfrom typing import Generator\nfrom typing import Tuple\nfrom typing import Union\n\n# 3rd-party modules\n\nimport torch\n\n# self-made modules\n\nimport lmp.path\n\n\nclass BaseConfig:\n r\"\"\"Configuration for text-generation model.\n\n Attributes:\n batch_size:\n Training batch size. Must be bigger than or equal to `1`.\n checkpoint_step:\n Checkpoint interval based on number of mini-batch. Must be bigger\n than or equal to `1`.\n d_emb:\n Embedding dimension. Must be bigger than or equal to `1`.\n d_hid:\n Hidden dimension. Must be bigger than or equal to `1`.\n dataset:\n Name of the dataset to perform experiment. Must not be empty.\n dropout:\n Dropout rate. Must range from `0.0` to `1.0`.\n epoch:\n Number of training epochs. Must be bigger than or equal to `1`.\n experiment:\n Name of the experiment. Must not be empty.\n is_uncased:\n Convert all upper case to lower case. Must be `True` or `False`.\n learning_rate:\n Optimizer's parameter `lr`. Must be bigger than `0.0`.\n max_norm:\n Max norm of gradient. Used when cliping gradient norm. Must be\n bigger than `0.0`.\n max_seq_len\n Maximum input sequence length. Must be greater than `1` or equal to\n `-1`.'\n min_count:\n Filter out tokens occur less than `min_count`. Must be bigger than\n or equal to `1`.\n model_class:\n Language model's class. Must not be empty.\n num_linear_layers:\n Number of Linear layers. Must be bigger than or equal to `1`.\n num_rnn_layers:\n Number of rnn layers. Must be bigger than or equal to `1`.\n optimizer_class:\n Optimizer's class. Must not be empty.\n seed:\n Control random seed. Must be bigger than or equal to `1`.\n tokenizer_class:\n Tokenizer's class. Must not be empty.\n\n Raises:\n TypeError:\n When one of the arguments are not an instance of their type annotation\n respectively.\n ValueError:\n When one of the arguments do not follow their constraints. 
See\n docstring for arguments constraints.\n \"\"\"\n\n def __init__(\n self,\n batch_size: int = 1,\n checkpoint_step: int = 500,\n d_emb: int = 1,\n d_hid: int = 1,\n dataset: str = '',\n dropout: float = 0.1,\n epoch: int = 10,\n experiment: str = '',\n is_uncased: bool = False,\n learning_rate: float = 1e-4,\n max_norm: float = 1.0,\n max_seq_len: int = 60,\n min_count: int = 1,\n model_class: str = 'lstm',\n num_linear_layers: int = 1,\n num_rnn_layers: int = 1,\n optimizer_class: str = 'adam',\n seed: int = 1,\n tokenizer_class: str = 'char_dict'\n ):\n # Type check.\n if not isinstance(batch_size, int):\n raise TypeError('`batch_size` must be an instance of `int`.')\n\n if not isinstance(checkpoint_step, int):\n raise TypeError('`checkpoint_step` must be an instance of `int`.')\n\n if not isinstance(d_emb, int):\n raise TypeError('`d_emb` must be an instance of `int`.')\n\n if not isinstance(d_hid, int):\n raise TypeError('`d_hid` must be an instance of `int`.')\n\n if not isinstance(dataset, str):\n raise TypeError('`dataset` must be an instance of `str`.')\n\n if not isinstance(dropout, float):\n raise TypeError('`dropout` must be an instance of `float`.')\n\n if not isinstance(experiment, str):\n raise TypeError('`experiment` must be an instance of `str`.')\n\n if not isinstance(epoch, int):\n raise TypeError('`epoch` must be an instance of `int`.')\n\n if not isinstance(is_uncased, bool):\n raise TypeError('`is_uncased` must be an instance of `bool`.')\n\n if not isinstance(learning_rate, float):\n raise TypeError('`learning_rate` must be an instance of `float`.')\n\n if not isinstance(max_norm, float):\n raise TypeError('`max_norm` must be an instance of `float`.')\n\n if not isinstance(max_seq_len, int):\n raise TypeError('`max_seq_len` must be an instance of `int`.')\n\n if not isinstance(min_count, int):\n raise TypeError('`min_count` must be an instance of `int`.')\n\n if not isinstance(model_class, str):\n raise TypeError('`model_class` must be an instance of `str`.')\n\n if not isinstance(num_linear_layers, int):\n raise TypeError(\n '`num_linear_layers` must be an instance of `int`.')\n\n if not isinstance(num_rnn_layers, int):\n raise TypeError('`num_rnn_layers` must be an instance of `int`.')\n\n if not isinstance(optimizer_class, str):\n raise TypeError('`optimizer_class` must be an instance of `str`.')\n\n if not isinstance(seed, int):\n raise TypeError('`seed` must be an instance of `int`.')\n\n if not isinstance(tokenizer_class, str):\n raise TypeError('`tokenizer_class` must be an instance of `str`.')\n\n # Value check.\n if batch_size < 1:\n raise ValueError(\n '`batch_size` must be bigger than or equal to `1`.'\n )\n\n if checkpoint_step < 1:\n raise ValueError(\n '`checkpoint_step` must be bigger than or equal to `1`.'\n )\n\n if d_emb < 1:\n raise ValueError('`d_emb` must be bigger than or equal to `1`.')\n\n if d_hid < 1:\n raise ValueError('`d_hid` must be bigger than or equal to `1`.')\n\n if not dataset:\n raise ValueError('`dataset` must not be empty.')\n\n if not 0 <= dropout <= 1:\n raise ValueError('`dropout` must range from `0.0` to `1.0`.')\n\n if epoch < 1:\n raise ValueError('`epoch` must be bigger than or equal to `1`.')\n\n if not experiment:\n raise ValueError('`experiment` must not be empty.')\n\n if learning_rate < 0.0 or math.isnan(learning_rate):\n raise ValueError('`learning_rate` must be bigger than `0.0`.')\n\n if max_norm < 0.0 or math.isnan(max_norm):\n raise ValueError('`max_norm` must be bigger than `0.0`.')\n\n if 0 <= max_seq_len <= 
1:\n raise ValueError(\n '`max_seq_len` must be greater than `1` or equal to `-1`.'\n )\n\n if min_count < 1:\n raise ValueError(\n '`min_count` must be bigger than or equal to `1`.'\n )\n\n if not model_class:\n raise ValueError('`model_class` must not be empty.')\n\n if num_linear_layers < 1:\n raise ValueError(\n '`num_linear_layers` must be bigger than or equal to `1`.'\n )\n\n if num_rnn_layers < 1:\n raise ValueError(\n '`num_rnn_layers` must be bigger than or equal to `1`.'\n )\n\n if not optimizer_class:\n raise ValueError('`optimizer_class` must not be empty.')\n\n if seed < 1:\n raise ValueError('`seed` must be bigger than or equal to `1`.')\n\n if not tokenizer_class:\n raise ValueError('`tokenizer_class` must not be empty.')\n\n # Ensure instance have exact type specified in type annotation.\n self.batch_size = int(batch_size)\n self.checkpoint_step = int(checkpoint_step)\n self.d_emb = int(d_emb)\n self.d_hid = int(d_hid)\n self.dataset = str(dataset)\n self.dropout = float(dropout)\n self.epoch = int(epoch)\n self.experiment = str(experiment)\n self.is_uncased = bool(is_uncased)\n self.learning_rate = float(learning_rate)\n self.max_norm = float(max_norm)\n self.max_seq_len = int(max_seq_len)\n self.min_count = int(min_count)\n self.model_class = str(model_class)\n self.num_linear_layers = int(num_linear_layers)\n self.num_rnn_layers = int(num_rnn_layers)\n self.optimizer_class = str(optimizer_class)\n self.seed = int(seed)\n self.tokenizer_class = str(tokenizer_class)\n\n @classmethod\n def load(cls, experiment: str):\n r\"\"\"Load configuration JSON file.\n\n Args:\n experiment:\n Name of the existing experiment.\n Configuration file must be in JSON format.\n\n Raises:\n FileNotFoundError:\n If `experiment` does not exist.\n JSONDecodeError:\n If configuration is not in JSON format.\n TypeError:\n When `experiment` is not an instance of `str`.\n ValueError:\n When `experiment` is empty string.\n \"\"\"\n\n if not isinstance(experiment, str):\n raise TypeError('`experiment` must be an instance of `str`.')\n\n if not experiment:\n raise ValueError('`experiment` must not be empty.')\n\n file_path = os.path.join(lmp.path.DATA_PATH, experiment, 'config.json')\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f'File {file_path} does not exist.')\n\n with open(file_path, 'r', encoding='utf-8') as input_file:\n return cls(**json.load(input_file))\n\n def __iter__(self) -> Generator[\n Tuple[str, Union[bool, float, int, str]], None, None\n ]:\n r\"\"\"Make instance attributes iterable.\n\n Yields:\n All instance attributes.\n \"\"\"\n yield 'batch_size', self.batch_size\n yield 'checkpoint_step', self.checkpoint_step\n yield 'd_emb', self.d_emb\n yield 'd_hid', self.d_hid\n yield 'dataset', self.dataset\n yield 'dropout', self.dropout\n yield 'epoch', self.epoch\n yield 'experiment', self.experiment\n yield 'is_uncased', self.is_uncased\n yield 'learning_rate', self.learning_rate\n yield 'max_norm', self.max_norm\n yield 'max_seq_len', self.max_seq_len\n yield 'min_count', self.min_count\n yield 'model_class', self.model_class\n yield 'num_linear_layers', self.num_linear_layers\n yield 'num_rnn_layers', self.num_rnn_layers\n yield 'optimizer_class', self.optimizer_class\n yield 'seed', self.seed\n yield 'tokenizer_class', self.tokenizer_class\n\n def save(self) -> None:\n r\"\"\"Save configuration into JSON file.\n\n Raises:\n FileExistsError:\n When experiment path exists but is not a directory.\n \"\"\"\n\n file_dir = os.path.join(lmp.path.DATA_PATH, 
self.experiment)\n file_path = os.path.join(file_dir, 'config.json')\n\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n elif not os.path.isdir(file_dir):\n raise FileExistsError(f'{file_dir} is not a directory.')\n\n with open(file_path, 'w', encoding='utf8') as output_file:\n json.dump(dict(self), output_file, ensure_ascii=False)\n\n @property\n def device(self) -> torch.device:\n r\"\"\"Get running model device.\n\n If `torch.cuda.is_available() == True`, then run model on GPU.\n Else run model on CUDA device.\n\n Returns:\n Device create by `torch.device`.\n \"\"\"\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n","sub_path":"lmp/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"436611749","text":"# Copyright (c) 2015 IBM Corp.\n#\n# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause\n# license at the users choice. A copy of both licenses are available in the\n# project source as Apache-2.0 and BSD. You may not use this file except in\n# compliance with one of these two licences.\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under these licenses is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# license you chose for the specific language governing permissions and\n# limitations under that license.\n\nfrom __future__ import absolute_import\n\n__all__ = [\n 'Deprecations',\n]\n\nimport contextlib\nimport re\nimport warnings # conflicts with the local warnings module so absolute_import\n\nimport fixtures\n\n\nclass Deprecations(fixtures.Fixture):\n \"\"\"Prevent calls to deprecated functions.\n\n This fixture can be added to a testcase to ensure that the code under test\n isn't calling deprecated function. It sets Python's `warnings` module for\n the module under test to \"error\" so that DeprecationWarning will be\n raised.\n\n You might want your application to not use any deprecated function.\n Deprecated function is going to be removed and sometimes is being removed\n because it's buggy and you shouldn't be using it.\n\n It can be difficult to tell just through code reviews that new code is\n calling deprecated function. This fixture can be used to protect you from\n developers proposing changes that use deprecated function.\n\n It can also be useful to be able to test if your application is still using\n some function that's been newly deprecated.\n\n .. note:: This fixture uses :func:`warnings.catch_warnings`, as such the\n note there applies: The fixture modifies global state and therefore is\n not thread safe.\n\n :param str module: The name of a Python module. DeprecationWarnings emitted\n from this module will cause an error to be raised.\n \"\"\"\n\n def __init__(self, module):\n super(Deprecations, self).__init__()\n self._module_regex = '^%s' % re.escape(module + '.')\n\n def _setUp(self):\n cw = warnings.catch_warnings()\n cw.__enter__()\n self.addCleanup(cw.__exit__)\n warnings.filterwarnings('error', category=DeprecationWarning,\n module=self._module_regex)\n\n def ignore_deprecations(self):\n \"\"\"Indicate that this test expects to call deprecated function.\n\n Normally you'll want to protect all tests from calling deprecated\n functions, then some function is deprecated and now tests are failing\n due to the deprecation. 
This function can be used to indicate\n        that the test is going to call deprecated functions and should not fail.\n        This can be used as a marker either for tests that are there to verify\n        deprecated functions continue to work and will be removed along with\n        the function, or for tests that need to be fixed to stop calling\n        deprecated functions.\n        \"\"\"\n        warnings.filterwarnings('ignore', category=DeprecationWarning,\n                                module=self._module_regex)\n\n    @contextlib.contextmanager\n    def ignore_deprecations_here(self):\n        \"\"\"This section of code ignores calls to deprecated functions.\n\n        If you've got a test that part of it is testing deprecated functions\n        then wrap the part in this context manager::\n\n            with self.deprecations.ignore_deprecations_here():\n                call_deprecated_function()\n\n        \"\"\"\n        self.cleanUp()\n        try:\n            yield\n        finally:\n            self.setUp()\n","sub_path":"fixtures/_fixtures/deprecations.py","file_name":"deprecations.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"90949294","text":"from app import app\nimport boto3\nimport botocore\nfrom flask import Blueprint, Flask, render_template, request, redirect, flash, url_for\nfrom config import S3_KEY, S3_SECRET, S3_LOCATION\nfrom werkzeug import secure_filename\n\n#allowed extensions#\nALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n\ns3 = boto3.client(\n   \"s3\",\n   aws_access_key_id=S3_KEY,\n   aws_secret_access_key=S3_SECRET\n)\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef upload_file_to_s3(file, bucket_name, acl=\"public-read\"):\n    try:\n\n        s3.upload_fileobj(\n            file,\n            bucket_name,\n            file.filename,\n            ExtraArgs={\n                \"ACL\": acl,\n                \"ContentType\": file.content_type\n            }\n        )\n    except Exception as e:\n        print(\"Something Happened: \", e)\n        return e\n\n    return f'{S3_LOCATION}{file.filename}'\n","sub_path":"instagram_web/blueprints/images/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"115911344","text":"from PIL import Image, ExifTags\n# img = Image.open(\"/home/yousuf/Downloads/page-1-1.jpg\")#/media/yousuf/YOUSUF/20190330_131415.jpg\n\nfilename=\"/home/yousuf/Downloads/original_images/20190330_131415.jpg\"\n\nimg = Image.open(filename)\n#exif = { ExifTags.TAGS[k]: v for k, v in img._getexif().items() if k in ExifTags.TAGS }\n# print(exif)\nfrom PIL.ExifTags import GPSTAGS\nfrom PIL.ExifTags import TAGS\nfrom PIL import Image\nimport numpy as np\n#import pandas as pd\n#import mysql.connector\n#import config\n#mysql = mysql.connector.connect(\n#        host=config.HOST,\n#        user=config.USER,\n#        passwd=config.PSWD,\n#        database = config.CLEARSIGHT_NAME\n#        )\n\n#mycursor=mysql.cursor()\n#mycursor.execute(\"SELECT frenns_id FROM frenns_app_api WHERE frenns_app_api.frenns_id='\"+str(frenns_id)+\"'\")\n#update=mycursor.fetchall()\nDateTime=0\nDateTime1=0\nDateTimeDigitized=0\nDateTimeOriginal=0\n\n\nexifData = {}\nexifDataRaw = img._getexif()\nfor tag, value in exifDataRaw.items():\n    decodedTag = ExifTags.TAGS.get(tag, tag)\n    exifData[decodedTag] = value\nfor key,value in enumerate(exifData):\n    if value==\"DateTime\":\n        data=str(exifData[value])\n#        print(value,data)\n        DateTime=data\n    elif value==\"DateTimeDigitized\":\n        data=str(exifData[value])\n#        print(value,data)\n        DateTimeDigitized=data\n    elif value==\"DateTimeOriginal\":\n        
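# --- Illustrative sketch, not a dataset record: one way the Deprecations
# --- fixture from the deprecations.py record above might be used in a test.
# --- The module name "mypackage" is hypothetical, and the import assumes the
# --- fixture is re-exported at package level as the record's __all__ suggests.
import unittest

import fixtures
from fixtures import Deprecations


class DeprecationGuardedTest(fixtures.TestWithFixtures, unittest.TestCase):
    def test_calls_no_deprecated_functions(self):
        # From here on, any DeprecationWarning emitted from mypackage.*
        # raises an error and fails the test.
        self.useFixture(Deprecations('mypackage'))
        # ... exercise the code under test ...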
data=str(exifData[value])\n# print(value,data) \n DateTimeOriginal=data\n elif value==\"ExifImageHeight\":\n data=str(exifData[value])\n# print(value,data) \n ExifImageHeight=data\n elif value==\"ExifImageWidth\":\n data=str(exifData[value])\n# print(value,data) \n ExifImageWidth=data\n elif value==\"ExifVersion\":\n data=str(exifData[value])\n# print(value,data) \n ExifVersion=data\n elif value==\"Flash\":\n data=str(exifData[value])\n# print(value,data) \n Flash=data\n elif value==\"FocalLengthIn35mmFilm\":\n data=str(exifData[value])\n# print(value,data) \n FocalLengthIn35mmFilm=data\n elif value==\"Make\":\n data=str(exifData[value])\n# print(value,data)\n Make=data\n elif value==\"Model\":\n data=str(exifData[value])\n# print(value,data) \n Model=data\n elif value==\"Orientation\":\n data=str(exifData[value])\n# print(value,data) \n Orientation=data\n elif value==\"Software\":\n data=str(exifData[value])\n# print(value,data) \n Software=data\n elif value==\"WhiteBalance\":\n data=str(exifData[value])\n# print(value,data) \n WhiteBalance=data\n elif value==\"YCbCrPositioning\":\n data=str(exifData[value])\n# print(value,data)\n YCbCrPositioning=data\n elif value==\"LensModel\":\n data=str(exifData[value])\n# print(value,data)\n LensModel=data\n elif value==\"LensMake\":\n data=str(exifData[value])\n# print(value,data)\n LensMake=data\n\n\n\ndef get_exif(filename):\n image = Image.open(filename)\n image.verify()\n return image._getexif()\n\nexif = get_exif(filename)\n#print(exif)\ndef get_geotagging(exif):\n if not exif:\n# raise ValueError(\"No EXIF metadata found\")\n print(\"No EXIF metadata found\")\n else:\n geotagging = {}\n for (idx, tag) in TAGS.items():\n if tag == 'GPSInfo':\n if idx not in exif:\n # raise ValueError(\"No EXIF geotagging found\")\n print(\"No EXIF geotagging found\")\n break\n for (key, val) in GPSTAGS.items():\n if key in exif[idx]:\n geotagging[val] = exif[idx][key]\n\n return geotagging\n\n# exif = get_exif(\"/media/yousuf/YOUSUF/20190330_131415.jpg\")\n# geotags = get_geotagging(exif)\n# print(geotags)\n\n\n\ndef get_decimal_from_dms(dms, ref):\n\n degrees = dms[0][0] / dms[0][1]\n minutes = dms[1][0] / dms[1][1] / 60.0\n seconds = dms[2][0] / dms[2][1] / 3600.0\n\n if ref in ['S', 'W']:\n degrees = -degrees\n minutes = -minutes\n seconds = -seconds\n\n return round(degrees + minutes + seconds, 5)\n\ndef get_coordinates(geotags):\n lat = get_decimal_from_dms(geotags['GPSLatitude'], geotags['GPSLatitudeRef'])\n\n lon = get_decimal_from_dms(geotags['GPSLongitude'], geotags['GPSLongitudeRef'])\n\n return (lat,lon)\n\n\nexif = get_exif(filename)\ngeotags = get_geotagging(exif)\n#print(geotags)\n#print(get_coordinates(geotags))\ncoordinates=str(get_coordinates(geotags))\n\ndef make_thumbnail(filename):\n img = Image.open(filename)\n\n (width, height) = img.size\n if width > height:\n ratio = 50.0 / width\n else:\n ratio = 50.0 / height\n\n img.thumbnail((round(width * ratio), round(height * ratio)), Image.LANCZOS)\n img.save('thumbnail.jpg')\n# thumb=make_thumbnail(\"/media/yousuf/YOUSUF/20190330_131415.jpg\")\n\n# import exif\n# photo_path = \"/media/yousuf/YOUSUF/20190330_131415.jpg\"\n# data = exif.parse(photo_path)\n# print(data)\n\nimport exifread\n# Open image file for reading (binary mode)\nf = open(filename, 'rb')\n\n# Return Exif tags\ntags = exifread.process_file(f)\n#print(tags)\nprintable={}\nfor key,value in tags.items():\n if key==\"GPS GPSVersionID\":\n value=str(value)\n print(key,value)\n GPSVersionID=value\n elif key==\"Image DateTime\":\n 
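# --- Illustrative worked check, not a dataset record: the arithmetic in
# --- get_decimal_from_dms() from the tags_image.py record above, using a
# --- made-up coordinate of 52 deg 30' 36" N stored as EXIF rationals.
dms = ((52, 1), (30, 1), (36, 1))         # (numerator, denominator) pairs
degrees = dms[0][0] / dms[0][1]           # 52.0
minutes = dms[1][0] / dms[1][1] / 60.0    # 30/60   = 0.5
seconds = dms[2][0] / dms[2][1] / 3600.0  # 36/3600 = 0.01
assert round(degrees + minutes + seconds, 5) == 52.51
# An 'S' or 'W' reference negates each term, as the record's helper does.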
value=str(value)\n print(key,value) \n DateTime1=value\n elif key==\"Image GPSInfo\":\n value=str(value)\n print(key,value) \n GPSInfo=value\n elif key==\"Image Orientation\":\n value=str(value)\n print(key,value) \n Orientation1=value\n elif key==\"Interoperability InteroperabilityIndex\":\n value=str(value)\n print(key,value) \n InteroperabilityIndex=value\n elif key==\"Interoperability InteroperabilityVersion\":\n value=str(value)\n print(key,value) \n InteroperabilityVersion=value\n elif key==\"Thumbnail Compression\":\n value=str(value)\n print(key,value)\n thumbnail_compression=value\n elif key==\"Thumbnail ResolutionUnit\":\n value=str(value)\n print(key,value) \n resolution_unit=value\n elif key==\"GPS GPSDate\":\n value=str(value)\n print(key,value) \n GPSDate=value\n elif key==\"EXIF SceneType\":\n value=str(value)\n print(key,value) \n SceneType=value\n elif key==\"EXIF SceneCaptureType\":\n value=str(value)\n print(key,value) \n SceneCaptureType=value\n elif key==\"EXIF Flash\":\n value=str(value)\n print(key,value)\n Flash1=value\n elif key==\"EXIF ExposureMode\":\n value=str(value)\n print(key,value)\n ExposureMode=value\n elif key==\"EXIF SensingMethod\":\n value=str(value)\n print(key,value)\n SensingMethod=value\n if key==\"JPEGThumbnail\":\n #convert thumbnail(in bytes) to image\n# import io\n# image = Image.open(io.BytesIO(value))\n# image.save(savepath)\n value=str(value)\n printable[key]=value\n try:\n printable[key]=value.printable\n except:\n continue\n\nimport json\n#metadata={}\n#for key,value in exifData.items():\n# if value.__class__==bytes:\n# break\n# else:\n# metadata[key]=value\n \n\nprintable = json.dumps(printable)\nif DateTime!=0 and DateTime1!=0:\n if DateTime!=DateTime1:\n edited=1\n else:\n datetime=DateTime\nif DateTime!=0:\n datetime=DateTime\nelif DateTime==0:\n if DateTime1!=0:\n datetime=DateTime1\n elif DateTime1==0:\n if DateTimeDigitized!=0:\n datetime=DateTimeDigitized\n elif DateTimeDigitized==0:\n datetime=0\nif datetime==0:\n edited=2;\nelif datetime!=0:\n if DateTimeOriginal==0:\n originaldatetime=0\n edited=2\n elif DateTimeOriginal!=0:\n originaldatetime=DateTimeOriginal\n if datetime==originaldatetime:\n edited=0\n else:\n edited=1\n \nif DateTime!=0:\n if DateTime1!=0:\n if DateTimeDigitized!=0:\n if DateTimeOriginal!=0:\n if max(DateTime,DateTime1,DateTimeDigitized)!=DateTimeOriginal:\n edited=1;\n\n\n\n\n\n\n ","sub_path":"tags_image.py","file_name":"tags_image.py","file_ext":"py","file_size_in_byte":8200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"29910671","text":"from aiogram import types\nfrom aiogram.dispatcher.filters import CommandStart\nfrom aiogram.utils.markdown import hitalic, italic, hcode, code, strikethrough, hstrikethrough, hbold, bold\n\nfrom loader import dp\n\nhtml_text = \"\\n\".join(\n [\n hbold(\"Жизнь — это не всегда вопрос хороших карт. 
Иногда это хороший розыгрыш плохой руки.\"),\n hbold(\"© Джек Лондон\"),\n\n ]\n)\n@dp.message_handler(CommandStart())\nasync def bot_start(message: types.Message):\n await message.answer_sticker(open(\"animation/wave_animated_sticker.gif_\", 'rb'))\n await message.answer(f\"Привет, {message.from_user.full_name}!\")\n await message.answer(html_text, parse_mode=types.ParseMode.HTML)\n","sub_path":"handlers/users/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"610849968","text":"# The general 2D FDTD diffusion simulator code for ECE 570 problem set #2\n\n## IMPORT MODULES\nfrom finite_difference_methods import *\nfrom matplotlib import pyplot as plt\nimport time\n\n# For timing the simulation\nstart_time = time.time()\n\n# Spatial domain\ndx = 1e-6\ndy = 1e-6\nX = 10e-6\nY = 10e-6\n\nNx = math.floor(X / dx)\ndx = X / Nx\nNy = math.floor(Y / dy)\ndy = Y / Ny\n\n# Time settings\ndt = 1e-19 # Timestep in seconds [Will be updated according to CFL condition if too low]\nNt = 500000 # Number of timesteps [Takes priority over T if provided]\n\n# Initial conditions [Just a point in the center]\nx_init = 5e-6\ny_init = 5e-6\n\n# Diffusion parameter\nD = 50\n\n## CODE\nprint(\"------------------------\")\nprint(\"-- Parsing Inputs...\")\ninputs = locals()\n\n# Index the domain for use throughout the code.\ndomain = {\n \"shape\": np.array([Ny, Nx]),\n \"size\": np.prod(np.array([Ny, Nx])),\n \"h\": np.array([dy, dx])\n}\n\nprint(\"Ymax: \\t\\t\\t {:0.2e}\".format(Y))\nprint(\"dy: \\t\\t\\t {:0.2e}\".format(dy))\nprint(\"Ny: \\t\\t\\t {:d}\".format(Ny))\nprint(\"Xmax: \\t\\t\\t {:0.2e}\".format(X))\nprint(\"dx: \\t\\t\\t {:0.2e}\".format(dx))\nprint(\"Nx: \\t\\t\\t {:d}\".format(Nx))\n\n#dt_max = ((4/3) / D) * (np.sum(np.power(domain['h'], -2)) ** (-1)) # N-Dimensional CFL Condition\n#if dt > dt_max or dt < 0:\n# print(\"WARNING: Input timestep {:0.2e} is larger than allowed by stability condition (or negative). 
Updating dt...\".format(dt))\n# dt = dt_max\n\nprint(\"Timestep: \\t\\t\\t {:0.2e} seconds\".format(dt))\n\nif 'Nt' not in inputs:\n if 'T' in inputs:\n Nt = math.ceil(T / dt) # The number of timesteps to simulate\n else:\n print(\"Error: No simulation time provided!\")\n exit()\nelse:\n T = dt*Nt\n\nprint(\"Simulation Time: \\t {:0.2e} seconds\".format(T))\n\nprint(\"------------------------\")\nprint(\"-- Initializing Solutions and Operators...\")\n\n# Allocate space for solution vectors\nu = [np.zeros(domain[\"shape\"]),\n np.zeros(domain[\"shape\"]),\n np.zeros(domain[\"shape\"])] # Solution is a list of arrays\n\nx_init = math.floor(x_init/domain['h'][1])\ny_init = math.floor(y_init/domain['h'][0])\nu[0][y_init, x_init] = 1\nu[1][y_init, x_init] = 1\n\n# Construct stepping operator [M]\nlaplacian = cd_1d_matrix_ND_v2(2, 0, domain) + cd_1d_matrix_ND_v2(2, 1, domain)\nM = dt * D * laplacian + sparse.eye(domain[\"size\"])\n\nprint(\"Domain: \" + str(domain))\nprint(\"Operator Size: {:d}, {:d}\".format(np.shape(M)[0], np.shape(M)[1]))\nprint(\"Solution Size: {:d}, {:d}\".format(np.shape(u[2])[0], np.shape(u[2])[1]))\n\nnp.set_printoptions(precision=1, suppress=True)\n\nprint(\"------------------------\")\nprint(\"-- Running FDTD simulation...\")\n\nmilestones = np.arange(10) * math.ceil(Nt/10)\nfor i in range(Nt):\n if np.sum(i == milestones) != 0 and i > 0:\n print(\"{:d}%\".format(math.ceil(100*(i/Nt))))\n t = dt*i # The time (in seconds)\n\n # First, apply the stepping operator to the internal nodes\n u = [i.reshape([domain['size']], order=\"F\") for i in u]\n u[2] = M.dot(u[1])# + u[0] # Apply stepping operator\n u = [i.reshape(domain['shape'], order=\"F\") for i in u]\n\n # Update solution for the next timestep\n u[0] = np.copy(u[1])\n u[1] = np.copy(u[2])\n\nprint(\"------------------------\")\nprint(\"-- Plotting Results...\")\n\nY = np.arange(domain['shape'][0])*domain['h'][0]\nX = np.arange(domain['shape'][1])*domain['h'][1]\n\nX, Y = np.meshgrid(X, Y)\n\nprint(\"Plotting 2D Colormap...\")\nfig = plt.figure()\nax = plt.axes()\nax.contourf(X, Y, u[2], 100)\nax.set_xlabel('x')\nax.set_ylabel('y')\nplt.show()\n\nprint(\"Plotting 3D Surface Rendering...\")\nfig = plt.figure()\nax = plt.axes(projection='3d')\nax.contourf(X, Y, u[2], 100)\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('u(x,y)')\nplt.ion()\nax.view_init(50, -45)\nplt.show()\n\nprint(\"Done! 
Runtime: {:.2f} seconds\".format(time.time() - start_time))\n","sub_path":"ps2_2D_diffusion_alt.py","file_name":"ps2_2D_diffusion_alt.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"572261869","text":"import backoff\nimport requests\n\nfrom django import http\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\n\n\nclass OIDCEndpointRequestError(Exception):\n \"\"\"Happens when the server-to-server communication with the OIDC\n endpoint succeeds but the OIDC endpoints responds with a status code\n less than 500 and not equal to 200 or 401.\"\"\"\n\n\nclass AuthenticationMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.process_request(request)\n if not response:\n response = self.get_response(request)\n return response\n\n def process_request(self, request):\n if request.path.startswith(\"/api/\"):\n if request.headers.get(\"Authorization\") or request.method != \"GET\":\n header_value = request.headers[\"Authorization\"]\n if not header_value:\n return http.JsonResponse(\n {\"error\": \"No 'Authorization' header\"}, status=403\n )\n try:\n access_token = header_value.split(\"Bearer\")[1].strip()\n except IndexError:\n return http.JsonResponse(\n {\"error\": \"invalid header value\"}, status=403\n )\n cache_key = \"bearer-to-user-info:{}\".format(access_token[:12])\n user = cache.get(cache_key)\n was_in_cache = user is not None\n if not was_in_cache:\n user_info = self.fetch_oidc_user_profile(access_token)\n if user_info:\n user_model = get_user_model()\n try:\n user = user_model.objects.get(email=user_info[\"email\"])\n cache.set(cache_key, user, 60 * 60)\n except user_model.DoesNotExist:\n return http.JsonResponse(\n {\"error\": \"Not creating users\"}, status=403\n )\n if not user:\n return http.JsonResponse(\n {\"error\": \"access_token invalid\"}, status=403\n )\n request.user = user\n if not was_in_cache:\n user_logged_in.send(\n sender=user.__class__, request=request, user=user\n )\n\n request.csrf_processing_done = True\n\n @backoff.on_exception(\n backoff.constant, requests.exceptions.RequestException, max_tries=5\n )\n def fetch_oidc_user_profile(self, access_token):\n url = settings.OIDC_USER_ENDPOINT\n response = requests.get(\n url, headers={\"Authorization\": \"Bearer {}\".format(access_token)}\n )\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 401:\n # The OIDC provider did not like the access token.\n raise PermissionDenied(\"Unauthorized access token\")\n elif response.status_code >= 500:\n raise requests.exceptions.RequestException(\n \"{} on {}\".format(response.status_code, url)\n )\n\n # This could happen if, for some reason, we're not configured to be\n # allowed to talk to the OIDC endpoint.\n raise OIDCEndpointRequestError(response.status_code)\n","sub_path":"peterbecom/api/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"60306634","text":"import src.color_detector as cd\nimport cv2\nimport numpy as np\nimport math\n\n\nclass RoadFollower:\n\n # frame's factors\n __IMG_SIZE = (640, 480)\n __BLUR_K_SIZE = (10, 10)\n # horizontal 
window crop\n __HW_X1 = 0\n __HW_X2 = __IMG_SIZE[0]\n __HW_Y1 = 150\n __HW_Y2 = __HW_Y1 + 80\n # vertical window crop\n __VW_X1 = int(__IMG_SIZE[0] / 2 - 70)\n __VW_X2 = __VW_X1 + 140\n __VW_Y1 = 100\n __VW_Y2 = __VW_Y1 + 250\n\n # default prop factor\n __Kp = 0.0015\n\n def __init__(self):\n self.__colorDetectV = cd.ColorFilter()\n self.__colorDetectH = cd.ColorFilter()\n self.__kp = RoadFollower.__Kp\n\n def update_frame(self, image):\n try:\n if image.shape[0] != RoadFollower.__IMG_SIZE[1] or image.shape[1] != RoadFollower.__IMG_SIZE[0]:\n image = cv2.resize(image, RoadFollower.__IMG_SIZE)\n image = cv2.blur(image, RoadFollower.__BLUR_K_SIZE)\n self.__colorDetectH.img = image[RoadFollower.__HW_Y1:RoadFollower.__HW_Y2,\n RoadFollower.__HW_X1:RoadFollower.__HW_X2]\n self.__colorDetectV.img = image[RoadFollower.__VW_Y1:RoadFollower.__VW_Y2,\n RoadFollower.__VW_X1:RoadFollower.__VW_X2]\n except AttributeError:\n print(\"update_frame: Can't update, frame not found!!!\")\n\n def set_thresholds(self, v_thresh, h_thresh):\n self.__colorDetectV.color_thresholds = v_thresh\n self.__colorDetectH.color_thresholds = h_thresh\n\n def filter(self):\n try:\n self.__colorDetectV.process()\n self.__colorDetectH.process()\n except cv2.error:\n print(\"filter: Can't process, image not found!!!\")\n\n def compute_deviation(self):\n try:\n non_zeros_left = cv2.countNonZero(self.__colorDetectH.mask[:, 0:320])\n non_zeros_right = cv2.countNonZero(self.__colorDetectH.mask[:, 320:640])\n delta = non_zeros_right - non_zeros_left\n return delta * self.__kp / (1.0 - self.__kp)\n except TypeError:\n print(\"compute_deviation: Can't compute, image not found!!!\")\n\n def display_masks(self):\n try:\n cv2.imshow('mask V', self.__colorDetectV.mask)\n cv2.imshow('mask H', self.__colorDetectH.mask)\n except cv2.error:\n print(\"display_masks: Can't display, image not found!!!\")\n\n\nif __name__ == '__main__':\n # img = cv2.imread('../img/road_3.jpg')\n # road_follow = RoadFollower()\n # road_follow.update_frame(img)\n # road_follow.filter()\n # road_follow.display_masks()\n # print(road_follow.compute_deviation())\n # cv2.waitKey(0)\n\n road_follow = RoadFollower()\n cap = cv2.VideoCapture(\"../video/MOV_0304.mp4\")\n simulation = np.zeros((200, 400, 3), np.uint8)\n\n while cap.isOpened():\n ret, frame = cap.read()\n if frame is None:\n break\n frame = cv2.resize(frame, (640, 480))\n road_follow.update_frame(frame)\n road_follow.filter()\n road_follow.display_masks()\n print(road_follow.compute_deviation())\n alpha = road_follow.compute_deviation()\n if not 45.0 >= alpha >= -45.0:\n alpha = 45.0\n point_a = (200, 150)\n point_b = (int(200 + 100 * math.tan(math.radians(alpha))), 50)\n cv2.line(simulation, point_a, point_b, (255, 0, 0), thickness=5)\n cv2.imshow(\"sim\", simulation)\n simulation = np.zeros((200, 400, 3), np.uint8)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n","sub_path":"raspberry/image-processing/src/road_follower.py","file_name":"road_follower.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"81113586","text":"import sys\nsys.stdin = open(\"자성체_input.txt\", \"r\")\n\nT = 10\nfor tc in range(1, T+1):\n N = int(input())\n arr = [list(map(int, input().split())) for _ in range(N)]\n\n # print(N)\n # print(arr)\n\n cnt = 0\n for i in range(N):\n charge = 0\n for j in range(N):\n if arr[j][i] == 1:\n charge = 1\n elif arr[j][i] == 2:\n if charge == 1:\n cnt += 1\n 
charge = 0\n\n\n print(f\"#{tc} {cnt}\")\n","sub_path":"Algorithm/python 파일/20190218/자성체.py","file_name":"자성체.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"527125966","text":"from flask_wtf import FlaskForm\nfrom wtforms.fields import StringField\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectField\nfrom webapp.models import Manufacturer, DeviceType\n\n\nclass SearchForm(FlaskForm):\n class Meta:\n csrf = False\n\n q = StringField('Fehlersuche')\n manufacturer = QuerySelectField(\n query_factory=Manufacturer.query_factory_all,\n get_pk=lambda i: i.id,\n get_label=lambda i: i.name,\n allow_blank=True, blank_text='Hersteller wählen'\n )\n device_type = QuerySelectField(\n query_factory=DeviceType.query_factory_all,\n get_pk=lambda i: i.id,\n get_label=lambda i: i.name,\n allow_blank=True, blank_text='Geräteart wählen'\n )\n","sub_path":"webapp/views/search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"653494223","text":"import xlwt\nimport requests\nfrom lxml import etree\n\n\n# book = xlwt.Workbook(encoding='utf-8')# 创建工作簿\n# sheet = book.add_sheet('Sheet1')#创建工作表\n# sheet.write(0, 0, 'python')#向工作表中添加数据\n# sheet.write(1, 1, 'love')\n# # book.save('books.xls')#保存\n\n\nurls = ['https://www.qidian.com/all/?page={}'.format(str(i)) for i in range(1,6)]\nheaders = {\n 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'\n}\nall_info_list = []\n\nif __name__ == '__main__':\n for url in urls:\n res = requests.get(url = url, headers = headers)\n res.encoding = res.apparent_encoding\n print(res.status_code)\n slector = etree.HTML(res.text)\n infos = slector.xpath('//div[@class = \"book-img-text\"]/ul/li')\n # print(infos)\n for info in infos:\n title = info.xpath('div[2]/h4/a/text()')[0]#返回的是一个list 利用索引,把值取出来\n # print(title)\n author = info.xpath('div[2]/p[1]/a[1]/text()')[0]\n # print(author)\n style_1 = info.xpath('div[2]/p[1]/a[2]/text()')[0]\n style_2 = info.xpath('div[2]/p[1]/a[3]/text()')[0]\n style = style_1 + '.' + style_2\n # print(style)\n complete = info.xpath('div[2]/p[1]/span/text()')[0]\n # print(complete)\n introduce = info.xpath('div[2]/p[@class = \"intro\"]/text()')[0].strip()#strip清洗字符串对象的数据,去掉没用的换行符\n # print(introduce)\n word = info.xpath('div[2]/p[3]/span/text()')[0].strip('')#清洗数据\n # print(word)\n info_list = [title,author,style,complete,introduce,word]\n all_info_list.append(info_list)\n book = xlwt.Workbook(encoding='utf-8')\n sheet = book.add_sheet('sheet3')\n header = ['titles', 'authors', 'styles', 'completes', 'introduces', 'words']\n for i in range(len(header)):\n sheet.write(0, i, header[i])\n\n x = 1\n for list in all_info_list:\n y = 0\n for data in list:\n sheet.write(x, y, data)\n y += 1\n x += 1\n book.save('起点中文网.xls')","sub_path":"Python爬虫/qidianbookDemo.py","file_name":"qidianbookDemo.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"315870709","text":"from django.conf.urls import url\r\nfrom . 
import views\r\n\r\napp_name = 'base'\r\n\r\nurlpatterns = [\r\n url('^$', views.IndexView, name='index'),\r\n url('^login', views.LoginView, name='login'),\r\n url(r'^trylogin/$', views.login_user, name='trylogin'),\r\n url(r'^logout/$', views.logout_user, name='logout'),\r\n]","sub_path":"base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"211338784","text":"import io\r\nimport socket\r\nimport struct\r\nimport time\r\nfrom PIL import Image\r\nimport os\r\nimport time\r\n\r\ndef get_time():\r\n now = time.localtime()\r\n s = \"%04d%02d%02d-%02d%02d%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\r\n return s\r\n\r\n# edgesdf\r\n# Start a socket listening for connections on 0.0.0.0:8000 (0.0.0.0 means\r\n# all interfaces)\r\nserver_socket = socket.socket()\r\nserver_socket.bind(('192.168.99.101', 80))\r\nserver_socket.listen(0)\r\n#test\r\n# Accept a single connection and make a file-like object out of it\r\nprint('server start')\r\n\r\nconnection = server_socket.accept()[0].makefile('rb')\r\ntry:\r\n print('client accept')\r\n cnt = 0\r\n #folder_num = 1\r\n dir_name = get_time()\r\n os.mkdir('/home/jeong/fall_detection_online/test/rec/'+ str(dir_name) +'/')\r\n while True:\r\n # Read the length of the image as a 32-bit unsigned int. If the\r\n # length is zero, quit the loop\r\n image_len = struct.unpack('\n\n\nclass UnifiedPaintPanel():\n # subclass must set\n # bl_space_type = 'IMAGE_EDITOR'\n # bl_region_type = 'UI'\n\n @staticmethod\n def paint_settings(context):\n toolsettings = context.tool_settings\n\n if context.sculpt_object:\n return toolsettings.sculpt\n elif context.vertex_paint_object:\n return toolsettings.vertex_paint\n elif context.weight_paint_object:\n return toolsettings.weight_paint\n elif context.image_paint_object:\n return toolsettings.image_paint\n elif context.particle_edit_object:\n return toolsettings.particle_edit\n\n return None\n\n @staticmethod\n def unified_paint_settings(parent, context):\n ups = context.tool_settings.unified_paint_settings\n parent.label(text=\"Unified Settings:\")\n row = parent.row()\n row.prop(ups, \"use_unified_size\", text=\"Size\")\n row.prop(ups, \"use_unified_strength\", text=\"Strength\")\n if context.weight_paint_object:\n parent.prop(ups, \"use_unified_weight\", text=\"Weight\")\n\n @staticmethod\n def prop_unified_size(parent, context, brush, prop_name, icon='NONE', text=\"\", slider=False):\n ups = context.tool_settings.unified_paint_settings\n ptr = ups if ups.use_unified_size else brush\n parent.prop(ptr, prop_name, icon=icon, text=text, slider=slider)\n\n @staticmethod\n def prop_unified_strength(parent, context, brush, prop_name, icon='NONE', text=\"\", slider=False):\n ups = context.tool_settings.unified_paint_settings\n ptr = ups if ups.use_unified_strength else brush\n parent.prop(ptr, prop_name, icon=icon, text=text, slider=slider)\n\n @staticmethod\n def prop_unified_weight(parent, context, brush, prop_name, icon='NONE', text=\"\", slider=False):\n ups = context.tool_settings.unified_paint_settings\n ptr = ups if ups.use_unified_weight else brush\n parent.prop(ptr, prop_name, icon=icon, text=text, slider=slider)\n\n\n# Used in both the View3D toolbar and texture properties\ndef brush_texture_settings(layout, brush, sculpt):\n tex_slot = brush.texture_slot\n\n layout.label(text=\"Brush Mapping:\")\n\n # map_mode\n if sculpt:\n layout.row().prop(tex_slot, 
\"map_mode\", text=\"\")\n layout.separator()\n else:\n layout.row().prop(tex_slot, \"tex_paint_map_mode\", text=\"\")\n layout.separator()\n\n if tex_slot.map_mode == 'STENCIL':\n if brush.texture and brush.texture.type == 'IMAGE':\n layout.operator(\"brush.stencil_fit_image_aspect\")\n layout.operator(\"brush.stencil_reset_transform\")\n\n # angle and texture_angle_source\n if brush.brush_capabilities.has_texture_angle:\n col = layout.column()\n col.label(text=\"Angle:\")\n row = col.row(align=True)\n if brush.brush_capabilities.has_texture_angle_source:\n if brush.brush_capabilities.has_random_texture_angle:\n if sculpt:\n if brush.sculpt_capabilities.has_random_texture_angle:\n row.prop(brush, \"texture_angle_source_random\", text=\"\")\n else:\n row.prop(brush, \"texture_angle_source_no_random\", text=\"\")\n\n else:\n row.prop(brush, \"texture_angle_source_random\", text=\"\")\n else:\n row.prop(brush, \"texture_angle_source_no_random\", text=\"\")\n\n row.prop(tex_slot, \"angle\", text=\"\")\n\n # scale and offset\n split = layout.split()\n split.prop(tex_slot, \"offset\")\n split.prop(tex_slot, \"scale\")\n\n if sculpt:\n # texture_sample_bias\n col = layout.column(align=True)\n col.label(text=\"Sample Bias:\")\n col.prop(brush, \"texture_sample_bias\", slider=True, text=\"\")\n\n\ndef brush_mask_texture_settings(layout, brush):\n mask_tex_slot = brush.mask_texture_slot\n\n layout.label(text=\"Mask Mapping:\")\n\n # map_mode\n layout.row().prop(mask_tex_slot, \"mask_map_mode\", text=\"\")\n layout.separator()\n\n if mask_tex_slot.map_mode == 'STENCIL':\n if brush.mask_texture and brush.mask_texture.type == 'IMAGE':\n layout.operator(\"brush.stencil_fit_image_aspect\").mask = True\n layout.operator(\"brush.stencil_reset_transform\").mask = True\n\n col = layout.column()\n col.label(text=\"Angle:\")\n col.active = brush.brush_capabilities.has_texture_angle\n col.prop(mask_tex_slot, \"angle\", text=\"\")\n\n # scale and offset\n split = layout.split()\n split.prop(mask_tex_slot, \"offset\")\n split.prop(mask_tex_slot, \"scale\")\n","sub_path":"scripts/startup/bl_ui/properties_paint_common.py","file_name":"properties_paint_common.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"541163384","text":"print(\"Hello! This is a 'Prime Number Detection Program'...\")\r\nprint(\"Please enter some integer numbers one after another...\")\r\nprint(\"You can enter max. 20 different integer numbers...\")\r\nprint(\"Or you can always finish entering numbers by pressing '0'...\" )\r\nprint(\"Now let's begin...\")\r\n\r\nnumber_list= []\r\nnumber = \"Bu programin šalismasi išin number'i burada tanimliyorum.\"\r\ncounter = 0\r\nPrime_number_list = []\r\n\r\ndef prime_number_finder():\r\n\r\n for index in range(len(number_list)):\r\n for i in range (2, number_list[index]):\r\n if number_list[index] % i == 0:\r\n break\r\n else:\r\n Prime_number_list.append(number_list[index])\r\n break\r\n\r\ndef result():\r\n print(\"We have checked the numbers you have entered..\")\r\n print(\"These are the Prime Numbers that we have found among the numbers you have entered:\")\r\n prime_number_finder()\r\n print(\"Prime_numbers....:\", Prime_number_list)\r\ndef user_input():\r\n global counter\r\n global number\r\n while counter < 21:\r\n try:\r\n number = int(input(\"Please enter an integer number...:\"))\r\n \r\n if number == 0:\r\n print(\"You have terminated the program! 
Please wait for the result...\")\r\n result()\r\n\r\n elif number in number_list:\r\n print(\"You have already entered this number. Please enter an another integer number...:\")\r\n\r\n else:\r\n number_list.append(number)\r\n if number == 1 :\r\n Prime_number_list.append(number)\r\n if number == 2 :\r\n Prime_number_list.append(number)\r\n counter +=1\r\n if counter == 20:\r\n print(\"You have reached the entering limit..! Please wait for the results\")\r\n result()\r\n \r\n except ValueError:\r\n print(\"You did not entered an integer number. Please try again...:\")\r\n \r\nuser_input()\r\n","sub_path":"Prime_Number_Finder.py","file_name":"Prime_Number_Finder.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"203219914","text":"\"\"\"\n请求完成后, 有时需要让用户知道状态发生了变化。这里可以使用确认消息、警告或者错误提醒。\n\"\"\"\nfrom flask import Flask, render_template, session, redirect, url_for, flash\nfrom flask_wtf import Form\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'code'\n\n\nclass NameForm(Form):\n name = StringField(\"What's your name?\", validators=[DataRequired()])\n submit = SubmitField('Submit')\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n form = NameForm()\n if form.validate_on_submit():\n session['name'] = form.name.data\n return redirect(url_for('index'))\n else:\n # 在这里添加flash消息,并且需要在模板中渲染flash消息\n \"\"\"\n 在模板中使用循环是因为在之前的请求循环中每次调用flash()函数时都会生成一个消息,所以有可能有多个消息在排队等待显示。\n get_flashed_messages() 函数获取的消息在下次调用时不会再次返回,因此Flash消息只显示一次,然后就消失了。\n \"\"\"\n flash(\"Please type your name here\")\n if form.name.data != \"jhon\":\n flash('Welcome Stranger!')\n else:\n flash('Oh~ I love Gakki too~')\n return render_template('demo3_把表单渲染成HTML.html', form=form, name=session.get('name'))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"F_flask/Part3_Web表单/demo6_Flash消息.py","file_name":"demo6_Flash消息.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"379360330","text":"import unittest\n\n# Definition for a binary tree node.\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\ndef helper(root, result):\n if root:\n helper(root.left, result)\n result.append(root.val)\n helper(root.right, result)\n\n\ndef recursiveInorderTraversal(root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n result = []\n helper(root, result)\n return result\n\n\ndef iterativeInorderTraversal(root):\n if not root:\n return []\n results, stack = [], []\n while stack or root:\n if root:\n stack.append(root)\n root = root.left\n else:\n root = stack.pop()\n results.append(root.val)\n root = root.right\n return results\n\n\nclass TestInorderTraversal(unittest.TestCase):\n def test_inorder_recursive(self):\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(3)\n expected = [1, 3, 2]\n result = recursiveInorderTraversal(root)\n self.assertEquals(result, expected)\n\n def test_inorder_iterative(self):\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(3)\n expected = [1, 3, 2]\n result = iterativeInorderTraversal(root)\n self.assertEquals(result, expected)\n\n\nif __name__ == \"__main__\":\n 
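# --- Illustrative usage, not a dataset record: the traversal functions from
# --- the binary_tree_inorder_traversal.py record above visit left subtree,
# --- node, then right subtree, so a binary search tree comes out sorted.
demo = TreeNode(4)
demo.left = TreeNode(2)
demo.right = TreeNode(6)
assert recursiveInorderTraversal(demo) == [2, 4, 6]
assert iterativeInorderTraversal(demo) == [2, 4, 6]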
unittest.main()\n","sub_path":"binary_tree_inorder_traversal.py","file_name":"binary_tree_inorder_traversal.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"448461040","text":"from http.client import HTTPConnection\r\nfrom urllib.parse import urlparse\r\nimport urllib3\r\nimport sys\r\n\r\nsys.path.append('src')\r\n\r\nfrom agent import TunnelHttpAgent\r\n\r\n\r\nauth = {\r\n 'login': 'TEST-LOGIN',\r\n 'password': 'TEST-PASSWORD'\r\n}\r\n\r\nheaders = urllib3.make_headers(\r\n keep_alive=True,\r\n disable_cache=True,\r\n accept_encoding=True,\r\n user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'\r\n)\r\n\r\n\r\ndef request(url=None, method=None, timeout=None, proxy=None, client=None):\r\n parsed_url = urlparse(url)\r\n scheme = parsed_url.scheme\r\n host = parsed_url.netloc\r\n\r\n if scheme == 'http':\r\n port = 80\r\n else:\r\n port = 443\r\n\r\n tunnelHttpAgent = TunnelHttpAgent(\r\n host='127.0.0.1',\r\n port=8080,\r\n server_name=host,\r\n server_port=port,\r\n auth=auth,\r\n timeout=timeout,\r\n proxy=proxy,\r\n client=client,\r\n )\r\n\r\n connection = HTTPConnection(\r\n host=host,\r\n port=port,\r\n timeout=timeout\r\n )\r\n connection.sock = tunnelHttpAgent.sock\r\n connection.request(\r\n method=method,\r\n url=parsed_url.path,\r\n body=parsed_url.query,\r\n headers=headers\r\n )\r\n return connection\r\n\r\n\r\nif __name__ == '__main__':\r\n url = 'https://www.howsmyssl.com/a/check'\r\n method = 'GET'\r\n r = request(\r\n url=url,\r\n method=method,\r\n timeout=5,\r\n proxy='',\r\n client='CHROME'\r\n )\r\n data = r.getresponse().read().decode('utf-8')\r\n print(data)\r\n","sub_path":"tests/howmyssl-test.py","file_name":"howmyssl-test.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"5604993","text":"class RandomizedSet(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.indices = {}\n self.vec = []\n\n def insert(self, val):\n \"\"\"\n Inserts a value to the set. Returns true if the set did not already contain the specified element.\n :type val: int\n :rtype: bool\n \"\"\"\n if val not in self.indices:\n self.indices[val] = len(self.vec)\n self.vec.append(val)\n return True\n return False\n\n def remove(self, val):\n \"\"\"\n Removes a value from the set. 
Returns true if the set contained the specified element.\n :type val: int\n :rtype: bool\n \"\"\"\n if val not in self.indices:\n return False\n if len(self.vec) == 1 or self.vec[-1] == val:\n self.vec.pop()\n del self.indices[val]\n else:\n idx = self.indices[val]\n self.indices[self.vec[-1]] = idx\n self.vec[idx] = self.vec[-1]\n del self.indices[val]\n self.vec.pop()\n return True\n\n def getRandom(self):\n \"\"\"\n Get a random element from the set.\n :rtype: int\n \"\"\"\n return random.choice(self.vec)\n\n\n# Your RandomizedSet object will be instantiated and called as such:\n# obj = RandomizedSet()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = obj.getRandom()\n","sub_path":"Insert Delete GetRandom O(1).py","file_name":"Insert Delete GetRandom O(1).py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"115549454","text":"def main():\n for a in range(int(1000 / 3)):\n for b in range(a + 1, int(1000 / 2)):\n c = 1000 - a - b\n if a ** 2 + b ** 2 == c ** 2:\n print(str(a) + \" \" + str(b) + \" \" + str(c))\n print(a * b * c)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Problem09/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"417576163","text":"# Worked exercise 4.6\n# Pay Computation: 1.5 * rate when overwork < 40\n# worked hours = 45\n# rate = 10\n# answer = 475\n\ndef computepay(hours, rate):\n print('in computepay:', hours, rate)\n\n print('''\n HOURS = %d\n RATE = %d\n '''%(hours, rate))\n\n if hours <= 40.0:\n pay = hours * rate\n elif hours > 40.0:\n overhourPay = (hours-40)*(rate*0.5)\n regPay = hours * rate\n pay = overhourPay + regPay\n print('RETURNING: '+ str(pay))\n return(pay)\n\nwhile True:\n hours = input('HOURS: ')\n rate = input('RATE: ')\n try:\n fh = float(hours)\n fr = float(rate)\n break\n except ValueError:\n print('PLEASE TYPE IN A VALID NUMBER.')\n continue\n\nxp = computepay(fh, fr)\nprint('Pay:', xp)\n","sub_path":"ex_04_06.py","file_name":"ex_04_06.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"591632220","text":"N, Y = map(int, input().split())\nans = [-1,-1,-1]\nflag=False\nfor i in range(N+1):\n if i * 10000 > Y: break\n for j in range(N-i+1):\n if (i*10000 + j*5000 + (N-i-j)*1000) == Y:\n ans = [i,j,N-i-j]\n flag =True\n break\n if flag:\n break\nprint('{} {} {}'.format(*ans))","sub_path":"Python_codes/p03471/s207872816.py","file_name":"s207872816.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"417650222","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport sklearn.linear_model as sk_linear_models\nimport sklearn.metrics as sk_metrics\n\n\nqualifies_single_grade = pd.read_csv('data/single_grade.csv')\nqualifies_single_grade.sort_values(by=[\"grade\", \"qualifies\"], inplace=True) # inplace заменяет исходный набор на отсортированный\nprint(qualifies_single_grade)\n\nX = qualifies_single_grade[[\"grade\"]]\ny = qualifies_single_grade[\"qualifies\"]\n\nplt.scatter(X, y)\n\nqualificaion_model = sk_linear_models.LogisticRegression()\nqualificaion_model.fit(X, y) # обучение\n\nmodel_qualification = qualificaion_model.predict(X)\nmodel_qualification_probability = 
qualificaion_model.predict_proba(X)[:, 1] # предсказанные данные из модели\n\nqualifies_single_grade[\"modeled probability\"] = model_qualification_probability\nprint(qualifies_single_grade)\n\nconfusion_matrix = sk_metrics.confusion_matrix(y, model_qualification) # матрица спутанности\nprint(confusion_matrix)\n# [[19 3]\n# [ 2 16]]\n# Acc = (19+16)/40 = 0.875\n# Err = 1-Acc = (2+3)/40 = 0.125\n# Pr = 16/(3+16) = 0.84 - точнсть\n# Rec = 16/(2+16) = 0.89 - чувствительность\n\nsk_metrics.accuracy_score(y, model_qualification)\nprint(f\"Accuracy: {sk_metrics.accuracy_score(y, model_qualification)}\")\nprint(f\"Error: {1-sk_metrics.accuracy_score(y, model_qualification)}\")\nprint(f\"Recall: {sk_metrics.recall_score(y, model_qualification)}\")\n\n\nplt.plot(X, model_qualification, color=\"r\")\nplt.plot(X, model_qualification_probability, color=\"g\")\nplt.show()\n\n","sub_path":"bach8.py","file_name":"bach8.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"117139977","text":"from flask_restful import fields\n\nclass Resource:\n def __init__(self, resource, *urls, **kwargs):\n self.resource = resource\n self.urls = urls\n self.kwargs = kwargs\n\n\nclass Response:\n \"\"\"\n 统一响应格式\n \"\"\"\n\n def __init__(self, code: int = 0, msg: str = 'ok', data=None):\n self.code = code\n self.msg = msg\n self.data = data or {}\n\n\ndef make_fields(data=None) -> dict:\n return {\n 'code': fields.Integer,\n 'msg': fields.String,\n 'data': fields.Nested(data or {})\n }\n","sub_path":"admin/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"298348152","text":"import warnings\nfrom typing import *\n\nfrom manim.utils.iterables import list_update\nfrom manim.animation.creation import ShowCreation\nfrom manim.animation.transform import Transform, ApplyMethod\nfrom manim.animation.composition import AnimationGroup\nfrom manim.mobject.types.vectorized_mobject import VGroup\nfrom manim.mobject.geometry import Line\nfrom manim.mobject.svg.tex_mobject import MathTex, Tex\nfrom manim.mobject.svg.text_mobject import Text\nfrom manim.mobject.numbers import DecimalNumber, Integer\nimport manim.utils.color as C\nfrom manim.constants import *\n\n\nclass Table(VGroup):\n\n def __init__(self, tabledict, **kwargs):\n super().__init__()\n\n self.config = {\n \"data\": tabledict,\n \"vbuff_length\": 0.3,\n \"hbuff_length\": 0.3,\n \"buff_length\": 0.3,\n \"line_color\": C.WHITE,\n \"raw_string_color\": C.WHITE,\n }\n\n self.config.update(kwargs)\n\n self.data = tabledict\n\n self.buff_length = self.config[\"buff_length\"]\n self.vbuff_length = self.config[\"vbuff_length\"]\n self.hbuff_length = self.config[\"hbuff_length\"]\n\n self.line_color = self.config[\"line_color\"]\n self.raw_string_color = self.config[\"raw_string_color\"]\n\n self._draw_table() # Make the table with the parameters in config\n self.move_to(ORIGIN)\n\n def _draw_table(self):\n # if the headers and columns are not manim objects, convert them\n self._type_convert_table_header()\n self._type_convert_table_columns()\n\n self._set_dimensions()\n\n self._draw_headers()\n self._draw_columns()\n self._draw_table_lines()\n\n def _type_convert_table_header(self):\n for key in self.data.keys():\n if isinstance(key, (float, int)):\n key = str(key)\n\n if isinstance(key, (MathTex, Text, DecimalNumber, Integer)) == False:\n 
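# --- Illustrative addition, not part of the bach8.py record above: its
# --- comments derive precision = 16/(3+16) by hand, but the script never
# --- prints it; sklearn.metrics exposes the same value directly.
print(f"Precision: {sk_metrics.precision_score(y, model_qualification)}")
# For the confusion matrix quoted in the record this is 16/19, about 0.842.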
self.data[MathTex(\n key, fill_color=self.raw_string_color)] = self.data.pop(key)\n\n def _type_convert_table_columns(self):\n def convert(val):\n if isinstance(val, (float, int)):\n val = str(val)\n if isinstance(val, (MathTex, Text, DecimalNumber, Integer)) == False:\n return Tex(val, fill_color=self.raw_string_color)\n return val\n\n for key, vals in self.data.items():\n self.data[key] = list(map(convert, vals))\n\n def _set_dimensions(self):\n fields = list(self.data.keys())\n # find the max width and height across all headers and row items\n dims = [(i.get_width(), i.get_height())\n for column in self.data.values() for i in column] + \\\n [(i.get_width(), i.get_height()) for i in fields]\n\n max_width, max_height = map(max, map(list, zip(*dims)))\n\n self.cell_length = max_width + 2*self.hbuff_length\n self.cell_height = max_height + 2*self.vbuff_length\n\n # The remaining width and height will be successively added\n self.table_width = (self.cell_length*len(fields))\n # while drawing the Mobjects to the screen. +1 is added to account for headings.\n self.table_height = self.cell_height * \\\n (max(map(len, self.data.values())))+1\n\n def _draw_headers(self):\n # Coordinates of TexMobjects in Manim are taken from centre, hence the /2 in the calculations below\n def buffer(x): return (self.cell_length-x)/2\n\n # The initial position of the first field.\n field_position = [\n self.cell_length / 2,\n 0,\n 0]\n fields = list(self.data.keys())\n for field, next in zip(fields, fields[1:] + [MathTex(\"\")]):\n field.move_to(field_position)\n self.add(field)\n\n # calculate the next field's position\n right_buf, next_left_buf = buffer(field.get_width()), buffer(next.get_width())\n space_to_leave = right_buf + next_left_buf + next.get_width()/2\n\n # next/2: coordinates are taken from center and not left edges\n\n field_position = field.get_right()+(space_to_leave, 0, 0)\n\n def _draw_columns(self):\n for key, records in self.data.items():\n for record in records:\n # the record position is set to be the\n # [\n # center of the field it belongs to,\n # buffer space above the record + centered height of the record,\n # 0\n # ]\n\n record_position = [\n key.get_center()[0],\n -(self.cell_height / 2 + self.cell_height),\n 0]\n for record in records:\n record.move_to(record_position)\n record_position = record.get_center()+(0, - self.cell_height, 0)\n self.add(record)\n\n def _draw_table_lines(self):\n fields = list(self.data.keys())\n\n line_hor = Line(start=(0, -2*self.cell_height/3, 0),\n end=(self.table_width, -2*self.cell_height/3, 0), color=self.line_color)\n self.add(line_hor) # This is the horizontal separator\n\n for l in range(len(fields)-1): # These create the vertical separators.\n line = Line(start=(self[l].get_center() + (self.cell_length/2, self.cell_height/2, 0)),\n end=(self[l].get_center() +\n (self.cell_length/2, -self.table_height, 0)),\n color=self.line_color)\n\n self.add(line)\n\n def add_record(self, record, field_num, record_pos=-1):\n orig_submob_list = list(self.submobjects)\n records_in_required_field = len(\n self.data[list(self.data.keys())[field_num]])\n records_to_skip = 0\n\n if isinstance(record, (MathTex, Text, DecimalNumber, Integer)) == False:\n record = Tex(record) # Mandatory Type Conversions\n\n for i in range(0, field_num): # Until you reach the field where the record should be added\n # skip the records in the field\n records_to_skip += len(\n self.data[list(self.data.keys())[i]])\n\n # Skip all the fields and the records you are supposed to.\n 
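# --- Illustrative note, not a dataset record: the index arithmetic used by
# --- Table.add_record() in the tables.py record above. Submobjects are laid
# --- out flat as all field headers first, then each field's records column
# --- by column, so with hypothetical column lengths the insertion index of a
# --- new record appended to field 1 works out as:
n_fields = 3
column_lengths = [4, 2, 5]                         # assumed, for illustration
records_to_skip = sum(column_lengths[:1])          # records before field 1 -> 4
rec_index = n_fields + records_to_skip + column_lengths[1]  # 3 + 4 + 2 = 9
assert rec_index == 9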
fields_records_to_skip = len(self.data.keys()) + records_to_skip\n\n if record_pos != -1: # If a custom record postion is given\n warnings.warn(\n \"Custom Record Positions are still in Development. May give unwanted results.\")\n rec_index = fields_records_to_skip+record_pos # put the record there\n else:\n # Go to the end of the field and put the record there.\n rec_index = fields_records_to_skip+records_in_required_field\n\n assigned_field = list(self.data.keys())[field_num]\n\n # 1 is added for field name 0.5 is added for half record width\n vert_num = len(\n self.data[list(self.data.keys())[field_num]])+1+0.5\n\n how_far_down = vert_num*self.cell_height/2 # How far down to move.\n\n # Move the record to the assigned fields x coord and move required amount down\n record.move_to(assigned_field.get_center()-(0, how_far_down, 0))\n\n if record_pos == -1:\n self.data[list(self.data.keys())[\n field_num]].append(record)\n else:\n self.data[list(self.data.keys())[field_num]].insert(\n record_pos, record) # add the record to tabledict\n\n # make the new submob list and insert record at propre place.\n new_submob_list = orig_submob_list[:rec_index] + \\\n [record] + orig_submob_list[rec_index:]\n\n self.submobjects = list_update(\n self.submobjects, new_submob_list) # update self\n\n return record\n\n def remove_record(self, field_num, record_num):\n orig_submob_list = list(self.submobjects)\n records_in_required_field = len(\n self.data[list(self.data.keys())[field_num]])\n records_to_skip = 0\n\n for i in range(0, field_num): # Until you reach the field where the record should be added\n # skip the records in the field\n records_to_skip += len(\n self.data[list(self.data.keys())[i]])\n\n # Skip all the fields and the records you are supposed to.\n fields_records_to_skip = len(self.data.keys()) + records_to_skip\n if record_num != -1:\n rec_index = fields_records_to_skip+record_num\n else:\n rec_index = fields_records_to_skip+records_in_required_field-1\n\n self.data[list(self.data.keys())[field_num]].pop(\n record_num) # remove the value from tabledict\n\n return self.submobjects.pop(rec_index)\n\n def get_record(self, field_num, record_num):\n orig_submob_list = list(self.submobjects)\n records_in_required_field = len(\n self.data[list(self.data.keys())[field_num]])\n records_to_skip = 0\n\n for i in range(0, field_num): # Until you reach the field where the record should be added\n # skip the records in the field\n records_to_skip += len(\n self.data[list(self.data.keys())[i]])\n\n # Skip all the fields and the records you are supposed to.\n fields_records_to_skip = len(self.data.keys()) + records_to_skip\n if record_num != -1:\n rec_index = fields_records_to_skip+record_num\n else:\n rec_index = fields_records_to_skip+records_in_required_field-1\n\n return self.submobjects[rec_index]\n\n def get_field(self, field_num):\n return self.submobjects[field_num]\n\n def add_field(self, field, field_pos=-1):\n tabledict = self.data\n cell_height = self.cell_height\n cell_length = self.cell_length\n field_index = len(tabledict)\n\n if isinstance(field, (Text, MathTex, DecimalNumber, Integer)) == False:\n field = Tex(field)\n\n firstfield = self.submobjects[0]\n new_field_pos = firstfield.get_center()+((len(tabledict)*cell_length/2), 0, 0)\n\n field.move_to(new_field_pos)\n\n self.submobjects = self.submobjects[:field_index] + \\\n [field] + self.submobjects[field_index:]\n tabledict[field] = []\n return field\n\n def adjust_lines(self):\n tabledict = self.data\n cell_height = self.cell_height\n 
cell_length = self.cell_length\n\n vertlines = self.submobjects[-(len(tabledict)-1):]\n lowestmobject = min(self.submobjects[0:len(\n self.submobjects)-(len(tabledict))], key=lambda m: m.get_y())\n rightestmobject = max(\n self.submobjects[:len(tabledict)], key=lambda m: m.get_x())\n anims = []\n\n for line in vertlines:\n curr_start, curr_end = line.get_start_and_end()\n # This only happens when a field has been added, but a vertical separator doesnt exist for it.\n if line.get_angle()*DEGREES == 0:\n new_end = np.array(\n curr_end+(rightestmobject.get_x() -\n curr_end[0]+cell_length/4, 0, 0)\n )\n\n newsep = Line( # This is the vertical separator for the new field.\n start=(rightestmobject.get_center() - \\\n (cell_length/4, -cell_height/4, 0)),\n end=(rightestmobject.get_center() - (cell_length/4, + \\\n rightestmobject.get_y()-lowestmobject.get_y()+cell_height/4, 0)),\n color=self.line_color)\n\n anims.append(ShowCreation(newsep))\n self.add(newsep)\n else:\n new_end = np.array(\n (curr_end)+(0, lowestmobject.get_y() -\n curr_end[1]-cell_height/4, 0)\n )\n\n new_line = Line(curr_start, new_end, color=self.line_color)\n # Set the new bottom to the required position\n anims.append(Transform(line, new_line))\n return AnimationGroup(*anims)\n\n def adjust_positions(self):\n cell_height = self.cell_height\n tabledict = self.data\n fields = tabledict.keys()\n anim_list = []\n\n # VERY VERY TACKY. MUST CHANGE:\n class TempData(): # I mean, really? Thats a performance hog if I've ever seen one...\n pos_to_comp = 0\n records = []\n\n for field in fields:\n TempData.records = tabledict[field]\n TempData.pos_to_comp = field.get_center()\n\n for record in TempData.records:\n # if the distance between two records #greater than one cell height\n if np.abs(record.get_center()[1]-TempData.pos_to_comp[1]) > cell_height:\n TempData.pos_to_comp = record.get_center() # Set the position to compare\n\n anim_list.extend(\n [record.shift, (UP*cell_height/2)]\n )\n\n del record\n else:\n TempData.pos_to_comp = record.get_center()\n\n return ApplyMethod(*anim_list)\n","sub_path":"anim_tools/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"186476662","text":"import pprint\nimport urllib.request, json\nfrom firebase import firebase\n\nkey=\"AIzaSyC8cn_tgtIiNErOM5S5pVj8Eu9Y8gUkiXY\"\ngoogle_autocomplete = \"https://maps.googleapis.com/maps/api/place/autocomplete/json\"\ngoogle_detail = \"https://maps.googleapis.com/maps/api/place/details/json\"\ndb = firebase.FirebaseApplication(\"https://touradvisorapp.firebaseio.com/\", None)\nAUTH = False\n\ndef user_auth(user, pwd):\n global AUTH\n users = db.get('/users', user)\n secure_token = list(users)[0]\n if list(db.get('/users', None)).count(user) > 0 and users[secure_token] == pwd:\n AUTH = True\n return True\n return False\n\ndef user_register(user, pwd, confirm_pwd):\n if pwd == confirm_pwd:\n db.post('/users/' + user, pwd)\n return True\n return False\n\ndef add_plan(destination):\n global google_autocomplete\n if AUTH:\n choice = []\n link = google_autocomplete + '?key=' + key + '&input=' + destination.replace(' ', '+')\n print(link)\n with urllib.request.urlopen(link) as url:\n data = json.loads(url.read().decode())['predictions']\n for i in data:\n for j in i:\n if j == 'description':\n choice.append((i[j], i['place_id']))\n return choice\n return False\n\ndef place_detail(place_id):\n global google_detail\n if AUTH:\n link = 
google_detail + '?key=' + key + '&=fields=name,rating,formatted_phone_number' + '&place_id=' + place_id\n print(link)\n with urllib.request.urlopen(link) as url:\n data = json.loads(url.read().decode())\n pprint.pprint(data)\n","sub_path":"ui/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"634502827","text":"class Node(object):\n def __init__(self, data):\n self.data = data\n self.children = []\n\n def add_child(self, node):\n self.children.append(node)\n\n\n\nclass Parser(object):\n def __init__(self, file):\n self.source = open(file)\n self.root = Node('root');\n self.parse_tree(None, 0, self.root)\n\n def parse_tree(self, parent, depth, root = None):\n line = self.source.readline()\n while line:\n tab_count = line.count('\\t')\n if tab_count < depth:\n break\n node = Node(line.strip())\n if root is not None:\n root.data = line.strip()\n node = root\n if tab_count >= depth:\n\n if parent is not None:\n #print(node.data)\n parent.add_child(node)\n line = self.parse_tree(node, tab_count+1)\n return line\n\n def get_tree(self):\n return self.root\n\nr = Parser(\"tree1.txt\").get_tree()\nprint(r.data)\nprint( len(r.children) )\n","sub_path":"Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"495897798","text":"# -*- coding: utf-8 -*-\n#\n# Author: Dixie Flatline \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nModels for ``products`` app.\n\nACHTUNG!!!\n\nAttribute details generation should be further rewritten\nthrough signals!\n\nAt the moment it is realised with through Product.save()\nmethod overload with cross-import from toolshop.details module.\n\"\"\"\nimport os\nimport sys\nimport datetime\nfrom django.db import models\nfrom django.core.urlresolvers import reverse\nfrom mptt.models import MPTTModel, TreeForeignKey\n\n\nclass Catalog(MPTTModel):\n \"\"\"\n Catalogs model that has tree-like structure due to MPTTModel subclassing\n isntead of common models.Model usage.\n \"\"\"\n SUBDIR_VIEW_CHOICES = (\n ('list', 'Список'),\n ('thumbnails', 'Эскизы'),\n )\n parent = TreeForeignKey('self',\n blank=True, null=True,\n related_name='children',\n help_text='Родительский каталог.'\n )\n name = models.CharField(max_length=300,\n help_text='Название каталога.',\n )\n slug = models.SlugField(max_length=150,\n unique=True,\n help_text='Относительный адрес. 
\\\n Должен быть уникальным.'\n )\n visible = models.BooleanField(default=True,\n help_text='Сделать директорию видимой \\\n в дереве разделов.'\n )\n subdir_view = models.CharField(max_length=50,\n choices=SUBDIR_VIEW_CHOICES,\n help_text='Стиль отображения подкаталогов.',\n default=SUBDIR_VIEW_CHOICES[0][0]\n )\n sides_differ = models.BooleanField(default=False,\n help_text='Инструменты различаются \\\n Left/Right?'\n )\n catalog_pictures = models.ForeignKey('attributes.PictureAttributeSet',\n blank=True, null=True,\n #related_name='cat_catalog_pictures',\n help_text='Типы изображений для каталога.',\n on_delete=models.SET_NULL,\n )\n product_sizes = models.ForeignKey('attributes.SizeAttributeSet',\n blank=True, null=True,\n #related_name='cat_product_sizes'\n help_text='Набор размеров для товаров.',\n on_delete=models.SET_NULL,\n )\n product_spare_parts = models.ForeignKey('attributes.SparePartAttributeSet',\n blank=True, null=True,\n help_text='Набор запчастей для них же.',\n on_delete=models.SET_NULL,\n )\n description = models.TextField(blank=True,\n help_text='Описание директории.'\n )\n order = models.PositiveIntegerField()\n\n def __unicode__(self):\n return u'%s' % (self.name)\n\n def get_absolute_url(self):\n return reverse('catalog_view', kwargs={'slug': self.slug})\n\n class MPTTMeta:\n order_insertion_by = ['order']\n\n def save(self, *args, **kwargs):\n # Wooha!\n # Demonstrates crazy attributes example,\n # when every attribute in attributes_set knows\n # what detail to generate.\n from attributes import models as details\n # A Set of Pictures:\n if not self.pk:\n super(Catalog, self).save(*args, **kwargs)\n if self.catalog_pictures_id:\n for attribute in self.catalog_pictures.attributes.all():\n DetailClass = eval('details.' + attribute.attr_type)\n detail = DetailClass(catalog=self, attribute=attribute)\n detail.save()\n else:\n super(Catalog, self).save(*args, **kwargs)\n Catalog.objects.rebuild()\n\n\nclass Product(models.Model):\n catalog = models.ForeignKey('Catalog',\n related_name='products',\n help_text='Содержится в каталоге'\n )\n name = models.CharField(max_length=300,\n help_text='Название товара.'\n )\n slug = models.SlugField(max_length=150,\n unique=True,\n help_text='Относительный адрес. \\\n Должен быть уникальным.'\n )\n description = models.TextField(blank=True,\n help_text='Описание товара.'\n )\n\n class Meta:\n ordering = ['name']\n\n def __unicode__(self):\n return u'%s' % self.name\n\n def get_absolute_url(self):\n return reverse('product_detail', kwargs={'slug': self.slug})\n\n def save(self, *args, **kwargs):\n from attributes import models as details\n if not self.pk:\n super(Product, self).save(*args, **kwargs)\n if self.catalog.product_spare_parts_id:\n for attribute in self.catalog.product_spare_parts.attributes.all():\n DetailClass = eval('details.' 
+ attribute.attr_type)\n detail = DetailClass(product=self, attribute=attribute)\n detail.save()\n # The last ones are not as crazy as before.\n # Because all attributes in set are of the same attribute type,\n # thus we also know what kind of details to generate.\n #\n # Sizes:\n if self.catalog.product_sizes_id:\n for attribute in self.catalog.product_sizes.attributes.all():\n details.ProductSize.objects.create(product=self, attribute=attribute)\n # In Stock Counts:\n if self.catalog.sides_differ:\n details.StockCount.objects.create(product=self, side='L')\n details.StockCount.objects.create(product=self, side='R')\n else:\n details.StockCount.objects.create(product=self, side='N')\n # Pricer:\n details.Price.objects.create(product=self)\n else:\n super(Product, self).save(*args, **kwargs)\n","sub_path":"products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"339044189","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg1 = cv2.imread('/home/kyaking/downloads/pic/rose/DSC_3430.jpg',0)# queryImage\nimg2 = cv2.imread('/home/kyaking/downloads/pic/rose/DSC_3431.jpg',0) # trainImage\n\n# Initiate SIFT detector\norb = cv2.xfeatures2d.SIFT_create()\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = orb.detectAndCompute(img1,None)\nkp2, des2 = orb.detectAndCompute(img2,None)\nprint(len(kp1))\n\n# create BFMatcher object\nbf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)\n# bf = cv.BFMatcher_create(cv2.NORM_L2, crossCheck=False)\n# Match descriptors.\nmatches = bf.match(des1, des2)\n\n# Sort them in the order of their distance.\nmatches = sorted(matches, key = lambda x:x.distance)\n\n# Draw first 10 matches.\nimg3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:], None,flags=2)\nplt.imshow(img3),plt.show()\ncv2.imshow('drawMatches',img3)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"venv/SIFT_BM.py","file_name":"SIFT_BM.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"421242105","text":"from selenium.webdriver.support.wait import WebDriverWait\nfrom pyvirtualdisplay import Display\nfrom selenium import webdriver\n\n\nclass Page:\n \"\"\"\n Basic Page Object to be used as super class.\n \"\"\"\n display = None\n url = None\n\n def __init__(self):\n self.display = Display(visible=0, size=(800, 600))\n self.display.start()\n self.driver = webdriver.Firefox()\n\n def navigate(self):\n \"\"\"\n Proceed the driver to the set url.\n \"\"\"\n self.driver.get(self.url)\n\n def close(self):\n self.driver.close()\n self.display.stop()\n\n\nclass Element:\n \"\"\"\n Provides webdriver's find element functions.\n \"\"\"\n\n def __init__(self, location_strategy, locator):\n self.location_strategy = location_strategy\n self.locator = locator\n\n def __get__(self, instance, owner):\n driver = instance.driver\n WebDriverWait(driver, 100).until(\n lambda driver: driver.find_element(\n self.location_strategy, self.locator\n )\n )\n found_element = driver.find_element(\n self.location_strategy, self.locator\n )\n\n return found_element\n","sub_path":"papukaaniApp/tests/page_models/page_model.py","file_name":"page_model.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"548185739","text":"from uuid import UUID\nfrom django.db.models import 
Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status, viewsets, mixins\nfrom rest_framework.decorators import list_route, detail_route\nfrom bdn.auth.models import User\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.permissions import (IsAuthenticated,\n IsAuthenticatedOrReadOnly)\nfrom rest_framework.response import Response\nfrom haystack.query import SearchQuerySet\nfrom bdn.auth.signature_authentication import SignatureAuthentication\nfrom bdn.auth.utils import get_auth_eth_address\nfrom bdn.industry.models import Industry\nfrom bdn.skill.models import Skill\nfrom bdn.provider.models import Provider\nfrom bdn.profiles.serializers import AcademyProfileSerializer\nfrom .serializers import CourseSerializer, CourseCreateSerializer\nfrom .models import Course\n\n\nclass CourseViewSet(mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n pagination_class = LimitOffsetPagination\n authentication_classes = (SignatureAuthentication,)\n\n def get_permissions(self):\n if self.action and (\n self.action in ('get_by_provider')):\n self.permission_classes = [IsAuthenticatedOrReadOnly, ]\n else:\n self.permission_classes = [IsAuthenticated, ]\n return super(self.__class__, self).get_permissions()\n\n def retrieve(self, request, pk=None):\n course = get_object_or_404(Course, id=pk)\n user = course.provider.user\n serializerCourse = CourseSerializer(course)\n try:\n profile = user.profile\n except AttributeError:\n return Response({\n 'course': serializerCourse.data,\n })\n serializerProfile = AcademyProfileSerializer(profile)\n return Response({\n 'course': serializerCourse.data,\n 'academy': serializerProfile.data\n })\n\n def get_queryset(self):\n search_query = self.request.GET.get('q', '')\n if search_query:\n sqs = SearchQuerySet().filter(title=search_query).models(Course)\n qs = [\n _.object\n for _ in sqs\n if _.object is not None\n ]\n return qs\n\n qs = Course.objects.all()\n qs = qs.filter(self.industry_filter())\n qs = qs.filter(self.featured_filter())\n return qs.order_by('-is_featured')\n\n def industry_filter(self):\n filtered_industries_ids = self.request.query_params.get(\n 'filter_industry', '').split('|')\n industry_filter = Q()\n for filtered_industry_id in filtered_industries_ids:\n try:\n UUID(filtered_industry_id, version=4)\n except ValueError:\n continue\n industry_filter |= Q(industries__id=filtered_industry_id)\n return industry_filter\n\n def featured_filter(self):\n featured_filter = Q()\n if int(self.request.query_params.get('is_featured', 0)) == 1:\n featured_filter = Q(is_featured=True)\n return featured_filter\n\n @list_route(methods=['get'])\n def search(self, request):\n query = self.request.GET.get('q', '')\n sqs = SearchQuerySet().filter(title=query).models(Course)\n serializer = self.get_serializer([\n s.object for s in sqs if s.object is not None\n ], many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @list_route(methods=['get'])\n def autocomplete(self, request):\n AUTOCOMPLETE_SIZE = 10\n sqs = SearchQuerySet()\\\n .filter(title_auto=request.GET.get('q', ''))\\\n .models(Course)[:AUTOCOMPLETE_SIZE]\n serializer = self.get_serializer([\n s.object for s in sqs if s.object is not None], many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @list_route(methods=['get'])\n def get_by_provider(self, request):\n eth_address = 
str(request.GET.get('eth_address')).lower()\n academy = get_object_or_404(User, username=eth_address)\n provider = get_object_or_404(Provider, user=academy)\n qs = Course.objects.all().filter(provider=provider)\n serializer = self.get_serializer(qs, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @detail_route(methods=['get'])\n def get_by_id(self, request, pk=None):\n course = get_object_or_404(Course, id=pk)\n serializer = self.get_serializer(course)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @detail_route(methods=['post'])\n def edit_by_id(self, request, pk=None):\n eth_address = get_auth_eth_address(request.META)\n course = get_object_or_404(Course, id=pk)\n if course.provider.user.username == eth_address:\n serializer = CourseCreateSerializer(\n data=request.data, instance=course, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response({'status': 'ok'})\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n return Response({\n 'status': 'denied'}, status=status.HTTP_401_UNAUTHORIZED)\n\n @detail_route(methods=['post'])\n def mark_featured_by_id(self, request, pk=None):\n eth_address = get_auth_eth_address(request.META)\n course = get_object_or_404(Course, id=pk)\n if course.provider.user.username == eth_address:\n course.is_featured = True\n course.save()\n return Response({'status': 'ok'})\n return Response({\n 'status': 'denied'}, status=status.HTTP_401_UNAUTHORIZED)\n\n def create(self, request, pk=None):\n academy = request.user\n try:\n provider = Provider.objects.get(user=academy)\n except Provider.DoesNotExist:\n return Response({\n 'error': 'Provider not found',\n }, status=status.HTTP_400_BAD_REQUEST)\n skills_post = request.data.get('skills', [])\n skills_lower = [_.lower() for _ in skills_post]\n skills = Skill.objects.filter(name__in=skills_lower)\n industries = Industry.objects.filter(\n name__in=request.data.get('industries', []))\n serializer = CourseCreateSerializer(data=request.data)\n if serializer.is_valid():\n course = serializer.save(\n provider=provider, industries=industries, skills=skills)\n return Response({\n 'status': 'ok',\n 'pk': course.pk,\n })\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n @detail_route(methods=['post'])\n def delete_by_id(self, request, pk=None):\n eth_address = get_auth_eth_address(request.META)\n course = get_object_or_404(Course, id=pk)\n\n if course.provider.user.username == eth_address:\n course.delete()\n return Response({'status': 'ok'})\n else:\n return Response(\n {'status': 'denied'}, status=status.HTTP_401_UNAUTHORIZED)\n","sub_path":"bdn/course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"556114551","text":"import sqlite3\nimport logging\nfrom plotly.offline import plot\nfrom plotly.graph_objs import Scatter, Figure\nfrom flask import Flask, request\n\n \nclass Frontend(Flask):\n \n def __init__(self, database_file):\n super(Frontend, self).__init__(\"Frontend\")\n self.database_file = database_file\n self.reset_db()\n\n @self.route(\"/clear\", methods=[\"POST\"])\n def clear():\n self.reset_db(force=True)\n return \"OK\"\n \n @self.route(\"/\", methods=[\"GET\", \"POST\"])\n def models():\n if request.method == \"GET\":\n model_names = set([m[0] for m in self._cur.execute('''SELECT model_name from metrics''')]) \n model_html = \"\\n\".join([\"
<li><a href='/{}'>{}</a></li>
\".format(m, m) for m in model_names])\n html = \"
<html><head><title>Vivisect server</title></head><body>
{}\".format(model_html).encode() \n return html\n elif request.method == \"POST\":\n j = request.get_json()\n model_name = j[\"metadata\"][\"model_name\"]\n op_name = j[\"metadata\"][\"op_name\"]\n metric_name = j[\"metric_name\"]\n metric_value = j[\"metric_value\"] \n iteration = j[\"metadata\"][\"iteration\"]\n self._cur.execute('''INSERT INTO metrics VALUES (?, ?, ?, ?, ?)''', (model_name, metric_name, op_name, metric_value, iteration))\n self._conn.commit()\n return \"OK\"\n \n @self.route(\"/\", methods=[\"GET\", \"POST\"])\n def model_metrics(model_name):\n metric_names = set([m[0] for m in self._cur.execute('SELECT metric_name from metrics where model_name=?', (model_name,))])\n metric_html = \"\\n\".join([\"
<li><a href='/{}/{}'>{}</a></li>
\".format(model_name, m, m) for m in metric_names])\n html = \"
<html><head><title>Vivisect server|{}</title></head><body>
{}\".format(model_name, metric_html).encode()\n return html\n\n @self.route(\"//\", methods=[\"GET\"])\n def model_metric_ops(model_name, metric_name):\n vals = sorted([x for x in self._cur.execute('SELECT iteration,op_name,metric_value from metrics where model_name=? and metric_name=?', (model_name, metric_name))])\n op_names = sorted(set([o for _, o, _ in vals]))\n plots = []\n for op_name in op_names:\n vals = sorted([x for x in self._cur.execute('SELECT iteration,metric_value from metrics where model_name=? and metric_name=? and op_name=?', (model_name, metric_name, op_name))])\n plots.append(Scatter(x=[x[0] for x in vals], y=[x[1] for x in vals], mode=\"lines\", name=op_name))\n plots_html = plot(plots, output_type=\"div\")\n html = \"
<html><head><title>Vivisect server|{0}|{1}</title></head><body>
{2}\".format(model_name,\n metric_name,\n plots_html).encode()\n return html\n \n def reset_db(self, force=False):\n self._conn = sqlite3.connect(self.database_file, check_same_thread=False)\n self._cur = self._conn.cursor()\n try:\n self._cur.execute('''CREATE TABLE metrics (model_name, metric_name text, op_name text, metric_value real, iteration int)''')\n except: \n pass\n\n\ndef create_server(database_file=\":memory:\"):\n return Frontend(database_file)\n","sub_path":"src/servers/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"6993248","text":"from math import *\r\n\r\nfrom flask import (\r\n Flask,\r\n render_template,\r\n request,\r\n g,\r\n session,\r\n redirect,\r\n url_for\r\n)\r\nfrom flask_restful import Resource, Api\r\nfrom sqlalchemy import create_engine\r\nfrom json import dumps\r\nfrom flask import jsonify\r\nimport pymongo \r\nfrom pymongo import MongoClient \r\n\r\n\r\n\r\n### Tạo APP\r\napp = Flask(__name__)\r\n#, static_url_path='', static_folder='/static'\r\napp.secret_key = \"adtekdev\"\r\n\r\n### LIÊN KẾT TỚI DB MONGO\r\nMONGO_URI = 'mongodb+srv://cuteo1232:Hainam08@cluster0-iuz4b.mongodb.net/test?retryWrites=true&w=majority'\r\ncluster = MongoClient(MONGO_URI)\r\n\r\ndb = cluster.ATN # cluster[\"ATN\"]\r\n\r\n\r\n### CODE Flask - Python Web\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template(\"login.html\")\r\n\r\n\r\n@app.route('/home')\r\ndef home():\r\n return render_template(\"home.html\", username=session['username'], fullname=session['fullname'])\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n\r\n if session.get('logged_in_flag'):\r\n if session['logged_in_flag']:\r\n return redirect(url_for('home'))\r\n\r\n query_parameters = request.args\r\n vusername = query_parameters.get(\"username\")\r\n vpassword = query_parameters.get(\"password\")\r\n\r\n collection = db.account\r\n ### ch-eck Account / Tài khoản USER\r\n results = collection.find({\"username\":vusername, \"password\": vpassword}) \r\n\r\n\r\n if results.count() == 1:\r\n session['logged_in_flag'] = True\r\n session['username'] = results[0][\"username\"]\r\n session['fullname'] = results[0][\"fullname\"]\r\n return render_template(\"home.html\", username=results[0][\"username\"], fullname=results[0][\"fullname\"])\r\n else:\r\n session['logged_in_flag'] = False\r\n return render_template(\"login.html\", mesg = \"\")\r\n\r\n@app.route('/logout', methods=['GET', 'POST'])\r\ndef logout():\r\n #if session.get('logged_in_flag'):\r\n if 'logged_in_flag' in session:\r\n session['logged_in_flag'] = False\r\n return \"\"\r\n\r\n\r\n@app.route('/profile')\r\ndef profile():\r\n return render_template(\"profile.html\")\r\n\r\n@app.route('/products', methods=['GET', 'POST'])\r\ndef products():\r\n collection = db.product \r\n lpro = collection.find()\r\n return render_template(\"product-listA1.html\", productList = lpro)\r\n\r\n@app.route('/addProduct', methods=['GET', 'POST'])\r\ndef addProduct():\r\n if (\"productName\" in request.args and \"productPrice\" in request.args):\r\n pName = request.args.get(\"productName\")\r\n pPrice = request.args.get(\"productPrice\")\r\n newProduct = {\"name\" : pName, \"price\" : pPrice}\r\n collection = db.product \r\n collection.insert_one(newProduct)\r\n return render_template(\"addProduct.html\")\r\n \r\n@app.route('/addOrder', methods=['GET', 'POST'])\r\ndef addOrder():\r\n global total\r\n total = 
0\r\n collection = db.product\r\n lpro = collection.find()\r\n product= collection.find()\r\n if (\"username\" in request.args and \"orderid\" in request.args and \"productName\" in request.args):\r\n oID = request.args.get(\"orderid\")\r\n uName = request.args.get(\"username\")\r\n pPrice = request.args.get(\"productName\")\r\n oDate = request.args.get(\"date\")\r\n for Products in product:\r\n total = total + int(Products[\"price\"])*int(pPrice)\r\n newOrder = {\"orderid\" : oID, \"username\": uName, \"date\" : oDate, \"total\" : total}\r\n collection = db.OrderList\r\n collection.insert_one(newOrder)\r\n return render_template(\"addorder.html\", productList = lpro)\r\n\r\n@app.route('/report', methods=['GET', 'POST'])\r\ndef report():\r\n global vtotal\r\n vtotal = 0\r\n collection = db.OrderList\r\n lpro = collection.find()\r\n for x in collection.find():\r\n vtotal = vtotal + int(x[\"total\"])\r\n newReport = {\"total\" : vtotal}\r\n db.report.insert_one(newReport)\r\n return render_template(\"report.html\", orderList = lpro, total = newReport)\r\n \r\n\r\n \r\n\r\n","sub_path":"pyweb_001.py","file_name":"pyweb_001.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"131628739","text":"#!/usr/bin/env python\nfrom random import shuffle\n\n\ndef shuffle_list(mylist):\n shuffle(mylist)\n return mylist\n\n\ndef player_guess():\n guess = ''\n while guess not in ['0', '1', '2']:\n guess = input(\"Pick a number :0, 1 or 2 \")\n return int(guess)\n\n\ndef check_guess(mylist, guess):\n if mylist[guess] == \"O\":\n print(\"Correct\")\n else:\n print(\"Wrong guess\")\n print(mylist)\n\n# Initial list\n\n\nmy_list = [' ', 'O', ' ']\n\n\n# Shuffle list\n\nmixedup_list = shuffle_list(my_list)\n\n# User guess\n\nguess = player_guess()\n\n# Check guess\n\ncheck_guess(mixedup_list, guess)\n","sub_path":"6.Methods_Functions_builtin/3cardmonty.py","file_name":"3cardmonty.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"399747789","text":"import argparse, json, sys\nfrom slei.main import Slei\n\n_parser = argparse.ArgumentParser(\n \"slei\",\n formatter_class=argparse.RawTextHelpFormatter,\n description='SLEI -- a structured language for elasticsearch')\n_parser.add_argument(\"-c\", help='connection url')\n_parser.add_argument('-i', help='index name')\n_parser.add_argument('query', help='''A slei expression\nExamples:\n filter(type==\"value\")\n filter(type==\"x\") | group_by(foobar) | head(10)''')\n\nargs = _parser.parse_args()\njson.dump(Slei()\n .connect(args.c or os.environ.get('SLEI_ES_URL'))\n .execute(args.query)\n ._return(),\n sys.stdout)\nprint\n","sub_path":"slei/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"559873966","text":"#Jason Mandras, Lab 7\n#variable dictionary:\n #records - number of people in the file\n #start - variable that runs the while loop for program\n #choice - selects which search method code to use\n #option returns user's search method request to choice\n #search_count - number of times the computer uses sequential search for the last name and house name\n\n#Starting Documentation:\n#Write a program that gives the user a menu of options to search through the file. 
The menu should be:\n\t\t\t\t\t\n#SEARCH MENU\n#1.\tSearch by FIRST NAME\n#2.\tSearch by ID CODE\n#3.\tSearch by LAST NAME\n#4.\tSearch by ALLEGIANCE\n#5.\tEXIT\n\n#Depending on how the user wants to search, you may need to sort the searched-through list before performing Binary Search. Binary Search should be used for Menu Options 1 (First Name) and 2 (ID codes) and the full record for the individual searched should be included if found (the user should be alerted if the person cannot be found). If the user chooses options 3 or 4, you must print a list of everyone and their full record that fits the searched item (think: sequential search!) that has that Last Name or Allegiance. Use the GOT_bubble_sort_7.txt file (you may change the name if you wish but you may NOT edit the text file outside of checking for and deleting empty end records). The user should be able to search as many times as they would like. If the user enters an option that does not exist, the program must tell them this before asking if the user would like to search for a new record\n\n#Other stipulations of lab:\n#The menu should be printed from a function that returns the user’s search selection \n#A function must be used to swap values for bubble sort. \n#A function must be used to ask the user if they would like to search again. \n#This function should only accept the following values from the user: Y, y, N, n. Any other values will prompt the user to reenter. The function should return the user’s input once it meets the criteria.\n#A function to print a goodbye message when the user decides to exit.\n#10pt BONUS: Add something GOT related to your goodbye message. This could be a quote, a picture … get creative (sliding scale bonus points so 1 – 10 based on what you do :] )\n#The console screen should clear before each new search.\n#This will be reviewed W8D2\n\n#The GOT_bubble_sort_7.txt file is setup in the following way:\n\n#FIELD1\t\tFIELD2\t\t\tFIELD3\t\t\tFIELD4\t\tFIELD5\t\t\n#ID Code\tLast Name\t\tFirst Name\t\tAge\t\t\tAllegiance\n\nimport csv\nimport os\n\n#initializing variables\nrecords = 0\noption = 0\nchoice = 0\nsearch_count = 0\nstart = \"y\"\n\n#empty lists\nfirst = []\nlast = []\nid = []\nage = []\nallegiance = []\n\n#functions\ndef menu(option): #select an option 1-5 for searching\n print(\"\\n SEARCH MENU\")\n print(\"1. Search by FIRST NAME\")\n print(\"2. Search by ID CODE\")\n print(\"3. Search by LAST NAME\")\n print(\"4. Search by ALLEGIANCE\")\n print(\"5. EXIT\")\n option = int(input(\"\\nWhat method would you like to search with? (1-5) \"))\n while option != 1 and option != 2 and option != 3 and option != 4 and option != 5:\n print(\"Please choose an option with the numbers 1 to 5.\")\n option = int(input(\"\\nWhat method would you like to search with? (1-5) \"))\n return option\n\ndef swap(listname, index): #swaps values for list order\n temp = listname[index]\n listname[index] = listname[index + 1]\n listname[index + 1] = temp\n return listname, index\n \ndef run(continuation): #exit loop\n continuation = input(\"\\nWould you like to search for anyone else? (y/n) \")\n while continuation != \"y\" and continuation != \"n\" and continaution != \"Y\" and continuation != \"N\":\n print(\"Please enter 'y' or 'Y' for yes and 'n' or 'N' for no.\")\n continuation = input(\"\\nWould you like to search for anyone else? 
(y/n) \")\n return continuation\n\nwith open(\"G:/school docs/quarter 2/se 126/lab 7 text file/GOT_bubble_sort_7.txt\") as csvfile:\n file = csv.reader(csvfile)\n for rec in file:\n records += 1\n id.append(rec[0])\n last.append(rec[1])\n first.append(rec[2])\n age.append(rec[3])\n allegiance.append(rec[4])\n\nwhile start == \"y\" or start == \"Y\":\n os.system('cls')\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n for i in range(0, records):\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n choice = menu(option)\n if choice == 1:\n os.system('cls')\n #bubble sort\n for index in range(0, records - 1):\n for i in range(0, records - 1):\n if first[i] > first[i + 1]:\n swap(first, i)\n swap(last, i)\n swap(id, i)\n swap(age, i)\n swap(allegiance, i)\n\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n for i in range(0, records):\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n \n #binary search\n max = records - 1\n min = 0\n guess = 0\n search = input(\"\\nEnter the FIRST NAME of who you're looking for: \")\n max = records - 1\n guess = int((max + min)/2)\n while(min < max and search != first[guess]):\n if search < first[guess]: #resetting max value\n max = guess - 1\n else: #resetting min value\n min = guess + 1\n guess = int((max + min)/2) #recalculating guess value\n #printing based on binary search results\n if search == first[guess]:\n print(\"\\nWe found the person you are looking for. Here is their data:\")\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[guess], last[guess], id[guess], age[guess], allegiance[guess]))\n else:\n print(\"Sorry, but the person you are looking for was not found\")\n print(\"Please make sure that the FIRST NAME is correct and try again\")\n start = run(start)\n\n elif choice == 2:\n os.system('cls')\n #bubble sort\n for index in range(0, records - 1):\n for i in range(0, records - 1):\n if id[i] > id[i + 1]:\n swap(first, i)\n swap(last, i)\n swap(id, i)\n swap(age, i)\n swap(allegiance, i)\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n for i in range(0, records):\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n #binary search\n max = records - 1\n min = 0\n guess = 0\n search = input(\"\\nEnter the ID CODE of who you're looking for: \")\n max = records - 1\n guess = int((max + min)/2)\n while(min < max and search != id[guess]):\n if search < id[guess]: #resetting max value\n max = guess - 1\n else: #resetting min value\n min = guess + 1\n guess = int((max + min)/2) #recalculating guess value\n #printing based on binary search results\n if search == id[guess]:\n print(\"\\nWe found the person you are looking for. 
Here is their data:\")\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[guess], last[guess], id[guess], age[guess], allegiance[guess]))\n else:\n print(\"Sorry, but the person you are looking for was not found\")\n print(\"Please make sure that the ID CODE is correct and try again\")\n start = run(start)\n\n elif choice == 3:\n os.system('cls')\n #sequential search by last name\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n for i in range(0, records):\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n search = input(\"\\nEnter the LAST NAME of who you're looking for: \")\n found = -1\n for i in range(0, records):\n if search == last[i]:\n found = i\n search_count += 1\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n if found >= 0:\n print(\"\\nWe found\", search_count,\"people with the last name\", search, \".\")\n else:\n print(\"Sorry, but the person you are looking for was not found\")\n print(\"Please make sure that you're spelling is correct and try again\")\n start = run(start)\n\n elif choice == 4:\n os.system('cls')\n #sequential search by allegiance\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(\"First Name\", \"Last Name\", \"ID Code\", \"Age\", \"Allegiance\"))\n for i in range(0, records):\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n search = input(\"\\nEnter the HOUSE NAME of who you're looking for: \")\n found = -1\n for i in range(0, records):\n if search == allegiance[i]:\n found = i\n search_count += 1\n print(\"{0:10} \\t {1:10} \\t {2:15} \\t {3:3} \\t {4:10}\".format(first[i], last[i], id[i], age[i], allegiance[i]))\n if found >= 0:\n print(\"\\nWe found\", search_count, \"people in\", search, \".\")\n else:\n print(\"Sorry, but the house you are looking for was not found\")\n print(\"Please make sure that you're spelling is correct and try again\")\n start = run(start)\n\n elif choice == 5:\n start = \"n\" \nprint(\"\\nThank you for using my program!\\n\")","sub_path":"lab_7-1.py","file_name":"lab_7-1.py","file_ext":"py","file_size_in_byte":10269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"191014793","text":"from app import db\nfrom sqlalchemy import Column, DateTime, ForeignKey, UniqueConstraint, func\nimport enum\nimport datetime\n\n\nclass Type(enum.Enum):\n Costuraria = 'Costuraria'\n Electricidade = 'Electricidade'\n Canalizações = 'Canalizações'\n Jardinagem = 'Jardinagem'\n Limpeza = 'Limpeza'\n Carpintaria = 'Carpintaria'\n Pintura = 'Pintura'\n Serralharia = 'Serralharia'\n\n\nclass Users(db.Model):\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(), nullable=False)\n email = db.Column(db.String(), nullable=False, unique=True)\n password = db.Column(db.String(), nullable=False)\n photo = db.Column(db.String())\n address = db.Column(db.String())\n contact = db.Column(db.String())\n certified = db.Column(db.Boolean())\n created_at = db.Column(DateTime(timezone=True), server_default=func.now())\n updated_at = db.Column(DateTime(timezone=True), 
onupdate=func.now())\n description = db.Column(db.String())\n\n def __init__(self, name, email, password, photo, address, contact, description):\n self.name = name\n self.email = email\n self.password = password\n self.photo = photo\n self.address = address\n self.contact = contact\n self.description = description\n\n def __repr__(self):\n return ('').format(\n self.id, self.email, self.photo, self.address, self.contact,\n self.certified, self.description)\n\n\nclass Tasks(db.Model):\n __tablename__ = 'tasks'\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(), nullable=False)\n location = db.Column(db.String(), nullable=False)\n price = db.Column(db.Float, nullable=False)\n description = db.Column(db.String())\n user = db.Column(ForeignKey(\"users.id\"), nullable=False)\n type = db.Column(db.Enum(Type))\n approved = db.Column(db.Boolean(), default=False)\n created_at = db.Column(DateTime(timezone=True), server_default=func.now())\n updated_at = db.Column(DateTime(timezone=True), onupdate=func.now())\n\n def __init__(self, title, location, price, description, user, type):\n self.title = title\n self.location = location\n self.price = price\n self.description = description\n self.user = user\n self.type = type\n\n def __repr__(self):\n return ('').format(\n self.id, self.title, self.type, self.approved,\n self.user, self.location, self.description)\n\n\nclass Ratings(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n value = db.Column(db.Integer, nullable=False)\n from_user = db.Column(ForeignKey(\"users.id\"), nullable=False)\n to_user = db.Column(ForeignKey(\"users.id\"), nullable=False)\n task = db.Column(ForeignKey(\"tasks.id\"), nullable=False)\n __table_args__ = (UniqueConstraint('from_user', 'to_user', 'task',\n name='unique_user_rating'),)\n\n def __init__(self, value, from_user, to_user, task):\n self.value = value\n self.from_user = from_user\n self.to_user = to_user\n self.task = task\n\n def __repr__(self):\n return ''.format(self.id, self.value)\n\n\nclass Proposals(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user = db.Column(ForeignKey(\"users.id\"), nullable=False)\n offer = db.Column(db.Float, nullable=False)\n description = db.Column(db.String())\n accepted = db.Column(db.Boolean(), default=False)\n task = db.Column(ForeignKey(\"tasks.id\"), nullable=False)\n\n def __init__(self, user, offer, description, accepted, task):\n self.user = user\n self.offer = offer\n self.description = description\n self.accepted = accepted\n self.task = task\n\n def __repr__(self):\n return ('').format(self.id, self.user,\n self.offer, self.description,\n self.accepted, self.task)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"535023728","text":"#/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\n @File : fake_datas.py \n @Contact : guoxin@126.com\n @License : (C)Copyright 2018-2019, xguo\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2020/6/8 21:40 xguo 1.0 None\n\n'''\n\nfrom faker import Faker\nfrom pprint import pprint\nimport profile\n\ndef fake_data(num=10):\n f=Faker(\"zh_CN\")\n datas=[]\n while num:\n data = [f.name(),\n f.address(),\n f.phone_number(),\n f.country(),\n f.province(),\n f.city_suffix(),\n f.district(),\n f.street_name(),\n f.street_suffix(),\n f.random_digit(),\n f.random_element(),\n f.random_int(),\n f.random_letter(),\n 
f.random_number(),\n f.email(),\n f.url(),\n f.user_name(),\n f.ipv4(),\n f.ssn(),\n f.color_name(),\n f.date(),\n # f.geo_coordinate(),\n f.latitude(),\n f.longitude(),\n f.lexify(),\n f.numerify(),\n f.postcode(),\n ]\n\n datas.append(data)\n num-=1\n return datas\n\ndef main():\n d=fake_data()\n print(\"地址类\".center(100, \"-\"))\n pprint(d)\n print(\"公司类\".ljust(100,'-'))\n print(\"个人信息类\".rjust(100,'*'))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"py/fake_datas.py","file_name":"fake_datas.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"126397711","text":"import tensorflow as tf\nfrom numpy.random import RandomState\n\nx = tf.placeholder(tf.float32,shape = (None,2),name = 'x_input')\ny_ = tf.placeholder(tf.float32,shape = (None,1),name = 'y_input')\n\nw1 = tf.Variable(tf.random_normal([2,1],stddev = 1, seed = 1))\ny = tf.matmul(x, w1)\n\n#define loss function\nloss_less = 1\nloss_more = 10\nloss = tf.reduce_sum(tf.where(tf.greater(y_,y),loss_less*(y_-y),loss_more*(y-y_)))\ntraining = tf.train.AdamOptimizer().minimize(loss)\n\ndataset_size = 128\nbatch_size = 8\nrdm = RandomState(1)#one dimension\n#numpy.random.rand() 生成(0,1)区间数\nX = rdm.rand(dataset_size,2)\nY = [[x1 + x2 + rdm.rand()/10.0 - 0.05] for x1,x2 in X]\n\ninitial = tf.global_variables_initializer()#tf.initialize_all_variables()\n\nwith tf.Session() as sess:\n sess.run(initial)\n for step in range(5000):\n start = (step*batch_size)%dataset_size\n end = min(batch_size + start,dataset_size)\n sess.run(training,feed_dict = {y_:Y[start:end], x:X[start:end]})\n if step%1000 == 0:\n print(step,sess.run(w1))","sub_path":"practice/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"501365394","text":"# coding=utf-8\n\nfrom pygame import *\n\nfrom helpers.spritesheet import *\nfrom pygame.constants import K_LEFT, K_DOWN, K_RIGHT, K_UP\n\nMOVE_SPEED = .2\nCOLOR = \"#336699\"\n\n# Point = namedtuple('Point', ['x', 'y'])\n\n# Point = type(\"Point\", (object,), {\"x\": 0, \"y\": 0})\n\n\nclass Point(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\nclass Player(sprite.Sprite):\n velocity = Point(0, 0)\n event = None\n\n def __init__(self, x, y):\n sprite.Sprite.__init__(self)\n\n sprite_sheet = Spritesheet('assets/images/player.png')\n self.animations = {\n 'down': self._hero_animation(0, 0, 32, 32),\n 'left': self._hero_animation(0, 32, 32, 32),\n 'right': self._hero_animation(0, 64, 32, 32),\n 'up': self._hero_animation(0, 96, 32, 32),\n 'default': sprite_sheet.image_at((0, 0, 32, 32), (255, 0, 255))\n }\n\n self.image = self.animations['default']\n self.rect = self.image.get_rect()\n self.feet = pygame.Rect(0, 0, self.rect.width * .5, 8)\n\n self._position = [x, y]\n self._old_position = self.position\n\n @property\n def position(self):\n return list(self._position)\n\n @position.setter\n def position(self, value):\n self._position = list(value)\n\n @staticmethod\n def _hero_animation(x, y, width, height):\n return StripAnimation(\n 'assets/images/player.png',\n (x, y, width, height),\n 3,\n (255, 0, 255),\n True,\n 120 / 8\n )\n\n def handle_input(self):\n pressed = pygame.key.get_pressed()\n if pressed[K_UP]:\n self.velocity.y = -MOVE_SPEED\n self.image = self.animations['up'].next()\n elif pressed[K_DOWN]:\n self.velocity.y = MOVE_SPEED\n self.image = 
self.animations['down'].next()\n else:\n self.velocity.y = 0\n\n if pressed[K_LEFT]:\n self.velocity.x = -MOVE_SPEED\n self.image = self.animations['left'].next()\n elif pressed[K_RIGHT]:\n self.velocity.x = MOVE_SPEED\n self.image = self.animations['right'].next()\n else:\n self.velocity.x = 0\n\n if self.velocity.x == 0 and self.velocity.y == 0:\n self.image = self.animations['default']\n\n def update(self, dt):\n self._old_position = self._position[:]\n self._position[0] += self.velocity.x * dt\n self._position[1] += self.velocity.y * dt\n\n self.rect.topleft = self._position\n self.feet.midbottom = self.rect.midbottom\n\n","sub_path":"game/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"544887288","text":"import random\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom statistics import mode, mean, variance, stdev\n\nfrom joblib import Parallel, delayed\nimport multiprocessing\n\nclass ExtraTreeRegressionForrest:\n\n def __init__(self, alg_type, M, n_min, K):\n self.used_combos = []\n self.alg_type = 'regression'\n self.M = M\n self.n_min = n_min\n self.K = K\n self.entropy_beta = np.random.randint(2, 11)\n\n \"\"\"\n makes K random splits by K random attributes\n returns the attribute and the split_value that gave the best score\n \"\"\"\n def split_a_node(self, S, beta):\n if self.stop_split(S):\n return None\n keys = list(S.keys())\n keys = self.check_keys(S)\n keys.remove('output')\n if len(keys) < self.K:\n return None, None\n a = random.sample(keys, self.K)\n s = [self.pick_a_random_split(S, ai) for ai in a]\n scores = [self.score(S, si, ai, beta) for ai, si in s]\n s_star = scores.index(max(scores))\n return s[s_star]\n\n def check_keys(self, S):\n splittable_keys = []\n for key in S:\n verdict = False\n for i in range(len(S[key]) - 1):\n if S[key][i] != S[key][i + 1]:\n verdict = True\n if verdict:\n splittable_keys.append(key)\n return splittable_keys\n\n \"\"\"\n calculates the entropy of a list of values\n \"\"\" \n def entropy(self, target):\n classes = set(target)\n ps = [target.count(t)/float(len(target)) for t in classes]\n ps = [-pi * np.log2(pi) for pi in ps]\n return sum(ps)\n \n def beta_entropy(self, target, beta):\n classes = set(target)\n lengths = [target.count(t) for t in classes]\n s = len(target)\n beta_ent = 1 / (1 - 2 ** (1 - beta))\n beta_ent *= (1 - sum([(bi / s) ** beta for bi in classes]))\n return beta_ent\n \n \"\"\"\n purity gain\n \"\"\"\n def score1(self, S, si, ai, beta):\n #entropy\n hbs = self.beta_entropy(list(S['output']), beta)\n #split entropy\n left = [t for a, t in zip(S[ai], S['output']) if a < si]\n right = [t for a, t in zip(S[ai], S['output']) if a > si]\n hbb1 = self.beta_entropy(left, beta)\n hbb2 = self.beta_entropy(right, beta)\n split_etp = float(len(left))/len(S[ai]) * hbb1 + float(len(right)) / len(S[ai]) * hbb2\n #information gain\n ig = hbs - split_etp\n return 2 * ig / (hbs + split_etp)\n \n \n def score(self, S, si, ai, beta=0):\n #different for regression\n #entropy\n etp = variance(list(S['output']))\n #split entropy\n left = [t for a, t in zip(S[ai], S['output']) if a < si]\n right = [t for a, t in zip(S[ai], S['output']) if a > si]\n if len(left) < 2:\n etpl = 0\n else:\n etpl = variance(left)\n if len(right) < 2:\n etpr = 0\n else:\n etpr = variance(right)\n split_etp = float(len(left))/len(S[ai]) * etpl + float(len(right)) / len(S[ai]) 
* etpr\n #information gain\n ig = etp - split_etp\n ig = etpl + etpr\n # ig = (etp - split_etp) / etp\n # return 2 * ig / (etp + split_etp)\n return ig\n \n \"\"\"\n normalized version of the shannon information gain\n \"\"\"\n def score3(self, S, si, ai, beta=0):\n #different for regression\n #entropy\n etp = self.entropy(list(S['output']))\n #split entropy\n left = [t for a, t in zip(S[ai], S['output']) if a < si]\n right = [t for a, t in zip(S[ai], S['output']) if a > si]\n etpl = self.entropy(left)\n etpr = self.entropy(right)\n split_etp = float(len(left))/len(S[ai]) * etpl + float(len(right)) / len(S[ai]) * etpr\n #information gain\n ig = etp - split_etp\n # return 2 * ig / (etp + split_etp)\n return ig\n \n \"\"\"\n gini impurity\n \"\"\"\n def score2(self, S, si, ai, beta=0):\n left = [t for a, t in zip(S[ai], S['output']) if a < si]\n right = [t for a, t in zip(S[ai], S['output']) if a > si]\n etpl = self.entropy(left)\n etpr = self.entropy(right)\n return etpl + etpr\n \n \"\"\"\n returns a random value between the maximum and the minimum values of the attributes\n \"\"\"\n def pick_a_random_split(self, S, a):\n a_max = max(S[a])\n a_min = min(S[a])\n if a_max == a_min:\n a_c = a_max\n else:\n a_c = random.uniform(a_min, a_max)\n return a, a_c\n \n \"\"\"\n the node can no longer be split if all_attributes_are_constant or the node has too few attributes\n \"\"\"\n def stop_split(self, S):\n # print(len(S))\n if len(S) < self.n_min:\n return True\n if self.all_attributes_are_constant(S):\n return True\n return False\n \n \"\"\"\n returns True if all attributes are constant or the output variable is constant\n else returns False\n \"\"\"\n def all_attributes_are_constant(self, S):\n verdict = True\n if not S:\n return verdict\n for i in range(len(S['output']) - 1):\n # print(S['output'])\n if S['output'][i] != S['output'][i + 1]:\n verdict = False\n if verdict == True:\n return True\n for attribute in S:\n for i in range(len(S[attribute]) - 1):\n if S[attribute][i] != S[attribute][i + 1]:\n return False\n return True \n \n def fit(self, S, y=None):\n self.build_an_extra_tree_ensemble(S) \n open(\"et_trees.txt\",'w').write(str(self.trees)) \n \n \"\"\"\n S = dictionary of train data of the form: {header_value: data_list}\n alg_type = classification or regression, classification supported rn\n M = number of trees\n n_min = minimum sample size for splitting a node\n K = the number of attributes randomly selected at each node\n returns a list of trees that can be used for classification / regression\n \"\"\" \n def build_an_extra_tree_ensemble(self, S):\n T = []\n S = self.to_dict(S)\n num_cores = multiprocessing.cpu_count()\n \n T = Parallel(n_jobs=num_cores)(delayed(self.build_an_extra_tree)(S) for i in range(self.M))\n self.trees = T\n return T\n\n \"\"\"\n returns a tree of the form\n {\n (attr0, split_val1): \n [\n [(output0, freq0)], \n {\n (attr1, split_val1): \n [\n [(output1, freq1)], \n [(output2, freq2)]\n ]\n }\n ]\n }\n \"\"\"\n def build_an_extra_tree(self, S):\n # print(len(S))\n if len(S) < self.n_min or self.all_attributes_are_constant(S):\n return self.labeled_leaf(S)\n beta = np.random.randint(2, 11)\n a, s_star = self.split_a_node(S, beta)\n if a is not None:\n S_l, S_r = self.get_splits(S, a, s_star)\n if not S_l:\n t_r = self.build_an_extra_tree(S_r)\n return t_r\n if not S_r:\n t_l = self.build_an_extra_tree(S_l)\n return t_l\n t_l = self.build_an_extra_tree(S_l)\n t_r = self.build_an_extra_tree(S_r)\n t = {}\n t[(a, s_star)] = [t_l, t_r]\n return t\n else:\n 
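            # Aside (commentary, not original code): `score` above implements
            # a variance-reduction criterion, the regression analogue of
            # information gain -- Var(S) minus the size-weighted child
            # variances. Note that `ig = etpl + etpr` then overwrites that
            # gain with the unweighted sum of child variances (the commented
            # alternatives suggest a leftover experiment), so split selection
            # actually maximises etpl + etpr as written. A worked example of
            # the weighted form:
            #
            #     output = [1, 1, 5, 5]  ->  left = [1, 1], right = [5, 5]
            #     Var(S) = 16/3, Var(left) = Var(right) = 0
            #     gain = 16/3 - (2/4)*0 - (2/4)*0 = 16/3  (a perfect split)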
return self.labeled_leaf(S)\n \n def class_frequencies(self, S):\n from itertools import groupby\n if not S:\n return None\n a = (S['output'])\n freq = [(key, len(list(group)) / float(len(a))) for key, group in groupby(a)]\n freq = {x:a.count(x) / len(a) for x in a}\n freq = [(x, c) for x, c in freq.items()]\n # print(freq)\n return freq\n \n \"\"\"\n returns a leaf labeled by class frequencies in S if alg type = classification\n returns a leaf labeled by average output in S if alg type = regression\n \"\"\"\n def labeled_leaf(self, S):\n leaf = {}\n if self.alg_type == 'classification':\n leaf = self.class_frequencies(S)\n return leaf\n if self.alg_type == 'regression':\n leaf = mean(S['output'])\n return leaf\n \n \"\"\"\n splits a node in two based on the split value of the chosen attribute\n \"\"\"\n def get_splits(self, S, a, s_star):\n S_l = {}\n S_r = {}\n for i in range(len(S[a])):\n for key in S:\n if key != a:\n if S[a][i] < s_star:\n if key not in S_l:\n S_l[key] = []\n S_l[key].append(S[key][i])\n else:\n if key not in S_r:\n S_r[key] = []\n S_r[key].append(S[key][i])\n return S_l, S_r\n \n def classify_instance(self, S, trees):\n outputs = []\n for tree in trees:\n output = self.get_verdict(tree, S)\n if output is not None:\n outputs.append(output)\n # print(outputs)\n return mean(outputs)\n \n def to_matrix(self, S):\n mat = []\n header = []\n for key in S:\n header.append(key)\n for i in range(len(S[key])):\n if len(mat) < i + 1:\n mat.append([])\n mat[i].append(S[key][i])\n return mat, header \n \n def predict(self, S):\n S = self.to_dict(S)\n mat, header = self.to_matrix(S)\n verdicts = []\n for line in mat:\n inst = {}\n for key, value in zip(header, line):\n inst[key] = value\n verdict = self.classify_instance(inst, self.trees)\n verdicts.append(verdict)\n return verdicts\n \n def get_verdict(self, tree, S):\n \n if isinstance(tree, (float,)):\n return tree\n return tree\n root = list(tree.keys())[0]\n (root_attr, root_cut) = root\n if S[root_attr] < root_cut:\n return self.get_verdict(tree[root][0], S)\n else:\n return self.get_verdict(tree[root][1], S)\n \n def to_dict(self, df):\n df = df.apply(pd.to_numeric, errors='coerce')\n df = df.fillna(df.mean())\n df = df.to_dict('split')\n S = {}\n for i, key in enumerate(df['columns']):\n S[key] = [row[i] for row in df['data']]\n return S\n \nif __name__ == \"__main__\": \n import pandas as pd\n\n data = pd.read_csv('.\\\\train.csv')\n data = data.replace({'no': 0})\n # data = data.replace({'yes': 1})\n numeric_vars = [ 'v2a1', 'rooms', 'r4h1', 'r4h2', 'r4h3', 'r4m1',\n 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3', 'tamhog', 'tamviv',\n 'escolari', 'rez_esc', 'hhsize', 'hogar_nin', 'hogar_adul',\n 'hogar_mayor', 'hogar_total', 'dependency', 'qmobilephone',\n 'meaneduc', 'bedrooms', 'overcrowding', 'age', 'Target']\n data = data[numeric_vars]\n df = data.rename(index=str, columns={\"Target\": \"output\"})\n\n\n #resample\n from sklearn.utils import resample\n df = df.replace({'output': {1: 0, 2: 0, 3: 0}})\n df = df.replace({'output': {4: 1}})\n # indian_data = pd.read_csv('.\\\\pima-indians-diabetes.csv')\n # df = indian_data.rename(index=str, columns={\"class\": \"output\"})\n print(df['output'].value_counts())\n df_one = df[df.output==1]\n df_two = df[df.output==0]\n\n no_samples = 2000\n df_one_test = resample(df_one, replace=True, n_samples=no_samples, random_state=23)\n df_two_test = resample(df_two, replace=True, n_samples=no_samples, random_state=23)\n df_one = resample(df_one, replace=True, n_samples=no_samples, 
random_state=123)\n df_two = resample(df_two, replace=True, n_samples=no_samples, random_state=123)\n \n df = pd.concat([df_one, df_two])\n df_test = pd.concat([df_one_test, df_two_test])\n df = df.sample(frac=1)\n print(df.output.value_counts())\n\n import sys\n sys.setrecursionlimit(3000)\n\n data = to_dict(df)\n\n # trees = build_an_extra_tree_ensemble(data, 'classification', 11, 5, 2)\n trees = build_an_extra_tree_ensemble(data, 'classification', 21, 20, 10)\n\n from sklearn.metrics import accuracy_score\n test_data = to_dict(df_test)\n y_pred = classify(test_data, trees)\n # print(y_pred)\n y_true = test_data['output']\n # print(y_true)\n print(accuracy_score(y_true, y_pred))\n \n \n S = {'age': [20, 50, 49, 24, 22],\n 'height': [172, 165, 175, 165, 183],\n 'weight': [48, 80, 78, 57, 79],\n 'gender': [1, 1, 2, 1, 2],\n 'output': [1, 0, 2, 0, 1]} \n \n inst = {'age': 20,\n 'height': 172,\n 'weight': 48,\n 'gender': 1} \n\n # trees = build_an_extra_tree_ensemble(S, 'classification', 1, 2, 2)\n # print(classify_instance(inst, trees))\n \n ","sub_path":"regression_forest_non_oblique.py","file_name":"regression_forest_non_oblique.py","file_ext":"py","file_size_in_byte":13206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"251626803","text":"from typing import Iterator\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nfrom telegram import Update\nfrom telegram.ext import CallbackContext\n\nfrom pdf_bot.merge import MergeService\nfrom pdf_bot.models import FileData\nfrom pdf_bot.pdf import PdfService\nfrom pdf_bot.telegram import TelegramService, TelegramServiceError\n\nWAIT_MERGE_PDF = 0\nMERGE_PDF_DATA = \"merge_pdf_data\"\n\n\n@pytest.fixture(name=\"merge_service\")\ndef fixture_merge_service(pdf_service: PdfService, telegram_service: TelegramService):\n return MergeService(pdf_service, telegram_service)\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef merge_service_set_lang() -> Iterator[None]:\n with patch(\"pdf_bot.merge.merge_service.set_lang\"):\n yield\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef utils_set_lang() -> Iterator[None]:\n with patch(\"pdf_bot.utils.set_lang\"):\n yield\n\n\ndef test_ask_first_pdf(\n merge_service: MergeService,\n telegram_update: Update,\n telegram_context: CallbackContext,\n):\n actual = merge_service.ask_first_pdf(telegram_update, telegram_context)\n assert actual == WAIT_MERGE_PDF\n telegram_context.user_data.__setitem__.assert_called_with(MERGE_PDF_DATA, [])\n\n\ndef test_check_pdf(\n merge_service: MergeService,\n telegram_service: TelegramService,\n telegram_update: Update,\n telegram_context: CallbackContext,\n file_data: FileData,\n):\n file_data_list = MagicMock()\n telegram_context.user_data.__getitem__.return_value = file_data_list\n\n actual = merge_service.check_pdf(telegram_update, telegram_context)\n\n assert actual == WAIT_MERGE_PDF\n telegram_context.user_data.__getitem__.assert_called_with(MERGE_PDF_DATA)\n file_data_list.append.assert_called_once_with(file_data)\n telegram_service.send_file_names.assert_called_once()\n\n\ndef test_check_pdf_invlid_pdf(\n merge_service: MergeService,\n telegram_service: TelegramService,\n telegram_update: Update,\n telegram_context: CallbackContext,\n):\n telegram_service.check_pdf_document.side_effect = TelegramServiceError()\n\n actual = merge_service.check_pdf(telegram_update, telegram_context)\n\n assert actual == WAIT_MERGE_PDF\n telegram_context.user_data.__getitem__.assert_not_called()\n 
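    # Aside (illustrative, not from the original test): the failure path above
    # is driven by `side_effect` -- assigning an exception instance to a mock
    # makes every call raise it, which pytest.raises can confirm directly.
    # `demo_mock` is a throwaway name introduced only for this sketch:
    demo_mock = MagicMock(side_effect=TelegramServiceError())
    with pytest.raises(TelegramServiceError):
        demo_mock()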
telegram_service.send_file_names.assert_not_called()\n","sub_path":"tests/merge/test_merge_service.py","file_name":"test_merge_service.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"310247502","text":"# write a program to create a dictionary of players and their runs\n# and retrieve run scored by players\nx = {}\nn = int(input('enter howmany element:'))\nfor i in range(n):\n k = input('Enter players name:')\n v = int(input('Enter score:'))\n x.update({k:v})\n\nprint('the players are:', list(x.keys()))\nname = input('enter player name:')\nruns = x.get(name, -1)\nif(runs==-1):\n print('player not found')\nelse:\n print('{} made {} runs'.format(name, runs))\n \n\n","sub_path":"dict/demo3.py","file_name":"demo3.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"455417290","text":"from flask import *\nfrom extensions import db\nfrom werkzeug.utils import secure_filename\nimport os\nimport hashlib\n\nUPLOAD_FOLDER = os.path.join(os.getcwd(),'static/images/')\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'bmp', 'gif', 'PNG', 'JPG', 'BMP', 'GIF'])\n\ndef allowed_file(filename):\n\treturn '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\nalbum = Blueprint('album', __name__, template_folder='templates')\n\n@album.route('/album/edit', methods=['GET','POST'])\ndef album_edit_route():\n\toptions = {\n\t\t\"edit\": True,\n\t}\n\tcur = db.cursor()\n\n\tif request.method == 'GET':\n\t\tif 'albumid' not in request.args:\n\t\t\tabort(404)\n\t\talbum_id = request.args.get('albumid')\n\t\tcur.execute(\"SELECT albumid FROM Album WHERE albumid= %s;\",(album_id,))\n\t\tif(len(cur.fetchall())==0):\n\t\t\tabort(404)\n\t\tcur.execute(\"SELECT * FROM Photo JOIN Contain ON Contain.picid = Photo.picid WHERE Contain.albumid = %s;\", (album_id,))\n\t\tresults = cur.fetchall()\n\n\t\toptions['pictures'] = results;\n\t\tfor picture in options['pictures']:\n\t\t\tpicture['route'] = 'images/' + picture['picid'] + '.' + picture['format']\n\t\toptions['album_id'] = album_id\n\n\telif request.method == 'POST':\n\t\talbum_id = request.form['albumid']\n\t\tif request.form['op'] == \"delete\":\n\t\t\tpic_id = request.form['picid']\n\t\t\tcur.execute(\"SELECT * FROM Photo WHERE picid = %s;\",(pic_id,))\n\t\t\tresults = cur.fetchone()\n\t\t\tpicformat = results['format']\n\t\t\tcur.execute(\"DELETE FROM Photo WHERE picid = %s;\",(pic_id,))\n\t\t\tos.remove(UPLOAD_FOLDER + pic_id + '.' 
+ picformat)\n\t\t\tcur.execute(\"UPDATE Album SET lastupdated = CURRENT_TIMESTAMP WHERE albumid = %s;\",(album_id,))\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('album.album_edit_route',albumid=album_id))\n\t\telif request.form['op'] == \"add\":\n\t\t\t# check if the post request has the file part\n\t\t\tif 'file' not in request.files:\n\t\t\t\tflash('No file part')\n\t\t\t\treturn redirect(url_for('album.album_edit_route',albumid=album_id))\n\t\t\tfile = request.files['file']\n\t\t\t# if user does not select a file, the browser also\n\t\t\t# submits an empty part without a filename\n\t\t\tif file.filename == '':\n\t\t\t\tflash('No selected file')\n\t\t\t\treturn redirect(url_for('album.album_edit_route',albumid=album_id))\n\t\t\tif file and allowed_file(file.filename):\n\t\t\t\t#make hash\n\t\t\t\tm = hashlib.md5()\n\t\t\t\tdot = file.filename.find('.')\n\t\t\t\tflname = file.filename[0:dot]\n\t\t\t\tflformat = file.filename[dot+1:len(file.filename)]\n\t\t\t\tm.update(str(album_id))\n\t\t\t\tm.update(flname)\n\t\t\t\tpic_id = m.hexdigest()\n\n\t\t\t\t#find the next sequence number (collect all existing ones, not just the last row)\n\t\t\t\tcur.execute(\"SELECT sequencenum FROM Contain;\")\n\t\t\t\tresults = cur.fetchall()\n\t\t\t\tnumbers = [0]\n\t\t\t\tfor obj in results:\n\t\t\t\t\tnumbers.append(obj['sequencenum'])\n\t\t\t\tseqnum = max(numbers) + 1\n\n\t\t\t\tcur.execute(\"INSERT INTO Photo VALUES(%s, %s, CURRENT_TIMESTAMP);\",(str(pic_id),flformat))\n\t\t\t\tcur.execute(\"INSERT INTO Contain VALUES(%s, %s, %s, '');\",(seqnum, album_id, pic_id))\n\n\t\t\t\tcur.execute(\"UPDATE Album SET lastupdated = CURRENT_TIMESTAMP WHERE albumid = %s;\",(album_id,))\n\n\t\t\t\tfilename = secure_filename(pic_id + '.' + flformat)\n\t\t\t\tfile.save(UPLOAD_FOLDER + filename)\n\t\t\t\tdb.commit()\n\t\t\t\treturn redirect(url_for('album.album_edit_route',albumid=album_id))\n\n\treturn render_template(\"album.html\", **options)\n\n@album.route('/album', methods=['GET'])\ndef album_route():\n\toptions = {\n\t\t\"edit\": False\n\t}\n\tcur = db.cursor()\n\n\tif 'albumid' not in request.args:\n\t\tabort(404)\n\n\talbum_id = request.args.get('albumid')\n\tcur.execute(\"SELECT albumid FROM Album WHERE albumid= %s;\",(album_id,))\n\tif(len(cur.fetchall())==0):\n\t\tabort(404)\n\t\t\t\n\tcur.execute(\"SELECT * FROM Photo JOIN Contain ON Contain.picid = Photo.picid WHERE Contain.albumid = %s;\", (album_id,))\n\tresults = cur.fetchall();\n\n\toptions['pictures'] = results\n\toptions['album_id'] = album_id\n\n\tfor picture in options['pictures']:\n\t\t\tpicture['route'] = 'images/' + picture['picid'] + '.' 
+ picture['format']\n\n\treturn render_template(\"album.html\", **options)\n\n","sub_path":"controllers/album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"533206585","text":"# https://www.acmicpc.net/problem/2887\n\n# 풀이) 최소신장트리 - 크루스칼 알고리즘\nimport sys\ninput = sys.stdin.readline\n \ndef find_parent(parent, x):\n if parent[x] != x:\n parent[x] = find_parent(parent, parent[x])\n return parent[x]\n\ndef union_parent(parent, a, b):\n a = find_parent(parent, a)\n b = find_parent(parent, b)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\nn = int(input()) # 행성의 개수\nx = []\ny = []\nz = []\n\nfor i in range(n):\n data = list(map(int, input().split()))\n x.append((data[0], i))\n y.append((data[1], i))\n z.append((data[2], i))\n\nx.sort()\ny.sort()\nz.sort()\n\n\nedges = []\nfor i in range(n-1):\n x1, y1, z1 = x[i][0], y[i][0], z[i][0]\n x2, y2, z2 = x[i+1][0], y[i+1][0], z[i+1][0]\n \n edges.append((x2-x1, x[i][1], x[i+1][1]))\n edges.append((y2-y1, y[i][1], y[i+1][1]))\n edges.append((z2-z1, z[i][1], z[i+1][1]))\n\nparent = [0] * n\nfor i in range(n):\n parent[i] = i\n\n\nedges.sort()\n# print(edges)\nres = 0\nfor edge in edges:\n cost, a, b = edge\n if find_parent(parent, a) != find_parent(parent, b):\n union_parent(parent, a, b)\n res += cost\n\nprint(res)","sub_path":"Graph/행성터널.py","file_name":"행성터널.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"596736253","text":"import itertools\n \ndef findsubsets(s, n):\n return list(itertools.combinations(s, n))\n\nnbre_test = int(input())\n\nfor t in range(1, nbre_test + 1):\n m = int(input())\n\n deck = {}\n set_pions = []\n for _ in range(m):\n p, n = tuple(input().split(\" \")) \n p, n = int(p), int(n)\n set_pions.append(p)\n deck[p] = n\n\n set_pions = set(set_pions)\n\n for i in range(1, len(deck)):\n liste_sousens_g = findsubsets(set_pions, i)\n liste_sousens_d = []\n for j in liste_sousens_g:\n liste_sousens_d.append(set_pions - set(j))\n\n print(\"\\n\\n\\nSous ensemble gauche : \")\n print(liste_sousens_g)\n\n print(\"\\nSous ensemble droit : \")\n print(liste_sousens_d)\n\n \"\"\"\n total_somme = 0\n total_produit = 0\n\n g1 = []\n g2 = []\n for i in deck:\n\n\n\n print(\"Case #{}: {}\".format(t, cout))\n \"\"\"","sub_path":"code_jam_21/prime_time.py","file_name":"prime_time.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"566305191","text":"import pandas as pd\n\nfrom .gene import Gene\n\ndef parse_csv_genes(stream): \n data = pd.read_csv(stream, chunksize=1000)\n\n index = 1\n for chunk in data:\n for _, gene_data in chunk.iterrows():\n yield index, Gene(name=gene_data[0], sequence=gene_data[1])\n index += 1","sub_path":"services/gene_parser/parsers/csv_parser.py","file_name":"csv_parser.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"337608257","text":"from unittest import TestCase\nfrom unittest.mock import patch\nfrom inventory_management.electric_appliances_class import *\nfrom inventory_management.furniture_class import *\nfrom inventory_management.inventory_class import *\nfrom inventory_management.market_prices import *\nfrom inventory_management.main import *\n\n\nclass IntegrationTests(TestCase):\n 
\"\"\"Integration tests for inventory_management\"\"\"\n\n def test_main_add_inventory_integration(self):\n FULL_INVENTORY.clear()\n price = get_latest_price()\n\n # adding new item to inventory with main\n\n input_inventory = ['1', 'sushi', '1', 'n', 'n']\n item = Inventory('1', 'sushi', price, '1')\n with patch('builtins.input', side_effect=input_inventory):\n add_new_item()\n # Adding furniture item with main\n\n actual_inventory = FULL_INVENTORY\n expected_inventory = {\n '1': item.return_as_dictionary()}\n self.assertDictEqual(actual_inventory, expected_inventory)\n\n def test_main_add_furniture_integration(self):\n FULL_INVENTORY.clear()\n price = get_latest_price()\n\n input_furniture= ['2', 'table', '2', 'y', 'wood', 'S']\n item = Furniture('2', 'table', price, '2', 'wood', 'S')\n with patch('builtins.input', side_effect=input_furniture):\n add_new_item()\n\n actual_inventory = FULL_INVENTORY\n expected_inventory = {\n '2': item.return_as_dictionary()}\n self.assertDictEqual(actual_inventory, expected_inventory)\n\n # Adding electric appliance with main\n def test_main_add_electric_appliance_integration(self):\n FULL_INVENTORY.clear()\n price = get_latest_price()\n\n input_electric = ['3', 'phone', '3', 'n', 'y', 'Apple', '100']\n item = ElectricAppliances('3', 'phone', price, '3', 'Apple', '100')\n with patch('builtins.input', side_effect=input_electric):\n add_new_item()\n\n actual_inventory = FULL_INVENTORY\n expected_inventory = {\n '3': item.return_as_dictionary()}\n self.assertDictEqual(actual_inventory, expected_inventory)\n\n def test_mainmenu_q(self):\n '''test exiting programm'''\n with patch('builtins.input', side_effect=['q']):\n with self.assertRaises(SystemExit):\n exit_program()\n","sub_path":"students/Luyao_Xu/lesson01/assignment_1/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"424280309","text":"def bfs(start):\n # 방문한 정점 체크\n global V\n visited = [0 for _ in range(V + 1)]\n queue = []\n queue.append(start)\n\n while(len(queue) != 0):\n v = queue.pop(0)\n\n if visited[v] == 0:\n print(v, end=\" \")\n visited[v] = 1\n\n for next_v in matrix[v]:\n if visited[next_v] == 0:\n queue.append(next_v)\n\ndef dfs(v):\n if visited[v] == 1:\n return\n\n print(v, end=\" \")\n visited[v] = 1\n\n for next_v in matrix[v]:\n dfs(next_v)\n\n return\n\n\nV, E, start = map(int, input().split())\n# 1차원 배열 인덱스가 정점 번호. 
2차원 배열에 저장된게 연결된 정점.\nmatrix = [[] for _ in range(V + 1)]\n# 방문한 정점 체크\nvisited = [0 for _ in range(V + 1)]\n\n#정점 입력\nfor _ in range(E):\n v1, v2 = map(int, input().split())\n matrix[v1].append(v2)\n matrix[v2].append(v1)\n\n#정점을 순서 정렬.\nfor i in range(len(matrix)):\n matrix[i].sort()\n\n\n#dfs, bfs 함수호출\ndfs(start)\nprint()\nbfs(start)\nprint()\n\n\n","sub_path":"1206.py","file_name":"1206.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"547549799","text":"from argparse import ArgumentParser\nfrom collections import OrderedDict\nimport logging\nimport os.path as osp\n\nimport numpy as np\nimport pytorch_lightning as pl\nfrom pytorch_lightning.root_module.root_module import LightningModule\nimport torch\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nimport torchvision.transforms as transforms\n\nfrom par.common import backbones\nfrom par.common.layers.classifier import make_classifier\nfrom par.common.dataset.dataset import Dataset\nfrom par.common.metrics.example_based import example_based_metrics\nfrom par.common.metrics.label_based import compute_mean_accuracy\n\n\nclass Baseline(LightningModule):\n\n def __init__(self, hparams):\n super().__init__()\n self.hparams = hparams\n\n self.__build_model()\n\n if self.hparams.weighted_loss:\n positive_ratios = np.load(\n osp.join(self.hparams.data_dir, \"dataset\",\n \"positive_ratios.npy\"))\n self.weight_pos = torch.from_numpy(\n np.exp(1. - positive_ratios)).float()\n self.weight_neg = torch.from_numpy(\n np.exp(positive_ratios)).float()\n\n # ---------------------\n # MODEL SETUP\n # ---------------------\n def __build_model(self):\n self.backbone, feature_size = \\\n getattr(backbones, self.hparams.backbone)()\n\n self.classifier = make_classifier(\n self.hparams.backbone, feature_size, self.hparams.num_classes)\n\n # ---------------------\n # TRAINING\n # ---------------------\n def forward(self, x):\n x = self.backbone(x)\n x = F.adaptive_max_pool2d(x, 1)\n x = torch.flatten(x, 1)\n x = F.dropout(x, self.hparams.dropout, training=self.training)\n x = self.classifier(x)\n return x\n\n def criterion(self, outputs, labels):\n if self.hparams.weighted_loss:\n weight = torch.where(\n labels.cpu() == 1, self.weight_pos, self.weight_neg)\n\n if self.on_gpu:\n weight = weight.cuda(outputs.device.index)\n else:\n weight = None\n\n loss = F.binary_cross_entropy_with_logits(outputs, labels, weight)\n\n return loss\n\n def predict(self, outputs):\n if self.on_gpu:\n outputs = outputs.cuda(outputs.device.index)\n outputs = outputs.detach()\n predictions = (torch.sigmoid(outputs) > 0.5).cpu().numpy()\n return predictions\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n\n outputs = self.forward(x)\n\n loss = self.criterion(outputs, y)\n\n predictions = self.predict(outputs)\n mA = compute_mean_accuracy(predictions, y.cpu().numpy()).mean()\n\n tqdm_dict = {'train_mA': mA}\n output = OrderedDict({\n 'loss': loss,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n\n return output\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n outputs = self.forward(x)\n\n loss = self.criterion(outputs, y)\n\n predictions = self.predict(outputs)\n\n output = OrderedDict({\n 'loss': loss,\n 'predictions': predictions,\n 'labels': y.cpu().numpy()\n })\n\n return output\n\n def validation_end(self, outputs):\n avg_loss = 0\n 
predictions = []\n labels = []\n for output in outputs:\n loss = output['loss']\n avg_loss += loss\n\n predictions.extend(output['predictions'])\n labels.extend(output['labels'])\n\n avg_loss /= len(outputs)\n\n predictions = np.array(predictions)\n labels = np.array(labels)\n\n mA = compute_mean_accuracy(predictions, labels).mean()\n\n accuracy, precision, recall, f1 = \\\n example_based_metrics(predictions, labels)\n\n tqdm_dict = {\n 'val_loss': avg_loss,\n 'val_mA': mA,\n 'val_acc': accuracy,\n 'val_prec': precision,\n 'val_recall': recall,\n 'val_f1': f1,\n }\n\n result = {\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict,\n 'val_loss': avg_loss}\n\n return result\n\n # ---------------------\n # TRAINING SETUP\n # ---------------------\n def configure_optimizers(self):\n optimizer = optim.SGD(self.parameters(), self.hparams.lr,\n momentum=self.hparams.momentum,\n weight_decay=self.hparams.weight_decay,\n nesterov=True)\n\n scheduler = optim.lr_scheduler.CosineAnnealingLR(\n optimizer, T_max=self.hparams.max_nb_epochs)\n\n return [optimizer], [scheduler]\n\n def __dataloader(self, train):\n normalize = [transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))]\n\n if train:\n transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.Resize((256, 128)),\n transforms.Pad(10),\n transforms.RandomCrop((256, 128)),\n ] + normalize)\n else:\n transform = transforms.Compose([\n transforms.Resize((256, 128))\n ] + normalize)\n\n dataset = Dataset(root=self.hparams.data_dir,\n split='train' if train else 'test',\n transform=transform)\n\n # When using multi-node (ddp) we need to add the datasampler\n if self.use_ddp:\n sampler = DistributedSampler(dataset, shuffle=train)\n else:\n sampler = None\n\n loader = DataLoader(dataset=dataset,\n batch_size=self.hparams.batch_size,\n shuffle=train and sampler is None,\n sampler=sampler,\n num_workers=self.hparams.num_workers,\n pin_memory=True,\n worker_init_fn=_init_fn)\n\n return loader\n\n @pl.data_loader\n def train_dataloader(self):\n logging.info('training data loader called')\n return self.__dataloader(train=True)\n\n @pl.data_loader\n def val_dataloader(self):\n logging.info('val data loader called')\n return self.__dataloader(train=False)\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser])\n\n parser.add_argument('-backbone', default='resnet50', type=str)\n parser.add_argument('-batch_size', default=64, type=int)\n parser.add_argument('-dropout', default=0., type=float)\n parser.add_argument('-lr', default=0.01, type=float)\n parser.add_argument('-momentum', default=0.9, type=float)\n parser.add_argument('-num_classes', default=51, type=int)\n parser.add_argument('-num_workers', default=8, type=int)\n parser.add_argument('-weight_decay', default=0.0005, type=float)\n parser.add_argument('--weighted_loss', action='store_true')\n\n return parser\n\n\ndef _init_fn(worker_id):\n np.random.seed(0)\n","sub_path":"par/implementations/baseline/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"225099822","text":"import argparse\n\nfrom gensim.models.word2vec import LineSentence, Word2Vec\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Vector Space Traininng')\n parser.add_argument('-C', '--corpus', required=True,\n help='Corpus file')\n 
parser.add_argument('-m', '--model', required=True,\n help='Output model file')\n args = parser.parse_args()\n\n sentences = args.corpus\n model = Word2Vec(\n sentences=sentences, size=1000, alpha=0.025, window=5, min_count=2,\\\n seed=1, workers=1, min_alpha=0.0001, sg=1, hs=1, negative=0,\\\n cbow_mean=0)\n model.build_vocab(sentences)\n model.train(sentences)\n model.save(args.model)\n\n","sub_path":"coding/trainn_model.py","file_name":"trainn_model.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"380797856","text":"from __future__ import print_function as _a, unicode_literals as _b\n\ndef eclipse_sync_app():\n prefix_len = len(r'eclipse')\n from pathlib import Path\n workspace = Path(__file__).absolute().parent.parent.parent.parent\n zall = workspace.glob(r'eclipse.*')\n zall = tuple(zall)\n new_prefix = r'com.appspot.x19290'\n bundle_names = tuple(new_prefix + proj.name[prefix_len:] for proj in zall)\n valid_names = set(bundle_names)\n for bundle_name, proj in zip(bundle_names, zall):\n mf = proj.joinpath(r'META-INF/MANIFEST.MF')\n with mf.open(r'r+t') as iostream:\n for error in _edit(bundle_name, iostream, valid_names):\n yield error\n\n_bundle_name_prefix = r'Bundle-SymbolicName: '\n_bundle_name_prefix_len = len(_bundle_name_prefix)\n_singleton = r';singleton:=true'\n_singleton_len = len(_singleton)\ndef _edit(bundle_name, iostream, valid_names):\n #{pylint: disable=import-error\n from ..lib.configparser import ConfigParser\n from ..lib.stringio import StringIO\n #}pylint: enable=import-error\n from shutil import copyfileobj\n b = StringIO()\n p = ConfigParser()\n p.readfp_no_section(iostream)\n for k in p.options(r'no-section'):\n v = p.get_cooked(r'no-section', k)\n if k == r'Bundle-SymbolicName':\n v = bundle_name + (r'', _singleton)[v.endswith(_singleton)]\n if k == r'Require-Bundle':\n for error in _all_valid(bundle_name, v, valid_names):\n yield error\n if r',' in v:\n v = ',\\n '.join(sorted(v.split(r','), key=_pre_semicolon))\n print(k, v, file=b, sep=r': ')\n iostream.seek(0)\n orig = iostream.read()\n if b.getvalue() == orig:\n return\n b.seek(0)\n iostream.seek(0)\n iostream.truncate(0)\n copyfileobj(b, iostream)\n\ndef _all_valid(bundle_name, v, valid_names):\n for required_plus_alpha in v.split(r','):\n semi = required_plus_alpha.rfind(r';')\n required = required_plus_alpha\n if 0 <= semi:\n required = required_plus_alpha[:semi]\n else:\n yield r';', bundle_name, required\n if not required.startswith(r'com.appspot.x19290.'):\n continue\n if required not in valid_names:\n yield r'?', bundle_name, required\n\ndef _pre_semicolon(s):\n semi = s.find(r';')\n rv = s[:semi] if 0 <= semi else s\n return rv","sub_path":"x19290/app/eclipsesyncapp.py","file_name":"eclipsesyncapp.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"371455370","text":"import logging\nimport os\nimport sys\n\nfrom foxylib.tools.file.file_tools import FileToolkit\nfrom foxylib.tools.log.logger_tools import FoxylibLogger, LoggerToolkit\nfrom functools import reduce, lru_cache\n\nFILE_PATH = os.path.realpath(__file__)\nREPO_DIR = reduce(lambda x,f:f(x), [os.path.dirname]*3, FILE_PATH)\nLOG_DIR = os.path.join(REPO_DIR,\"log\")\n\nclass HenriqueLogger:\n ROOTNAME = \"henrique\"\n level = logging.DEBUG\n\n @classmethod\n def dirpath(cls): return LOG_DIR\n\n @classmethod\n def _rootname_list(cls):\n return 
[FoxylibLogger.ROOTNAME, cls.ROOTNAME]\n\n @classmethod\n def attach_handler2loggers(cls, handler):\n for rootname in cls._rootname_list():\n logger = logging.getLogger(rootname)\n LoggerToolkit.add_or_skip_handlers(logger, [handler])\n\n @classmethod\n @lru_cache(maxsize=None)\n def attach_filepath2loggers(cls, filepath):\n FileToolkit.dirpath2mkdirs(os.path.dirname(filepath))\n handler = LoggerToolkit.handler2formatted(LoggerToolkit.filepath2handler_default(filepath))\n handler.setLevel(cls.level)\n cls.attach_handler2loggers(handler)\n\n @classmethod\n @lru_cache(maxsize=2)\n def attach_stderr2loggers(cls,):\n handler = LoggerToolkit.handler2formatted(logging.StreamHandler(sys.stderr))\n handler.setLevel(cls.level)\n cls.attach_handler2loggers(handler)\n\n\n @classmethod\n def func2name(cls, func):\n return LoggerToolkit.rootname_func2name(cls.ROOTNAME, func)\n\n @classmethod\n def func2logger(cls, func):\n return cls.func_level2logger(func, cls.level)\n\n @classmethod\n def func_level2logger(cls, func, level):\n logger = logging.getLogger(cls.func2name(func))\n logger.setLevel(level)\n return logger\n\n @classmethod\n def filename2logger(cls, filename):\n logger = LoggerToolkit.rootname_filename2logger(cls.ROOTNAME, filename)\n logger.setLevel(cls.level)\n return logger\n","sub_path":"henrique/main/hub/logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"82126626","text":"from tkinter import *\nfrom tkinter import ttk, filedialog, messagebox\nfrom tkinter.ttk import *\n# --\nimport xlrd\nfrom xlrd import open_workbook\n# --\nimport xlwt\nfrom xlwt import Workbook, easyxf\n# --\nfrom xlutils.copy import copy\n\nclass Root(Tk):\n def __init__(self):\n super(Root, self).__init__()\n self.title(\"Account Management\")\n self.geometry(\"800x600\")\n self.iconbitmap(\"banana.ico\") # Replace the Icon\n self.resizable(0, 0)\n self.style = Style()\n self.style.configure('TButton', font = ('fixedsys', 12,), borderwidth = '0')\n\n self.objectList = []\n self.sheetNumList = []\n self.data = [[]]\n self.counter = 1\n\n # Frames\n self.labelFrame = ttk.LabelFrame(self, text = \"Open a File\", width=30, height=30)\n self.labelFrame.grid(column=1, row=0, padx=30, pady=30) # 100 30\n\n self.sheetEntry = ttk.LabelFrame(self, text = \"Sheet Number\")\n self.sheetEntry.grid(column=2, row=0, padx=35, pady=30)\n\n self.addBtnFrame = ttk.LabelFrame(self, text=\"add file\".title())\n self.addBtnFrame.grid(column=3, row=0, padx=30)\n\n self.fileNameFrame = ttk.LabelFrame(self, text=\"Object's Name\")\n self.fileNameFrame.grid(column=0, row=0, padx=30)\n\n self.objectListFrame = ttk.LabelFrame(self, text=\"Files\")\n self.objectListFrame.grid(column=1, row=2)\n\n self.startBtnFrame = ttk.LabelFrame(self, text=\"Start Calculation\")\n self.startBtnFrame.grid(column=1, row=3)\n\n # General\n self.sheetNum = Entry(self.sheetEntry) # Sheet Number Entry\n self.sheetNum.grid()\n\n self.addBtn = Button(self.addBtnFrame, text=\"Add\", command=self.addObject) # Add Button\n self.addBtn.grid()\n\n self.startBtn = ttk.Button(self.startBtnFrame, text=\"Start\", command=self.readExcel) # Start Button\n self.startBtn.grid()\n\n self.openFileBtn() # Browse File\n\n\n def openFileBtn(self):\n self.button = ttk.Button(self.labelFrame, text = \"Browse a File\", command=self.fileDialog)\n self.button.grid(column=0, row=0)\n\n def fileDialog(self):\n # File Dialog method, asks the user for the 
file path.\n self.filename = filedialog.askopenfilename(initialdir = \"/\", title=\"Select a File\", filetype = ((\".xlsx\", \"*.xlsx\"), (\".xls\", \"*.xls\")))\n print(self.filename)\n if self.filename != \"\":\n self.file = Label(root, text=self.filename)\n self.file.grid(column=2, row=1)\n else:\n messagebox.showinfo(\"Invalid Input\", \"Enter a valid file path\")\n\n def addObject(self):\n # Takes Object Name and Sheet Number and than displays it.\n try:\n if (int(self.sheetNum.get()) or int(self.sheetNum.get()) == 0):\n try:\n if self.filename != \"\": # Bug\n if len(self.objectList) <= 19:\n self.theList = self.filename.split(\"/\")\n self.row = Label(self.objectListFrame, text=f\"{self.counter}) {self.theList[-1]}, {abs(int(self.sheetNum.get()))}\")\n self.objectList.append(self.filename)\n self.sheetNumList.append(abs(int(self.sheetNum.get())))\n self.row.grid()\n self.counter+=1\n else:\n messagebox.showerror(\"Error Occurred\", \"Maximum list objects is 20.\")\n else:\n messagebox.showerror(\"Error Occurred\", \"Enter the file's path.\")\n except AttributeError:\n messagebox.showerror(\"Error Occurred\", \"Enter a file path.\")\n except ValueError:\n messagebox.showerror(\"Error Occurred\", \"Enter a valid sheet number.\")\n\n\n def readExcel(self):\n # This method triggers the Start button.\n\n if len(self.objectList) >= 1: # Update\n print(\"0--+--0\")\n\n for i in range(len(self.objectList)):\n print(\"------------\")\n self.book = xlrd.open_workbook(self.objectList[i])\n self.sheet = self.book.sheet_by_index(self.sheetNumList[i])\n\n self.titles = self.sheet.row_values(0)\n\n\n for x in range(self.sheet.ncols):\n for row in range(self.sheet.nrows):\n for column in range(self.sheet.ncols):\n self.cell = self.sheet.cell(row, column).value\n self.data[x][x].append(self.cell)\n print(self.data)\n\n else:\n messagebox.showerror(\"Error Occurred\", \"Enter at least 2 files!\")\n\n\n\nif __name__ == \"__main__\":\n root = Root()\n root.mainloop()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"206968901","text":"import re\nimport logging\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom progress.bar import Bar\n\n# Since the database is generally unclean theses are a group of functions that are used repeatedly\nimport rr_fun\n\nimport boto3 #AWS\nimport botocore\n\ns3 = boto3.client('s3')\nBUCKET_NAME = 'fabiano-crm-consys'\n\n# Import experian data\nexperian = pd.read_csv(\n 'imports/experian2.csv',\n index_col=False,\n low_memory=False,\n encoding=\"ISO-8859-1\",\n)\n\n# Import latest deals file\ncontacts = s3.get_object(Bucket=BUCKET_NAME, Key='DUMP/Deals1.csv')\ncontacts = pd.read_csv(\n contacts['Body'],\n index_col=False,\n low_memory=False,\n encoding=\"ISO-8859-1\",\n)\n\n# Drop extra fields\ncontacts = contacts[['dealid', 'ClientName', 'StreetAddress', 'ZipCode', 'City', 'State', 'HomePhone', 'SecondaryPhone', 'EmailAddress', 'status']]\n\n# Only working with lead cloud status\ncontacts = contacts[contacts[\"status\"].str.contains('1|2|5a')==True]\ncontacts = contacts.loc[-contacts['status'].isin([False])]\nall_lead = contacts['dealid'].unique().tolist()\n\n# Clean names\ncontacts['Full Name'] = contacts['ClientName'].apply(rr_fun.clean_client_name)\ncontacts['First Name'], contacts['Last Name'] = contacts['Full Name'].str.split(' ', 1).str\n\n# ========= Experian ==========\n\n# Only dealids with that 
match status\nexperian = experian.loc[experian['dealid'].isin(all_lead)]\n\n# Remove rows without address\nexperian = experian.dropna(subset=['RA RETURNED A1'])\nexperian = experian.dropna(subset=['EMAILADDRESS'])\n\n# Lowercase email and address\nexperian['EMAILADDRESS'] = experian['EMAILADDRESS'].str.lower()\nexperian['RA RETURNED A1'] = experian['RA RETURNED A1'].str.lower()\nexperian['RA RETURNED FIRST NAME'] = experian['RA RETURNED FIRST NAME'].str.title()\nexperian['RA RETURNED LAST NAME'] = experian['RA RETURNED LAST NAME'].str.title()\n\n# Get unique phone and emails and save in memory\nunique_email = experian['EMAILADDRESS'].unique().tolist()\nunique_phone = experian['PHONE'].unique().tolist()\nunique_address = experian['RA RETURNED A1'].unique().tolist()\n\n# Clean name and addresses\nexperian['Address'] = experian['RA RETURNED A1'].str.title()\nexperian['City'] = experian['RA RETURNED CITY'].str.title()\n\ncontact_names = contacts[['dealid', 'ClientName', 'Full Name', 'First Name', 'Last Name', 'status']]\n\nexperian = pd.merge(experian, contact_names, how='left', left_on='dealid', right_on='dealid')\n\nlist_of_experian = experian['dealid'].unique().tolist()\n\nexperian = experian[['dealid', 'status', 'Full Name', 'First Name', 'Last Name', 'Address', 'City', 'RA RETURNED STATE', 'RA RETURNED ZIP', 'RA RETURNED FIRST NAME', 'RA RETURNED LAST NAME', 'ClientName']]\n\nexperian.to_csv('./Exports/export-experian.csv', index=False)\n\n\n# ============ CRM ============ \n\n# Lowercase email and address\ncontacts['EmailAddress'] = contacts['EmailAddress'].str.lower()\ncontacts['StreetAddress'] = contacts['StreetAddress'].str.lower()\n\nprint(\"Entire:\", contacts.shape[0])\n# Drop leads that already exist in experian\ncontacts = contacts.loc[-contacts['dealid'].isin(list_of_experian)]\nprint(\"Entire minus experian:\", contacts.shape[0])\n\n# Drop rows with blank address\ncontacts = contacts.dropna(subset=['StreetAddress'])\nprint(\"No Street Address:\", contacts.shape[0])\n\n# A lot of dirt\ncontacts = contacts[contacts[\"StreetAddress\"].str.contains('will provide|Refused')==False]\ncontacts = contacts.loc[-contacts['StreetAddress'].isin([False])]\nprint(\"Without Dirt:\", contacts.shape[0])\n\n# Eh? 
No canada\ncontacts = contacts[contacts[\"City\"].str.contains('Canada|CANADA|canada')==False]\ncontacts = contacts[contacts[\"State\"].str.contains('Canada|CANADA|canada')==False]\ncontacts = contacts.loc[-contacts['City'].isin([False])]\ncontacts = contacts.loc[-contacts['State'].isin([False])]\nprint(\"No Canada:\", contacts.shape[0])\n\n# Clean phone number to only digits\ncontacts['HomePhone'] = contacts['HomePhone'].apply(rr_fun.clean_number)\ncontacts['SecondaryPhone'] = contacts['SecondaryPhone'].apply(rr_fun.clean_number)\n\n# Remove rows that match the list of phone # and emails\ncontacts = contacts.loc[-contacts['HomePhone'].isin(unique_phone)]\nprint(\"Home:\", contacts.shape[0])\n\ncontacts = contacts.loc[-contacts['SecondaryPhone'].isin(unique_phone)]\nprint(\"Secondary:\", contacts.shape[0])\n\ncontacts = contacts.loc[-contacts['EmailAddress'].isin(unique_email)]\nprint(\"Email:\", contacts.shape[0])\n\n# Lowercase email and address\ncontacts['StreetAddress'] = contacts['StreetAddress'].str.lower()\n\n# Drop address that show up in Experian\ncontacts = contacts.loc[-contacts['StreetAddress'].isin(unique_address)]\n\n# Title the streets\ncontacts['StreetAddress'] = contacts['StreetAddress'].str.title()\nprint(\"Address:\", contacts.shape[0])\n\nprint(\"Only 1,2,5a:\", contacts.shape[0])\nprint(len(contacts['StreetAddress'].unique().tolist()))\nprint(len(contacts['HomePhone'].unique().tolist()))\nprint(len(contacts['EmailAddress'].unique().tolist()))\n\ncontacts = contacts[['dealid', 'status', 'ClientName', 'Full Name', 'First Name', 'Last Name','StreetAddress', 'City', 'State', 'ZipCode']]\n\ncontacts.to_csv('./Exports/export-addresses.csv', index=False)","sub_path":"RR-Mailing.py","file_name":"RR-Mailing.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"97619845","text":"#!/usr/bin/python\n#-*-coding:utf-8 -*-\n# author: zhou yong xia\n# creation date: 2018-09-12\n\n\nimport socket\nimport cv2\nimport sys\nfrom SocketDataTransfer import SocketDataTransfer\nfrom SocketServerForReceive import SocketServerForReceive\n\n\nif __name__ == '__main__':\n strmsg = input(\"choose the role of this program as a server or a client: \\\n \\ns for server to receive a image, \\\n c for client to send a image:\")\n\n if( strmsg.startswith(\"s\")):\n st = SocketDataTransfer()\n st.start_server('', 8010, SocketServerForReceive)\n\n else:\n img = cv2.imread('/home/zyx/图片/c1s1_000151.jpg')\n\n st = SocketDataTransfer()\n st.connect_to_server('192.168.20.191', 8010)\n\n st.send_data_type('image')\n st.send_image(img)\n\n st.close()\n\n\n\ndef send_video(sock, video_source):\n # 从摄像头采集图像\n capture = cv2.VideoCapture(video_source)\n if not capture.isOpened():\n print(\"video cannot be opened\")\n return\n ret, frame = capture.read()\n encode_param=[int(cv2.IMWRITE_JPEG_QUALITY), 90] #设置编码参数\n\n # send images one by one\n while ret:\n # send one frame\n #send_image_data_one_by_one(sock, frame)\n send_image(sock, frame)\n\n ret, frame = capture.read()\n cv2.imshow('sending', frame)\n if cv2.waitKey(10) == 27:\n break\n # 接收对方发送的返回信息 ok\n data_r = sock.recv(2)\n print(data_r)\n\n sock.close()\n cv2.destroyAllWindows()\n\n\n# 接受图片大小的信息\ndef recv_size(sock, count):\n buf = b''\n while count:\n newbuf = sock.recv(count)\n if not newbuf:\n return None\n buf += newbuf\n count -= len(newbuf)\n return buf\n\n\n# 接收图片\n#compatibleCplusplus: compatible with c++\ndef recv_image(sock, count, 
compatibleCplusplus=0):\n buf = ''\n if compatibleCplusplus:\n # 这里每次只接收一个字节的原因是增强python与C++的兼容性\n # python可以发送任意的字符串,包括乱码,但C++发送的字符中不能包含'\\0',也就是字符串结束标志位\n while count:\n newbuf = sock.recv(1)\n if not newbuf:\n return None\n buf += newbuf\n count -= len(newbuf)\n else:\n newbuf = sock.recv(count)\n if not newbuf:\n return None\n buf += newbuf\n return buf\n\n\n# receive one image:\n# firstly, receive the size of image bytes, then receive image data\n# return the received image\ndef recv_image(conn):\n length = recv_size(conn, 16) # 首先接收来自客户端发送的大小信息\n print(\"length=\", length)\n if isinstance(length, str): # 若成功接收到大小信息,进一步再接收整张图片\n stringData = recv_image(conn, int(length))\n data = numpy.fromstring(stringData, dtype='uint8')\n decimg = cv2.imdecode(data, 1) # 解码处理,返回mat图片\n return decimg\n\n\ndef receive_video(conn):\n while 1:\n decimg = recv_image(conn)\n\n cv2.imshow('receiving', decimg)\n if cv2.waitKey(10) == 27:\n break\n print('Image recieved successfully!')\n conn.send(\"ok\")\n if cv2.waitKey(10) == 27:\n break\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n str = input(\"choose the role of this program as a server or a client: \\\n \\ns for server to receive a video, c for client to send a video:\")\n\n if( str.startswith(\"s\")):\n # socket.AF_INET用于服务器与服务器之间的网络通信\n # socket.SOCK_STREAM代表基于TCP的流式socket通信\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 设置地址与端口,如果是接收任意ip对本服务器的连接,地址栏可空,但端口必须设置\n address = ('', 8010)\n s.bind(address) # 将Socket(套接字)绑定到地址\n s.listen(True) # 开始监听TCP传入连接\n print('Waiting for images...')\n\n # 接受TCP链接并返回(conn, addr),其中conn是新的套接字对象,可以用来接收和发送数据,addr是链接客户端的地址。\n conn, addr = s.accept()\n\n receive_video(conn)\n s.close()\n\n else:\n try:\n # send video by the following socket\n # socket.AF_INET用于服务器与服务器之间的网络通信\n # socket.SOCK_STREAM代表基于TCP的流式socket通信\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 连接服务端\n address_server = ('192.168.20.191', 8010)\n sock.connect(address_server)\n except socket.error as msg:\n print(msg)\n sys.exit(1)\n\n send_video(sock, \"/home/zyx/视频/basketball.mp4\")\n sock.close()\n\n","sub_path":"transfer_video.py","file_name":"transfer_video.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"386658559","text":"\"\"\"Tests for finding untranslated prose.\n\"\"\"\nimport os\nimport sys\nimport unittest\nimport xml.sax\nimport io\nimport i18ndude.untranslated\nfrom argparse import Namespace\nfrom i18ndude.script import find_untranslated as script\nfrom i18ndude.tests.utils import suppress_stdout\n\nTEST_DIR = os.path.dirname(__file__)\nPY3 = sys.version_info > (3,)\nif PY3:\n unicode = str\n unicode_StringIO = io.StringIO\nelse:\n import StringIO\n # io.StringIO in python2 (but not in python3) raises:\n # TypeError: unicode argument expected, got 'str'\n unicode_StringIO = StringIO.StringIO\n\n\ndef find_untranslated(input):\n out = unicode_StringIO()\n parser = xml.sax.make_parser(['expat'])\n handler = i18ndude.untranslated.VerboseHandler(parser, out)\n parser.setContentHandler(handler)\n parser.parse(unicode_StringIO(input))\n return out.getvalue()\n\n\nclass TestUntranslated(unittest.TestCase):\n\n def test_untranslated_content(self):\n \"\"\"\n find-untranslated can find strings missing the i18n:translate marker\n and it will show an error.\n \"\"\"\n result_with_errors = find_untranslated('
<html><body><p>foo</p></body></html>')\n self.assertIn(\n 'i18n:translate missing for this:\n\"\"\"\nfoo\n\"\"\"',\n result_with_errors)\n self.assertIn(\n '(0 warnings, 1 errors)',\n result_with_errors)\n\n def test_untranslated(self):\n \"\"\"\n find-untranslated finds no error if the i18n:translate marker is set.\n \"\"\"\n result_without_errors = find_untranslated(\n '<html><body><p i18n:translate=\"\">foo</p></body></html>')\n self.assertNotIn(\n 'i18n:translate missing',\n result_without_errors)\n self.assertIn('(0 warnings, 0 errors)', result_without_errors)\n\n def test_ignore_untranslated_with_marker(self):\n \"\"\"\n Adding the i18n:ignore marker will skip untranslated strings.\n \"\"\"\n result_with_marker = find_untranslated(\n '<html><body><p i18n:ignore=\"\">foo</p></body></html>')\n self.assertIn('(0 warnings, 0 errors)', result_with_marker)\n\n def test_ignore_untranslated_attribute(self):\n \"\"\"\n Attributes missing the i18n:attributes marker will cause\n find-untranslated to show an error.\n\n Attributes marked with i18n:ignore-attributes will cause\n find-untranslated to not show an error.\n \"\"\"\n result_without_attributes = find_untranslated(\n '<html><body><img title=\"bar\" /></body></html>')\n self.assertIn(\n 'title attribute of <img> lacks i18n:attributes',\n result_without_attributes)\n self.assertIn('(0 warnings, 1 errors)', result_without_attributes)\n\n result_with_ignore_attributes = find_untranslated(\n '''<html><body><img title=\"bar\" i18n:ignore-attributes=\"title\" /></body></html>''')\n self.assertIn('(0 warnings, 0 errors)', result_with_ignore_attributes)\n\n def test_ignore_untranslated_attribute_complain_about_other_attrs(self):\n \"\"\"\n find-untranslated will find an error if not all attributes are marked\n to be ignored.\n \"\"\"\n result_without_attributes = find_untranslated(\n '''<html><body><img title=\"bar\"\n alt=\"qux\"\n i18n:ignore-attributes=\"title\" /></body></html>''')\n self.assertIn(\n 'alt attribute of <img> lacks i18n:attributes',\n result_without_attributes)\n self.assertIn('(0 warnings, 1 errors)', result_without_attributes)\n\n def test_ignore_untranslated_attribute_multiple_attrs(self):\n \"\"\"\n find-untranslated finds no error if multiple attributes are marked\n to be ignored.\n \"\"\"\n result_with_multiple_ignore_attributes = find_untranslated(\n '''<html><body><img title=\"bar\"\n alt=\"qux\"\n i18n:ignore-attributes=\"title;alt\" /></body></html>''')\n self.assertIn('(0 warnings, 0 errors)',\n result_with_multiple_ignore_attributes)\n\n def test_find_untranslated_placeholder_attribute(self):\n result_with_untranslated_placeholder = find_untranslated(\n '<html><body><input placeholder=\"foo\" /></body></html>')\n self.assertIn(\n 'placeholder attribute of <input> lacks i18n:attributes',\n result_with_untranslated_placeholder)\n\n def test_ignore_translated_placeholder_attribute(self):\n result_with_translated_placeholder = find_untranslated(\n '<html><body><input placeholder=\"foo\" i18n:attributes=\"placeholder\" /></body></html>
')\n self.assertNotIn(\n 'placeholder attribute of lacks i18n:attributes',\n result_with_translated_placeholder)\n\n\nclass TestUntranslatedScript(unittest.TestCase):\n\n def test_script_template_1(self):\n path = os.path.join(TEST_DIR, 'input', 'test1.pt')\n with suppress_stdout():\n result = script(Namespace(\n silent=False, nosummary=False, files=[path]))\n self.assertEqual(result, 0)\n\n def test_script_template_3(self):\n path = os.path.join(TEST_DIR, 'input', 'test3.pt')\n with suppress_stdout():\n result = script(Namespace(\n silent=False, nosummary=False, files=[path]))\n self.assertEqual(result, 1)\n\n def test_script_template_4(self):\n path = os.path.join(TEST_DIR, 'input', 'test4.pt')\n output = unicode_StringIO()\n old_stdout = sys.stdout\n sys.stdout = output\n try:\n result = script(Namespace(\n silent=False, nosummary=False, files=[path]))\n finally:\n sys.stdout = old_stdout\n self.assertEqual(result, 1)\n # A specific line should be reported as missing an i18n.\n self.assertIn('{}:16'.format(path), output.getvalue())\n\n def test_script_directory(self):\n path = os.path.join(TEST_DIR, 'input')\n with suppress_stdout():\n result = script(Namespace(\n silent=False, nosummary=False, files=[path]))\n self.assertEqual(result, 2)\n","sub_path":"src/i18ndude/tests/test_untranslated.py","file_name":"test_untranslated.py","file_ext":"py","file_size_in_byte":6111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"641380573","text":"#!/usr/bin/env python\n# ian.heywood@physics.ox.ac.uk\n\nimport matplotlib\nmatplotlib.use('agg')\n\nimport daskms as xms\nimport time\nimport sys\nimport pylab\nimport numpy\nimport ShadeMS\n\nlog = ShadeMS.log\n\ndef get_chan_freqs(myms):\n spw_tab = xms.xds_from_table(\n myms+'::SPECTRAL_WINDOW', columns=['CHAN_FREQ'])\n chan_freqs = spw_tab[0].CHAN_FREQ\n return chan_freqs\n\n\ndef get_field_names(myms):\n field_tab = xms.xds_from_table(\n myms+'::FIELD', columns=['NAME', 'SOURCE_ID'])\n field_ids = field_tab[0].SOURCE_ID.values\n field_names = field_tab[0].NAME.values\n return field_ids, field_names\n\n\ndef freq_to_wavel(ff):\n c = 299792458.0 # m/s\n return c/ff\n\n\ndef make_plot(data, data_xmin, data_xmax, data_ymin, data_ymax, xmin, xmax, ymin, ymax, xlabel, ylabel, title, pngname, bgcol, fontsize, figx=24, figy=12):\n\n def match(artist):\n return artist.__module__ == 'matplotlib.text'\n\n fig = pylab.figure(figsize=(figx, figy))\n ax = fig.add_subplot(111, facecolor=bgcol)\n ax.imshow(X=data, extent=[data_xmin, data_xmax, data_ymin, data_ymax],\n aspect='auto', origin='upper')\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.plot(xmin,ymin,'.',alpha=0.0)\n ax.plot(xmax,ymax,'.',alpha=0.0)\n\n ax.set_xlim([numpy.min((data_xmin,xmin)),\n numpy.max((data_xmax,xmax))])\n\n ax.set_ylim([numpy.min((data_ymin,ymin)),\n numpy.max((data_ymax,ymax))])\n\n for textobj in fig.findobj(match=match):\n textobj.set_fontsize(fontsize)\n fig.savefig(pngname, bbox_inches='tight')\n return pngname\n\n\ndef now():\n # stamp = time.strftime('[%Y-%m-%d %H:%M:%S]: ')\n # msg = '\\033[92m'+stamp+'\\033[0m' # time in green\n stamp = time.strftime(' [%H:%M:%S] ')\n msg = stamp+' '\n return msg\n\n\ndef stamp():\n return str(time.time()).replace('.', '')\n\n\ndef blank():\n log.info('')\n\n\ndef fullname(shortname):\n fullnames = [('a', 'Amplitude', ''),\n ('p', 'Phase', '[rad]'),\n ('r', 'Real', ''),\n ('i', 'Imaginary', ''),\n ('t', 'Time', '[s]'),\n ('c', 'Channel', ''),\n ('f', 
'Frequency', '[Hz]'),\n ('uv', 'uv-distance', '[wavelengths]'),\n ('u', 'u', '[wavelengths]'),\n ('v', 'v', '[wavelengths]')]\n for xx in fullnames:\n if xx[0] == shortname:\n fullname = xx[1]\n units = xx[2]\n return fullname, units\n","sub_path":"ShadeMS/shadeMS.py","file_name":"shadeMS.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"184511545","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport cv2\nimport numpy as np\nimport filter\nimport convolution\n\n\ndef main():\n\n original_images = ['Elegent_Girl.jpg']\n filter_names = ['Naive', 'Sharpness_Center', 'Sharpness_Edge', 'Edge_Detection_360_degree',\n 'Edge_Detection_45_degree', 'Embossing_45_degree', 'Embossing_Asymmetric',\n 'Averaging_Blur', 'Completed_Blur', 'Motion_Blur', 'Gaussian_Blur']\n\n # Choose the filter from list:'filter_names'\n filter_name = 'Naive'\n\n for original_image in original_images:\n original_image_path = os.path.join(os.getcwd()[:-3], 'Image_Origin/', original_image)\n img =cv2.imread(original_image_path, 3)\n\n filter_0, filter_1, filter_2 = filter.Filter(filter_name)\n\n img2 = np.zeros((424,600,3), dtype=np.float)\n\n for i in range(1,423,1):\n for j in range(1,599,1):\n\n img2[i][j][0] = convolution.conv(img, filter_0, i, j)\n img2[i][j][1] = convolution.conv(img, filter_1, i, j)\n img2[i][j][2] = convolution.conv(img, filter_2, i, j)\n\n generated_image = filter_name + '.jpg'\n\n generated_image_path = os.path.join(os.getcwd()[:-3], 'Image_Generated/', generated_image)\n cv2.imwrite(generated_image_path,img2)\n img_show = cv2.imread(generated_image_path)\n\n cv2.imshow(generated_image, img_show)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"605640400","text":"import json\nimport base64\nimport time\nimport random\nimport requests\nfrom urllib.parse import urlparse, urlencode\nimport igetget_config\nimport lsieun_util\n\n\ndef get_raw_headers() -> dict:\n file_path = igetget_config.IGETGET_HEADERS_PATH\n line_list = lsieun_util.read_line_list(file_path)\n json_str = line_list[0]\n\n headers = json.loads(json_str)\n return headers\n\n\ndef update_headers(headers: dict, content_type: str, timestamp: str, nonce: str, sign: str) -> None:\n \"\"\"\n 更新Request的headers\n\n :param headers:\n :param content_type: application/x-www-form-urlencoded 或\n :param timestamp:\n :param nonce:\n :param sign:\n :return:\n \"\"\"\n headers[\"Content-Type\"] = content_type\n headers[\"G-Auth-Ts\"] = timestamp\n headers[\"G-Auth-Nonce\"] = nonce\n headers[\"G-Auth-Sign\"] = sign\n\n\ndef current_timestamp():\n t = time.time()\n seconds = int(round(t))\n return str(seconds)\n\n\ndef get_random_char():\n char_list = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n index = random.randint(0, len(char_list)-1)\n return char_list[index]\n\n\ndef get_nonce(count = 16):\n nonce = \"\"\n\n num = 0\n while num < count:\n ch = get_random_char()\n nonce += ch\n num = num + 1\n\n return nonce\n\n\ndef generate_sign(encoded_path: str, method: str,\n encoded_query: str, content_type: str,\n payload: str, timestamp: str,\n nonce: str, token: str, verbose=True):\n \"\"\"\n encoded_path = \"/parthenon/v1/articleaudio/listall\"\n method = 
\"POST\"\n encoded_query = \"\"\n content_type = \"application/x-www-form-urlencoded\"\n body_param = \"column_id=20&count=20&max_id=0&order=1§ion=0&since_id=0\"\n timestamp = \"1531555906\"\n nonce = \"3ebcd8a61e161b4c\"\n token = \"eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJsdW9qaWxhYi5jb20iLCJleHAiOjE1MzI4NTE4ODUsImlhdCI6MTUzMTU1NTg4NSwiaXNzIjoiRWFzZUdhdGV3YXkgSldUQXV0aCBQbHVnaW4iLCJuYmYiOjE1MzE1NTU4ODUsInN1YiI6IjE5MDcyNDgifQ.9baS3uzV_YnO7HxdfrG7IoQtppWA8K398lare2mUVsupqUtQ6OX4rI6QtkxqMgCoNYvpD1vKOQKVOLIKDyQlxg\"\n \"\"\"\n # 拼接字符串\n conn_str = \"\"\n conn_str = conn_str + encoded_path + \"\\n\"\n conn_str = conn_str + method + \"\\n\"\n conn_str = conn_str + encoded_query + \"\\n\"\n conn_str = conn_str + content_type + \"\\n\"\n conn_str = conn_str + payload + \"\\n\"\n conn_str = conn_str + timestamp + \"\\n\"\n conn_str = conn_str + nonce + \"\\n\"\n conn_str = conn_str + token\n\n # MD5加密\n # m = hashlib.md5()\n # m.update(conn_str.encode(\"UTF8\"))\n # md5_str = m.hexdigest()\n md5_str = lsieun_util.make_md5(conn_str)\n md5_bytes = md5_str.encode(\"UTF8\")\n\n # Base64编码\n base64_bytes = base64.b64encode(md5_bytes)\n base64_str = base64_bytes.decode(\"UTF8\")\n\n if verbose:\n print(\"=\"*30, \"sign\", \"=\"*30)\n print(\" conn_str = \\r\\n%s\" % conn_str)\n print(\" md5_str = %s\" % md5_str)\n print(\"base64_str = %s\" % base64_str)\n print(\"=\" * 66)\n\n return base64_str\n\n\ndef get_sign(token: str) -> str:\n param_string = \"dc923c14b6419aca91d8bb1e2e5e35e4\"\n sb = \"appid\" + \"1632426125495894021\" + \",\"\n sb += \"token\" + token + \",\" + param_string\n md5_str = lsieun_util.make_md5(sb)\n sign = md5_str[2:18]\n return sign\n\n\ndef post(url: str, post_data: dict, content_type: str, method=igetget_config.METHOD_POST, verbose=True) -> str:\n # (1)請求的url和data\n parse_result = urlparse(url)\n encoded_path = parse_result.path\n encoded_query = parse_result.query\n if content_type == igetget_config.CONTENT_TYPE_JSON:\n payload = json.dumps(post_data)\n else:\n payload = urlencode(post_data)\n\n if verbose:\n print(\"=\"*30, \"post\", \"=\"*30)\n print(\"url = {}\".format(url))\n print(\"encoded_path = {}\".format(encoded_path))\n print(\"encoded_query = {}\".format(encoded_query))\n print(\"payload = {}\".format(payload))\n print(\"=\" * 66)\n\n # (2)請求headers\n headers = get_raw_headers()\n token = headers.get(\"G-Auth-Token\")\n timestamp = current_timestamp()\n nonce = get_nonce()\n sign = generate_sign(encoded_path, method, encoded_query, content_type, payload, timestamp, nonce, token)\n update_headers(headers, content_type, timestamp, nonce, sign)\n\n if verbose:\n lsieun_util.view(headers, \"Request Headers\")\n\n # (3)發送請求,返回數據\n r = requests.post(url, data=payload.encode(\"UTF8\"), headers=headers)\n status_code = r.status_code\n response_headers = r.headers\n response_text = r.text\n if verbose:\n print(\"status_code = {}\".format(status_code))\n print(\"response_headers = {}\".format(response_headers))\n print(\"response_text = {}\".format(response_text))\n print(\"=\" * 60)\n\n return response_text\n\n\ndef get(url, verbose=True):\n headers = get_raw_headers()\n headers.pop(\"G-Auth-Sign\")\n headers.pop(\"X-UID\")\n headers.pop(\"G-Auth-Ts\")\n headers.pop(\"G-Auth-Token\")\n headers.pop(\"G-Auth-Nonce\")\n r = requests.get(url, headers)\n status_code = r.status_code\n response_headers = r.headers\n response_text = r.text\n if verbose:\n print(\"url = {}\".format(url))\n print(\"status_code = {}\".format(status_code))\n print(\"response_headers = 
{}\".format(response_headers))\n print(\"response_text = {}\".format(response_text))\n print(\"=\" * 60)\n\n return response_text\n\n\ndef get_data_list(response_text: str) -> list:\n d = json.loads(response_text)\n c = d.get(\"c\")\n lst = c.get(\"list\")\n return lst\n","sub_path":"python/app/igetget/version_mitmproxy/request_util.py","file_name":"request_util.py","file_ext":"py","file_size_in_byte":5599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"441031515","text":"import os\nfrom twilio.rest import Client\n\n\naccount_sid = \"AC60c06b56f769a6d030463edd5f9e2809\" #Your Twilio account ID\nauth_token = \"d6bfdd719f8f7aaa0ec98c8a2da1f815\" #Your secret API Token\n \nclient = Client(account_sid, auth_token)\n\n\nif __name__ == '__main__':\n print(\"SMS sent\")\n\n message = client.messages \\\n .create(\n body=\"Hello. Are you Evans Costa\",\n from_='+17122645494',\n to='+16479067569'\n )\n print(message.sid)","sub_path":"Test_Phone.py","file_name":"Test_Phone.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"488184980","text":"from django.urls import path\nfrom .views.user_views import landing_view, register, login_view, logout_view, access_denied\n\nurlpatterns = [\n path('', landing_view, name=\"landing_view\"),\n path('register/', register, name=\"register\"),\n path('login/', login_view, name=\"login\"),\n path('logout/', logout_view, name=\"logout\"),\n path('access_denied/', access_denied, name=\"access_denied\"),\n]","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"264131410","text":"# 测试代码可用性: 提取特征\n##\n# 图谱可视化:https://github.com/ownthink/KnowledgeGraphData ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#https://www.ownthink.com/knowledge.html\n\n'''\n构建知识图谱的逻辑.\nalbert没法生成词向量. bert生成词向量.-------顺势函数用的cosin? 
我记得用的cross entropy\n'''\n\n\n\n\n\n'''\n看看bert词向量的用法:\n\n\n\nhttps://github.com/hanxiao/bert-as-service/tree/master/server/bert_serving/server/bert\n\n\n\n\n\n\n\n'''\n\n\n\n\n\n\n\n\nfrom bert4keras.backend import keras\nfrom bert4keras.models import build_transformer_model\nfrom bert4keras.tokenizers import Tokenizer\nfrom bert4keras.snippets import to_array\nimport numpy as np\n\nconfig_path = '/mnt/chinese_L-12_H-768_A-12/bert_config.json'\ncheckpoint_path = '/mnt/chinese_L-12_H-768_A-12/bert_model.ckpt'\ndict_path = '/mnt/chinese_L-12_H-768_A-12/vocab.txt'\n\ntokenizer = Tokenizer(dict_path, do_lower_case=True) # 建立分词器\nmodel = build_transformer_model(config_path, checkpoint_path) # 建立模型,加载权重\n\n\n\n\n\n\nimport os, re, json\nimport json\nimport nmslib\nimport torch\nimport random\nimport pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\n#import tensorflow_hub as hub\nfrom sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer\nfrom joblib import dump, load\nfrom string import punctuation\nfrom operator import itemgetter\nfrom functools import wraps\nfrom pytorch_pretrained_bert import BertModel, BertTokenizer, BertConfig\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom rusenttokenize import ru_sent_tokenize\n\n\ndef singleton(cls):\n instance = None\n @wraps(cls)\n def inner(*args, **kwargs):\n nonlocal instance\n if instance is None:\n instance = cls(*args, **kwargs)\n return instance\n return inner\n\n\nclass BertEmbedder(object):\n \"\"\"\n Embedding Wrapper on Bert Multilingual Cased\n \"\"\"\n\n def __init__(self, path=''):\n # use self.model_file with a path instead of 'bert-base-uncased' if you have a custom pretrained model\n self.model_file = 'bert-base-uncased' # os.path.join(path, \"bert-base-multilingual-cased.tar.gz\")\n self.vocab_file = 'bert-base-uncased-vocab.txt' # os.path.join(path, \"data_bert-base-multilingual-cased-vocab.txt\") # 'bert-base-uncased'\n self.model = self.bert_model()\n self.tokenizer = self.bert_tokenizer()\n self.embedding_matrix = self.get_bert_embed_matrix()\n\n @singleton\n def bert_model(self):\n model = BertModel.from_pretrained(self.model_file).eval()\n return model\n\n @singleton\n def bert_tokenizer(self):\n tokenizer = BertTokenizer.from_pretrained(self.vocab_file, do_lower_case=False)\n return tokenizer\n\n @singleton\n def get_bert_embed_matrix(self):\n bert_embeddings = list(self.model.children())[0]\n bert_word_embeddings = list(bert_embeddings.children())[0]\n matrix = bert_word_embeddings.weight.data.numpy()\n return matrix\n\n def sentence_embedding(self, text):\n token_list = self.tokenizer.tokenize(\"[CLS] \" + text + \" [SEP]\")\n segments_ids, indexed_tokens = [1] * len(token_list), self.tokenizer.convert_tokens_to_ids(token_list)\n segments_tensors, tokens_tensor = torch.tensor([segments_ids]), torch.tensor([indexed_tokens])\n with torch.no_grad():\n encoded_layers, _ = self.model(tokens_tensor, segments_tensors)\n sent_embedding = torch.mean(encoded_layers[11], 1)\n return sent_embedding\n\n def sentences_embedding(self, text_list):\n embeddings = []\n for text in tqdm(text_list):\n token_list = self.tokenizer.tokenize(\"[CLS] \" + text + \" [SEP]\")\n segments_ids, indexed_tokens = [1] * len(token_list), self.tokenizer.convert_tokens_to_ids(token_list)\n segments_tensors, tokens_tensor = torch.tensor([segments_ids]), torch.tensor([indexed_tokens])\n with torch.no_grad():\n encoded_layers, _ = self.model(tokens_tensor, segments_tensors)\n sent_embedding = torch.mean(encoded_layers[11], 
1)\n embeddings.append(sent_embedding)\n return embeddings\n\n def token_embedding(self, token_list):\n token_embedding = []\n for token in token_list:\n ontoken = self.tokenizer.tokenize(token)\n segments_ids, indexed_tokens = [1] * len(ontoken), self.tokenizer.convert_tokens_to_ids(ontoken)\n segments_tensors, tokens_tensor = torch.tensor([segments_ids]), torch.tensor([indexed_tokens])\n with torch.no_grad():\n encoded_layers, _ = self.model(tokens_tensor, segments_tensors)\n ontoken_embeddings = []\n for subtoken_i in range(len(ontoken)):\n hidden_layers = []\n for layer_i in range(len(encoded_layers)):\n vector = encoded_layers[layer_i][0][subtoken_i]\n hidden_layers.append(vector)\n ontoken_embeddings.append(hidden_layers)\n cat_last_4_layers = [torch.cat((layer[-4:]), 0) for layer in ontoken_embeddings]\n token_embedding.append(cat_last_4_layers)\n token_embedding = torch.stack(token_embedding[0], 0) if len(token_embedding) > 1 else token_embedding[0][0]\n return token_embedding\n\n\n\ntmp=BertEmbedder()\ntmp=tmp.sentence_embedding\n\n\ntmp2=tmp('我们')\ntmp3=tmp('他们')\n\n\n\n\n\nprint(1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef vec2(tex):\n\n # 编码测试\n token_ids, segment_ids = tokenizer.encode(tex)\n token_ids, segment_ids = to_array([token_ids], [segment_ids])\n global tmp\n # tmp=model.predict([token_ids, segment_ids])[:,0,:] # 0代表取[CLS]\n tmp3=tmp(tex)\n # print(tmp)\n return tmp3\n\n\n\n\n\n\n\n\n\nfrom ltp import LTP\n\nltp = LTP()\n# text='我现在在天津,我想知道这里的大学都有什么学校.'\n\n\n\n\n\n\ndef searchKG(kglist,text): # 用bert来算距离的\n tmp3 = []\n for i in kglist:\n t = (cosine_distance(vec2(i), vec2(text)))\n tmp3.append(t)\n tmp3 = np.array(tmp3)\n print('所有的距离为',tmp3)\n # 查询到的最近kg 3元组是!!!!!!!!!!!!!!!!\n dix = np.argmin(tmp3)\n print('最近的3元组是', kglist[dix], '对应的阈值是', tmp3[dix])\n\n\n return kglist[dix]\n\n\nimport numpy as np\ndef cosine_distance(a, b): # fanwei 0---2\n if a.shape != b.shape:\n raise RuntimeError(\"array {} shape not match {}\".format(a.shape, b.shape))\n if a.ndim==1:\n a_norm = np.linalg.norm(a)\n b_norm = np.linalg.norm(b)\n elif a.ndim==2:\n a_norm = np.linalg.norm(a, axis=1, keepdims=True)\n b_norm = np.linalg.norm(b, axis=1, keepdims=True)\n else:\n raise RuntimeError(\"array dimensions {} not right\".format(a.ndim))\n similiarity = np.dot(a, b.T)/(a_norm * b_norm)\n dist = 1. 
- similiarity\n return dist\n\n\n\n\n\n\n\n'''\n下面我们进行bert计算距离:\nkg里面:\n天津的大学\n天津的人口\n天津的面积\n'''\n\n\n\n\n\n#---------------下面开始调用!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\nimport torch\n\nkglist=['大学','人口','面积']\ntext='姚明的妻子的丈夫的妻子'\ntext='我现在在天津,这里有什么大学?'\n# text='姚明的妻子'\n\n\n\n##\n#--------从测试看出来,ner本身对问题有干扰,所以在kglist里面要去除.\n\n\n\n\n\n\n\n# tiaozhuan=searchKG(kglist=['地点','地址','大小','颜色','老婆','丈夫'],text='我家住在和平区哪个地方')\n\n\n\n\n\n\n\n\n# print(tiaozhuan,\"jieguo shi !!!!!!!!!!!!!!!!\")\n##\n\n\n\n\n\n\n# 加入句子成分跳转.\nseg, hidden = ltp.seg([text])\n# sdp = ltp.sdp(hidden, graph=False)\n\nprint(seg,\"seg\")\npos = ltp.pos(hidden)\nner = ltp.ner(hidden)\nprint(\"ner\",ner)\nsrl = ltp.srl(hidden)\ndep = ltp.dep(hidden)\nsdp = ltp.sdp(hidden)\n\nprint(ner,\"ner结果\")\nseg=seg[0]\ndep=dep[0]\nsdp=sdp[0]\nprint(sdp,\"语义分析!!!!!!!!!!!!!!!!!!!\") # 太难用了.\nprint(dep)\nfor i in dep: #dep算法目前识别不出来老婆的跳转.\n\n print(i, seg[i[0]-1], seg[i[1]-1]) # 注意下标会多一个, 箭1后为真正下标.\n\n'''\n下面我们根据跳跃图简历bfs算法\n'''\n\n#dep 就是我们需要的图\n# 从Ner出发,进行遍历图.找到他到root的一个路线.\ntiaozhuanlist={'ATT','SBV'} # 正序表\ntiaozhuanlist2={'VOB'} # 范旭表\nner=ner[0]\nluxian=[]\nfor ner_sample in ner:\n # 然后对ner_sample进行跳跃搜索.\n\n ner_sample_index=ner_sample[1]+1 # 变成dep图的索引类型.\n luxian = [ner_sample_index]\n def search_new_node():\n ner_sample_index=luxian[-1]\n for i in dep:\n if i[0]==ner_sample_index and i[2] in tiaozhuanlist and i[1] not in luxian: # 进行的字跳转.并且防止循环.\n luxian.append(i[1])\n ner_sample_index=i[1]\n return 1\n if i[0]==ner_sample_index and i[2] in tiaozhuanlist2 and i[1] not in luxian: # 进行的字跳转.并且防止循环.\n luxian.append(i[1])\n ner_sample_index=i[1]\n return 1\n if i[1]==ner_sample_index and i[2] in tiaozhuanlist2 and i[0] not in luxian: # 进行的字跳转.并且防止循环. vob可能会反.也要考虑\n luxian.append(i[0])\n ner_sample_index=i[0]\n return 1\n return 0 # 说明找不到新跳跃了.\n while search_new_node():\n print(\"running\")\n print(luxian,\"bfs方法找到的路线!!!!!!!!!!!!!!!\")\n # 根据luxian ��转即可,原则是能跳转就跳转,跳转不了就停下,直接返回当前结果.\n\n\n#--------------上面拿到路线了luxian, 在kg里面进行跳转即可.# 可以做词向量距离,来进行模糊跳转.\n\n\n# 如果luxian里面长度是1,说明没有找到跳转.只有ner.那么我们就用luxian里面这个.进入词向量.搜索算法即可.\nif 1:\n print(\"下面用bert做辅助判断\")\n #kglist = luxian[0] 这个东西的所有的边.\n tiaozhuan = searchKG(kglist=['地点','地址','大小','老婆','丈夫'], text='妻子')\n # 利用距离小于一个阈值,我们就使用这个tiaozhuan,目前只支持bert算法的一次跳转,多次跳转没想到.\n\nif len(luxian)==0:\n print(\"ner没有\")\n\n\n\n\n\n\n","sub_path":"知识图谱项目/kg加深度学习/huizong平均.py","file_name":"huizong平均.py","file_ext":"py","file_size_in_byte":10139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"35603579","text":"class Solution:\n def solve(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: void Do not return anything, modify board in-place instead.\n \"\"\"\n if len(board) == 0: return\n\n def dfs(x, y):\n for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):\n if 0 <= nx < m and 0 <= ny < n and board[nx][ny] == 'O' and not visited[nx][ny]:\n visited[nx][ny] = True\n board[nx][ny] = 'V'\n dfs(nx, ny)\n\n m, n = len(board), len(board[0])\n visited = [[False] * n for _ in range(m)]\n\n border = ([(0, i) for i in range(n)] +\n [(m - 1, i) for i in range(n)] +\n [(i, 0) for i in range(m)] +\n [(i, n - 1) for i in range(m)])\n\n for x, y in border:\n if board[x][y] == 'O' and not visited[x][y]:\n visited[x][y] = True\n board[x][y] = 'V'\n dfs(x, y)\n\n for row in board:\n for col, val in enumerate(row):\n row[col] = 'XO'[val == 
'V']\n","sub_path":"DFS/130_Surrounded_regions.py","file_name":"130_Surrounded_regions.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"157987052","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015, ParaTools, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# (1) Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# (2) Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# (3) Neither the name of ParaTools, Inc. nor the names of its contributors may\n# be used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\"\"\"Draw progress indicators on the console.\n\nShow bars or spinners, possibly with instantaneous CPU load average.\n\"\"\"\n\nimport os\nimport sys\nimport threading\nimport logging\nimport itertools\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom taucmdr import logger\nfrom taucmdr.error import ConfigurationError\n\n\nLOGGER = logger.get_logger(__name__)\n\n\ndef _read_proc_stat_cpu():\n with open('/proc/stat') as fin:\n cpu_line = fin.readline()\n values = (float(x) for x in cpu_line.split()[1:])\n fields = 'user', 'nice', 'sys', 'idle', 'iowait', 'irq', 'sirq'\n return dict(zip(fields, values))\n\ndef _proc_stat_cpu_load_average():\n if not hasattr(_proc_stat_cpu_load_average, 'prev'):\n _proc_stat_cpu_load_average.prev = _read_proc_stat_cpu()\n prev = _proc_stat_cpu_load_average.prev\n cur = _read_proc_stat_cpu()\n if prev and cur:\n prev_idle = prev['idle'] + prev['iowait']\n cur_idle = cur['idle'] + cur['iowait']\n prev_total = sum(prev.itervalues())\n cur_total = sum(cur.itervalues())\n diff_total = cur_total - prev_total\n diff_idle = cur_idle - prev_idle\n _proc_stat_cpu_load_average.prev = cur\n if diff_total:\n return (diff_total - diff_idle) / diff_total\n return 0.0\n\ndef load_average():\n \"\"\"Calculate the CPU load average.\n\n Returns:\n float: Load average since last time this routine was called\n or 0.0 if couldn't calculate load average.\n \"\"\"\n try:\n cpu_load_avg = _proc_stat_cpu_load_average()\n except IOError:\n cpu_load_avg = 0.0\n return cpu_load_avg\n\n\n@contextmanager\ndef progress_spinner(show_cpu=True):\n \"\"\"Show a progress spinner until the wrapped object returns.\"\"\"\n flag = threading.Event()\n def 
show_progress():\n with ProgressIndicator(show_cpu=show_cpu) as spinner:\n while not flag.wait(0.25):\n spinner.update()\n thread = threading.Thread(target=show_progress)\n # Kill thread ungracefully when main thread exits, see\n # https://docs.python.org/2/library/threading.html#thread-objects\n thread.daemon = True\n thread.start()\n # Send control to wrapped object\n yield\n # Wrapped object has returned, stop the thread\n flag.set()\n thread.join()\n\n\nclass ProgressIndicator(object):\n \"\"\"Display a progress bar or spinner on a stream.\"\"\"\n \n _spinner = itertools.cycle(['-', '/', '|', '\\\\'])\n \n def __init__(self, total_size=0, block_size=1, show_cpu=True, mode=None):\n \"\"\" Initialize the ProgressBar object.\n\n Args:\n total_size (int): Total amount of work to be completed.\n block_size (int): Size of a work block.\n show_cpu (bool): If True, show CPU load average as well as progress.\n mode (str): One of 'full', 'minimal', 'disabled', or None.\n If ``mode == None`` then the default value for ``mode`` is taken from \n the __TAUCMDR_PROGRESS_BARS__ environment variable. If that variable is not set \n then the default is 'full'.\n If ``mode == 'full'`` then all output is written to :any:`sys.stdout`.\n If ``mode == 'minimal'`` then a single '.' character is written to sys.stdout approximately\n every five seconds without erasing the line (best for Travis regression test).\n If ``mode == 'disabled'`` then no output is written to stdout.\n \"\"\"\n if mode is None:\n mode = os.environ.get('__TAUCMDR_PROGRESS_BARS__', 'full').lower()\n if mode not in ('none', 'disabled', 'minimal', 'full'):\n raise ConfigurationError('Invalid value for __TAUCMDR_PROGRESS_BARS__ environment variable: %s' % mode) \n self.count = 0\n self.total_size = total_size\n self.block_size = block_size\n self.show_cpu = show_cpu\n self.mode = mode\n self._last_time = datetime.now()\n self._start_time = None\n self._line_remaining = 0\n \n def __enter__(self):\n self.update(0)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.complete()\n return False\n \n def _line_reset(self):\n sys.stdout.write('\\r')\n sys.stdout.write(logger.COLORED_LINE_MARKER)\n self._line_remaining = logger.LINE_WIDTH\n \n def _line_append(self, text):\n from taucmdr import util\n sys.stdout.write(text)\n self._line_remaining -= len(util.uncolor_text(text))\n \n def _line_flush(self):\n sys.stdout.flush()\n assert self._line_remaining >= 0\n \n def _draw_bar(self, percent, width, char, *args, **kwargs):\n from taucmdr import util \n bar_on = max(int(percent*width), 1)\n bar_off = width - bar_on\n self._line_append(util.color_text(char*bar_on, *args, **kwargs))\n self._line_append(' '*bar_off)\n \n def _update_minimal(self):\n if self._start_time is None:\n self._start_time = datetime.now()\n sys.stdout.write(logger.COLORED_LINE_MARKER)\n tdelta = datetime.now() - self._last_time\n if tdelta.total_seconds() >= 5:\n self._last_time = datetime.now()\n sys.stdout.write('.')\n sys.stdout.flush()\n \n def _update_full(self, count, block_size, total_size):\n if count is not None:\n self.count = count\n if block_size is not None:\n self.block_size = block_size\n if total_size is not None:\n self.total_size = total_size\n if self._start_time is None:\n self._start_time = datetime.now()\n show_bar = self.total_size > 0\n tdelta = datetime.now() - self._start_time\n self._line_reset()\n self._line_append(\"%0.1f seconds \" % tdelta.total_seconds()) \n if (not self.show_cpu and not show_bar) or (self._line_remaining 
< 40):\n self._line_append('[%s]' % self._spinner.next())\n self._line_flush()\n else:\n if self.show_cpu:\n cpu_load = min(load_average(), 1.0)\n self._line_append(\"[CPU: %0.1f \" % (100*cpu_load))\n width = (self._line_remaining/4) if show_bar else (self._line_remaining-2)\n self._draw_bar(cpu_load, width, '|', 'white', 'on_white')\n self._line_append(\"]\")\n if show_bar:\n if self.show_cpu:\n self._line_append(\" \")\n percent = max(min(float(self.count*self.block_size) / self.total_size, 1.0), 0.0)\n self._line_append(\"[%0.1f%% \" % (100*percent))\n width = self._line_remaining - 3\n self._draw_bar(percent, width, '>', 'green', 'on_green')\n self._line_append(\"]\")\n self._line_flush()\n\n def update(self, count=None, block_size=None, total_size=None):\n \"\"\"Show progress.\n\n Updates `block_size` or `total_size` if given for compatibility with :any:`urllib.urlretrieve`.\n\n Args:\n count (int): Number of blocks of `block_size` that have been completed.\n block_size (int): Size of a work block.\n total_size (int): Total amount of work to be completed.\n \"\"\"\n if self.mode == 'disabled' or getattr(logging, logger.LOG_LEVEL) >= logging.ERROR:\n return\n elif self.mode == 'minimal':\n self._update_minimal()\n elif self.mode == 'full':\n self._update_full(count, block_size, total_size)\n\n def complete(self):\n if self.mode != 'disabled':\n tdelta = datetime.now() - self._start_time\n elapsed = \"Completed in %0.3f seconds\" % tdelta.total_seconds()\n if self.mode == 'minimal':\n sys.stdout.write(' %s\\n' % elapsed)\n sys.stdout.flush()\n elif self.mode == 'full':\n self._line_reset()\n self._line_append(elapsed)\n self._line_append(' '*self._line_remaining)\n self._line_flush()\n self._start_time = None\n","sub_path":"packages/taucmdr/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":9810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"44643170","text":"import re\n\nfrom config import misago\nfrom utils import BOOL_TRUE, input_bool, print_setup_changed_message\n\n\ndef run_github_signin_wizard(env_file):\n if input_bool(\"Enable sign-in with GitHub?\"):\n run_github_key_wizard(env_file)\n run_github_secret_wizard(env_file)\n env_file[\"SOCIAL_AUTH_GITHUB_ENABLE\"] = \"yes\"\n else:\n env_file[\"SOCIAL_AUTH_GITHUB_ENABLE\"] = \"no\"\n\n\ndef run_github_key_wizard(env_file):\n github_key_prompt = \"Enter your GitHub Client ID: \"\n github_key = None\n\n while not github_key:\n github_key = input(github_key_prompt).strip().lower()\n if not github_key:\n print(\"You have to enter a GitHub Client ID.\")\n print()\n\n env_file[\"SOCIAL_AUTH_GITHUB_KEY\"] = github_key\n\n\ndef run_github_secret_wizard(env_file):\n github_secret_prompt = \"Enter your GitHub Client Secret: \"\n github_secret = None\n\n while not github_secret:\n github_secret = input(github_secret_prompt).strip().lower()\n if not github_secret:\n print(\"You have to enter a GitHub Client Secret.\")\n print()\n\n env_file[\"SOCIAL_AUTH_GITHUB_SECRET\"] = github_secret\n\n\ndef print_github_signin_setup(env_file):\n if env_file.get(\"SOCIAL_AUTH_GITHUB_ENABLE\") in BOOL_TRUE:\n print(\"Sign-in with GitHub is enabled:\")\n print()\n print(\"Client ID: %s\" % env_file.get(\"SOCIAL_AUTH_GITHUB_KEY\"))\n print(\"Client Secret: %s\" % env_file.get(\"SOCIAL_AUTH_GITHUB_SECRET\"))\n else:\n print(\"Sign-in with GitHub is disabled.\")\n\n\ndef change_github_signin_setup(env_file):\n print_github_signin_setup(misago)\n print()\n if input_bool(\"Change 
sign-in with GitHub setup?\", default=False):\n run_github_signin_wizard(env_file)\n env_file.save()\n print_setup_changed_message()\n\n\nif __name__ == \"__main__\":\n if misago.is_file():\n try:\n change_github_signin_setup(misago)\n except KeyboardInterrupt:\n print()\n","sub_path":"wizard/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"136150136","text":"from ex12357911 import is_insight\nboolean = False\ncount=0\nfor x in range(141,240):\n for y in range (61,260):\n if is_insight([x,y],[140,60,100,200]) == False:\n count += 1\nif count == 0:\n print(\"your code is correct\")\nelse:\n print(\"your code is wrong\")\n\n","sub_path":"lab3/homeworklab3/ex12.py","file_name":"ex12.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"29147703","text":"##importing the necessary packages for the model\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# load the iris data\r\niris = pd.read_csv(\"IRIS.csv\")\r\n\r\niris.head()\r\niris.tail()\r\niris.isnull().sum()\r\niris.info()\r\niris.describe()\r\n\r\n# separating input and target\r\nX = iris.iloc[:,:-1].values # to get only values without column names\r\nY = iris.iloc[:,-1].values\r\n\r\n#train and test split\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test = train_test_split(X,Y,train_size =0.8,test_size = 0.2,random_state = 0)\r\n\r\n#preprocessing\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nScaler = MinMaxScaler()\r\nScaler.fit(x_train)\r\n\r\nx_train = Scaler.transform(x_train)\r\nx_test = Scaler.transform(x_test)\r\n\r\nx_train.shape\r\n\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nKnn = KNeighborsClassifier(n_neighbors = 10)\r\nKnn.fit(x_train,y_train)\r\ny_pred = Knn.predict(x_test)\r\n\r\n\r\nfrom sklearn.metrics import accuracy_score,confusion_matrix\r\ncm = confusion_matrix(y_pred,y_test)\r\nprint(cm)\r\n\r\nAccuracy = accuracy_score(y_pred,y_test)*100\r\nprint(Accuracy)\r\n\r\n\r\nerror = []\r\n\r\nfor i in range(1,119):\r\n Knn = KNeighborsClassifier(n_neighbors = i)\r\n Knn.fit(x_train,y_train)\r\n y_pred_i = Knn.predict(x_test)\r\n error.append(np.mean(y_pred_i != y_test))\r\n\r\nplt.figure(figsize=(10, 5)) \r\nplt.plot(range(1,119),error)\r\nplt.title(\"Error Rate\")\r\nplt.xlabel(\"K (n_neighbors)\")\r\nplt.ylabel(\"Mean error\")\r\nplt.xlim(0,30)#for axis range\r\n\r\n","sub_path":"KNN for IRIS Data.py","file_name":"KNN for IRIS Data.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"91067972","text":"# -*- coding: utf-8 -*-\n\"\"\"\n resources.Store\n\n ~~~~~~~~~~~~~~~~~\n\n A class that handles async information sharing.\n\n :copyright: 2017 ORPEC\n\"\"\"\n# Standard Modules\nimport threading\nimport requests\nimport logging\n\n# Application Modules\nfrom resources.Member import Member\nfrom resources.GoogleClient import google_client\n\n\nclass Store:\n _lock = threading.Lock()\n _tracked_users = list()\n _status = False\n\n _logger = logging.getLogger(__name__)\n\n def fetch_tracked_users(self):\n \"\"\"\n \n :return: \n \"\"\"\n self._logger.info(\"Fetching Tracked Users\")\n tracked_users = google_client.get_db()\n\n for user in tracked_users:\n existing_member = False\n\n for i, org_member in 
enumerate(self._tracked_users):\n if org_member.discord_name == user.discord_name:\n existing_member = True\n break\n\n if not existing_member:\n try:\n self._lock.acquire()\n self._tracked_users.append(Member(tracked_user=user))\n\n except Exception as e:\n self._logger.exception(e)\n\n finally:\n self._lock.release()\n\n def update_discord_users(self, discord_members):\n self._logger.info(\"Updating Users from Discord.\")\n try:\n self._lock.acquire()\n\n for member in discord_members:\n existing_member = False\n for i, org_member in enumerate(self._tracked_users):\n if org_member.discord_name == member.discord_name:\n org_member.update_discord_info(member)\n self._tracked_users[i] = org_member\n existing_member = True\n break\n\n elif member.discord_name == 'ORPEC SC Recruit Manager#0898':\n existing_member = True\n break\n\n if not existing_member:\n self._tracked_users.append(Member(discord_user=member))\n\n except:\n raise\n\n finally:\n self._lock.release()\n\n @staticmethod\n def fetch_rsi_info(rsi_handle, request_type='cache'):\n if rsi_handle != \"\":\n\n r = requests.get(\n \"http://sc-api.com/?api_source=\" + request_type + \"&system=accounts&action=full_profile&target_id=\" + rsi_handle + \\\n \"&expedite=0&format=raw\"\n )\n\n if r.status_code == 200:\n return r.json()\n\n return False\n\n def update_users(self, request_type):\n \"\"\"\n \n :param type: \n :return: \n \"\"\"\n try:\n self._lock.acquire()\n\n for i, user in enumerate(self._tracked_users):\n rsi_info = self.fetch_rsi_info(user.rsi_handle, request_type)\n\n if rsi_info:\n user.update_rsi_info(rsi_info)\n self._tracked_users[i] = user\n print(user.create_record())\n\n except Exception as e:\n logging.exception(e)\n\n finally:\n self._lock.release()\n\n def get_status(self):\n \"\"\"\n \n :return: \n \"\"\"\n try:\n self._lock.acquire()\n status = self._status\n\n except:\n raise\n\n finally:\n self._lock.release()\n\n return status\n\n def toggle_status(self):\n \"\"\"\n \n :return: \n \"\"\"\n\n try:\n self._lock.acquire()\n self._status = not self._status\n\n except:\n raise\n\n finally:\n self._lock.release()\n\n def update_tracked_users(self):\n \"\"\"\n \n :return: \n \"\"\"\n try:\n self._lock.acquire()\n\n tracked_users = self._tracked_users\n\n except Exception as e:\n logging.exception(e)\n\n finally:\n self._lock.release()\n\n google_client.clean_db()\n\n for i, user in enumerate(tracked_users):\n try:\n google_client.write_db(i+2, user.create_record())\n\n except Exception as e:\n logging.error(e)\n\n google_client.reset_client()\n\n try:\n google_client.write_db(i + 2, user.create_record())\n\n except Exception as e_2:\n logging.exception(e_2)\n\n self.toggle_status()\n\n\nstore = Store()\n","sub_path":"resources/Store.py","file_name":"Store.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"615126741","text":"###################################################################\r\n# Name : Suphinya Wu\r\n# ID : 6201012630100\r\n###################################################################\r\n\r\nimport pygame\r\nimport pygame.camera\r\nfrom pygame.locals import *\r\nimport sys\r\n\r\n# size\r\nscr_w = 640\r\nscr_h = 480\r\n\r\ndef open_camera( frame_size=(640,480),mode='RGB'):\r\n pygame.camera.init()\r\n list_cameras = pygame.camera.list_cameras()\r\n print( 'Number of cameras found: ', len(list_cameras) )\r\n if list_cameras:\r\n # use the first camera found\r\n camera = 
pygame.camera.Camera(list_cameras[0], frame_size, mode )\r\n return camera \r\n return None\r\n\r\n# open camera\r\ncamera = open_camera()\r\nif camera:\r\n camera.start()\r\nelse:\r\n print('Cannot open camera')\r\n sys.exit(-1)\r\n\r\n# try to capture the next image from the camera \r\nimg = camera.get_image()\r\n# save the current image into the output file\r\npygame.image.save( img, 'image.jpg' )\r\ncamera.stop()\r\n\r\n# start\r\npygame.init()\r\nscreen = pygame.display.set_mode((scr_w, scr_h))\r\npygame.display.set_caption(\"Click Picture\")\r\nsurface = pygame.Surface( screen.get_size(), pygame.SRCALPHA )\r\n\r\n# get the image size\r\n#img_rect = img.get_rect()\r\n\r\n\r\n# draw (MxN) tiles of the images\r\nM,N = 10,8\r\nrw, rh = scr_w//M, scr_h//N\r\nfor i in range(M):\r\n for j in range(N):\r\n # draw a green frame (tile)\r\n rect = (i*rw, j*rh, rw, rh)\r\n stroke = pygame.draw.rect( img, (0,255,0), rect, 1)\r\n surface.blit( img , rect, rect )\r\n \r\n \r\npos_a = None \r\npos_b = None\r\npos_c = None\r\npos_d = None\r\nrect_ab = None\r\n\r\nis_running = True\r\nwhile is_running:\r\n \r\n bg = screen.fill((255,255,255))\r\n\r\n # process events\r\n for e in pygame.event.get():\r\n \r\n # swap tiles on the screen via mouse clicks\r\n if e.type == pygame.MOUSEBUTTONDOWN:\r\n if e.button == 1:\r\n [a,b] = pygame.mouse.get_pos()\r\n print(a,b)\r\n\r\n for i in range(M):\r\n for j in range(N):\r\n allrect = (i*rw, j*rh, rw, rh)\r\n # check the mouse position \r\n if i*rw <= a <= (i+1)*rw and j*rh <= b <= (j+1)*rh:\r\n print(i*rw,j*rh)\r\n pos_a = i*rw\r\n pos_b = j*rh\r\n rect_ab = (i*rw, j*rh, rw, rh)\r\n\r\n if e.type == pygame.MOUSEBUTTONUP:\r\n if e.button == 1:\r\n [c,d] = pygame.mouse.get_pos()\r\n print(c,d)\r\n\r\n for i in range(M):\r\n for j in range(N):\r\n allrect = (i*rw, j*rh, rw, rh)\r\n # check the mouse position \r\n if i*rw <= c <= (i+1)*rw and j*rh <= d <= (j+1)*rh:\r\n print(i*rw,j*rh)\r\n print('Switch Here')\r\n pos_c = i*rw\r\n pos_d = j*rh\r\n surface.blit( img , (pos_a,pos_b) , allrect )\r\n surface.blit( img , (pos_c,pos_d) , rect_ab )\r\n \r\n\r\n \r\n \r\n # close program\r\n if e.type == pygame.QUIT:\r\n is_running = False \r\n pygame.quit()\r\n sys.exit()\r\n\r\n\r\n # write the surface to the screen and update the display\r\n screen.blit( surface, (0,0) )\r\n pygame.display.update()\r\n\r\n\r\nprint(\"Done...\")\r\n#####################################################\r\n\r\n# reference\r\n# https://www.pygame.org/docs/ref/surface.html\r\n","sub_path":"assignment_2020-07-29/problem2/cameraswitch-1.py","file_name":"cameraswitch-1.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"650906052","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef first():\r\n n_groups = 5\r\n means_ssc = (20, 35, 30, 35, 27)\r\n std_ssc = (2, 3, 4, 1, 2)\r\n\r\n means_nonssc = (25, 32, 34, 20, 25)\r\n std_nonssc = (3, 5, 2, 3, 3)\r\n\r\n fig, ax = plt.subplots()\r\n\r\n index = np.arange(n_groups)\r\n bar_width = 0.35\r\n\r\n opacity = 0.4\r\n error_config = {'ecolor': '0.3'}\r\n\r\n rects1 = plt.bar(index, means_ssc, bar_width,\r\n alpha=opacity,\r\n color='b',\r\n yerr=std_ssc,\r\n error_kw=error_config,\r\n label='SSC')\r\n\r\n rects2 = plt.bar(index + bar_width, means_nonssc, bar_width,\r\n alpha=opacity,\r\n color='r',\r\n yerr=std_nonssc,\r\n error_kw=error_config,\r\n label='Non-SSC')\r\n\r\n plt.xlabel('MONTH')\r\n plt.ylabel('EVENTS')\r\n 
plt.title('Events by SSC and Non-SSC')\r\n plt.xticks(index + bar_width / 2, ('June', 'July', 'Aug', 'Sep', 'Oct'))\r\n plt.legend()\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\nfirst()\r\n","sub_path":"graph1.py","file_name":"graph1.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"277979071","text":"import argparse\nimport os.path\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport models\nfrom torch.autograd import Variable\nfrom dataset import FashionAI\nimport csv\n\n\nparser = argparse.ArgumentParser(description='FashionAI')\nparser.add_argument('--model', type=str, default='resnet34', metavar='M',\n help='model name')\nparser.add_argument('--attribute', type=str, default='coat_length_labels', metavar='A',\n help='fashion attribute (default: coat_length_labels)')\nparser.add_argument('--batch-size', type=int, default=1000, metavar='N',\n help='input batch size for training (default: 1000)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\nevalset = FashionAI('./', attribute=args.attribute, data_type='eval', reset=False)\neval_loader = torch.utils.data.DataLoader(evalset, batch_size=args.batch_size, shuffle=True, **kwargs)\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(180, 50)\n self.fc2 = nn.Linear(50, 8)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 180)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nif args.model == 'resnet34':\n model = models.resnet34(True)\n model.fc = nn.Linear(2048, FashionAI.AttrKey[args.attribute])\nelse:\n model = Net()\n\nsave_folder = os.path.join(os.path.expanduser('.'), 'save', args.attribute, args.model)\n\nif os.path.exists(os.path.join(save_folder, args.model + '_checkpoint.pth')):\n start_epoch = torch.load(os.path.join(save_folder, args.model + '_checkpoint.pth'))\n model.load_state_dict(torch.load(os.path.join(save_folder, args.model + '_' + str(start_epoch) + '.pth')))\nelse:\n start_epoch = 0\n\nif args.cuda:\n model.cuda()\n\n\ndef eval():\n model.eval()\n writedata = []\n for data, target in eval_loader:\n if args.cuda:\n data = data.cuda()\n data = Variable(data, volatile=True)\n output = model(data)\n output = np.exp(output.cpu().data.numpy()).tolist()\n writedata.extend([ [j, args.attribute, \";\".join([ str(ii) for ii in i ])] for (i, j) in zip(output, target) ])\n\n return writedata\n\n\neval_file = os.path.join(os.path.expanduser('.'), 'save', args.attribute, args.model + '_' + str(start_epoch) + '_eval.csv')\nwith open(eval_file, 'w', newline='') as f:\n writer = csv.writer(f)\n 
writer.writerows(eval())","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"574897592","text":"# Exercise 9.2 from Practical astronomy\n#=======================================================================\n#- Sylvie Dagoret-Campagne\n#- affiliation : IJCLAB/IN2P3/CNRS\n#- creation date : May 10th 2020\n#https://www.astro.ubc.ca/people/jvw/ASTROSTATS/pracstats_web_ed1.html#chapter9\n# 9.2 Variance of estimators for $w(\\theta)$ (D). Generate 20,000 data points randomly in the region $0^o < \\alpha < 5^o$, $0^o < \\delta < 5^o$. Estimate $w(\\theta)$ using the Natural estimator $w_1$, the Peebles estimator $w_2$, the Landy-Szalay estimator $w_3$ and the Hamilton estimator $w_4$. (Average $DR$ and $RR$ over say 10 comparison sets each of 20,000 random points). Plot the results as a function of $\\delta$ showing Poisson error bars $1/\\sqrt{DD}$. Comment on the results - which estimator is best?\n\nimport numpy as np\nimport pandas as pd\n\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\n\nimport time\nfrom datetime import datetime,date\nimport dateutil.parser\n\nra0=0\ndec0=0\n\ndef distance(ra,dec):\n return np.sqrt((ra-ra0)**2+(dec-dec0)**2)\n\ndef dist_row(row):\n return distance(row[\"ra\"],row[\"dec\"])\n\n\n\nif __name__ == '__main__':\n today = date.today()\n string_date=today.strftime(\"%Y-%m-%d\")\n\n\n # data file\n file_data = \"Ex9_2_data.xlsx\"\n\n df = pd.read_excel(file_data, header=2)\n # right ascension in hours\n df1 = pd.concat([df[\"ra1\"], df[\"ra2\"], df[\"ra3\"], df[\"ra4\"], df[\"ra5\"]], axis=0)\n # declination in degrees\n df2 = pd.concat([df[\"dec1\"], df[\"dec2\"], df[\"dec3\"], df[\"dec4\"], df[\"dec5\"]], axis=0)\n\n # use Angle to convert into degrees\n myra = Angle(df1.values, unit=\"hour\")\n\n df_data = pd.DataFrame()\n\n df_data[\"ra\"] = myra.degree # convert right ascension into degrees\n df_data[\"dec\"] = df2.values # keep declination in degrees\n\n # size of data\n Nobj = len(df_data)\n array_shape = df_data.shape\n\n\n # random\n random_sky = np.random.uniform(0, 5, array_shape)\n df_n = pd.DataFrame()\n df_n[\"ra\"] = random_sky[:, 0]\n df_n[\"dec\"] = random_sky[:, 1]\n\n\n # histogram config\n THETAS = np.arange(0, 10, 0.05)\n NBINS = len(THETAS) - 1\n BINSIZE = (THETAS[-1] - THETAS[0]) / NBINS\n\n BINSTART = THETAS[0]\n BINSTOP = THETAS[-1] + BINSIZE\n NBINS += 1\n\n # loop on each element in the original dataframe\n df2 = df_n.copy()\n\n df2.reset_index()\n df_data.reset_index()\n\n for index, row in df_data.iterrows():\n # isolate the current element\n ra0 = row['ra']\n dec0 = row['dec']\n\n if index % 1000 == 0:\n print(index, \" ra0=\", ra0, \" dec0=\", dec0)\n\n df2[\"dist\"] = df2.apply(dist_row, axis=1)\n if index == 0:\n histo = np.histogram(df2[\"dist\"].values, bins=NBINS, range=(BINSTART, BINSTOP))[0]\n else:\n histo += np.histogram(df2[\"dist\"].values, bins=NBINS, range=(BINSTART, BINSTOP))[0]\n\n\n\n # save histo\n NDR=histo\n\n filename_histo = string_date + \"_ndr.npy\"\n np.save(filename_histo, NDR)\n\n\n\n","sub_path":"Chapter09/ComputeNDR.py","file_name":"ComputeNDR.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"267744771","text":"from Tree import treeTool\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = 
None\nfrom collections import deque\nclass Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n\n :type root: TreeNode\n :rtype: str\n \"\"\"\n if root is None:\n return '[null]'\n queue = deque()\n queue.append(root)\n res = [str(root.val)]\n while len(queue) > 0:\n curNode = queue.popleft()\n if curNode.left:\n res.append(str(curNode.left.val))\n queue.append(curNode.left)\n else:\n res.append('null')\n if curNode.right:\n res.append(str(curNode.right.val))\n queue.append(curNode.right)\n else:\n res.append('null')\n while res[-1] == 'null':\n res.pop()\n return \"[\"+','.join(res)+']'\n\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n\n :type data: str\n :rtype: TreeNode\n \"\"\"\n data = data[1:len(data)-1].split(',')\n if data[0] == 'null':\n return None\n\n # def getTree( s, index):\n # if index >= len(s) or s[index] == 'null':\n # return None\n # root = TreeNode(int(s[index]))\n # left = index * 2 + 1\n # right = index * 2 + 2\n # root.left = getTree(s, left)\n # root.right = getTree(s, right)\n # return root\n root = TreeNode(int(data[0]))\n count = 1\n queue = deque()\n queue.append(root)\n while count < len(data):\n curNode = queue.popleft()\n if data[count] != 'null':\n curNode.left = TreeNode(int(data[count]))\n queue.append(curNode.left)\n count += 1\n if count == len(data):\n break\n if data[count] != 'null':\n curNode.right = TreeNode(int(data[count]))\n queue.append(curNode.right)\n count += 1\n\n return root\n\n\nif __name__ == \"__main__\":\n a = Codec()\n myT = a.deserialize(\"[4,2,5,1,3]\")\n print(a.serialize(myT))\n print(\"Done!\")\n\n\n","sub_path":"Tree/Leetcode_297/Mycode.py","file_name":"Mycode.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"571291882","text":"import cv2\nimport numpy as np\nimport picamera\nimport os\nimport io\nimport time\nimport multiprocessing\n\ndef getCapture(cap) :\n with picamera.PiCamera() as camera :\n camera.resolution = (416, 416)\n while True :\n camera.capture(\"images/\"+str(cap)+\".jpg\")\n cap += 1\n if cap > 30 :\n return\n\ndef yolo(cap) :\n cap_lig = 0\n \n net = cv2.dnn.readNet(\"yolov3-tiny_3000.weights\", \"yolov3-tiny.cfg\")\n os.chdir('images')\n classes = [\"lighter\"]\n layer_names = net.getLayerNames()\n output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n prev = time.time()\n \n while True :\n if os.path.isfile(str(cap)+\".jpg\") :\n img = cv2.imread(str(cap)+\".jpg\")\n try :\n blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n confidences = []\n boxes = []\n \n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.3:\n # Object detected\n center_x = int(detection[0] * 416)\n center_y = int(detection[1] * 416)\n w = int(detection[2] * 416)\n h = int(detection[3] * 416)\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n boxes.append([x, y, w, h])\n confidences.append(float(confidence))\n\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.3, 0.2)\n\n # if five or more lighters are detected, save the image for upload\n if len(indexes) >= 5 :\n cv2.imwrite(\"ok\"+str(cap_lig)+\".jpg\", img)\n cap_lig += 1\n\n # always delete the image once it has been processed\n os.remove(str(cap)+\".jpg\")\n cap += 1\n prev = time.time()\n\n except Exception as e :\n print(str(e))\n \n else :\n if time.time() - prev > 10 :\n 
return\n else :\n pass\n \nif __name__ == '__main__' :\n cap = 0\n proc1 = multiprocessing.Process(target=getCapture, args=(cap,))\n proc1.start()\n proc2 = multiprocessing.Process(target=yolo, args=(cap,))\n proc2.start()\n \n proc1.join()\n proc2.join()","sub_path":"lighter_image_training/getLighterCap_test.py","file_name":"getLighterCap_test.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"178994594","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n__author__ = 'RemiZOffAlex'\n__copyright__ = '(c) RemiZOffAlex'\n__license__ = 'MIT'\n__email__ = 'remizoffalex@mail.ru'\n\n\"\"\"\npip3 install rrdtool psutil\n\"\"\"\n\nimport os\nimport time\nimport rrdtool\nimport subprocess\n\nfname = 'database.rrd' \n\nip = '8.8.8.8'\n\nif not os.path.isfile(fname):\n rrdtool.create(\n fname,\n # step: stored data points are bound to a time grid; here the step is 10 seconds\n \"--step\", \"10s\",\n # Data source\n # ping - name of the source\n # type - GAUGE\n # heartbeat - 1m\n # min, max - limit values\n 'DS:ping:GAUGE:1m:0:65535',\n # describe which archives we want to keep in the DB:\n # the last 2 days, averaged over 1-minute steps\n # 0.5 - xff\n 'RRA:AVERAGE:0.5:1m:2d',\n 'RRA:MAX:0.5:1m:2d'\n )\n\ndef getping(IP):\n result = False\n p = subprocess.Popen([\"/bin/fping\", \"-e\", IP], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output,error = p.communicate()\n val = output.decode(\"utf-8\").split(\" \")\n if val[2] == \"alive\":\n result = val[3][1:]\n return result\n\nwhile True:\n\n val = getping(ip)\n print(val)\n\n if val:\n rrdtool.update(fname, '-t', 'ping', 'N:' + str(val))\n\n\n rrdtool.graph( 'graph.png',\n # period shown on the graph: the last day\n \"--start\", \"-1d\",\n \"--title\", \"Ping\",\n \"--disable-rrdtool-tag\",\n \"--imgformat\" , \"PNG\" ,\n # \"--vertical-label=CPU\",\n \"--watermark\", \"http://SpecialistOff.NET/ from http://RemiZOffAlex/\",\n \"-w 800\", \"-h 300\",\n \"DEF:valping1=\" + fname + \":ping:AVERAGE\",\n \"DEF:valping2=\" + fname + \":ping:MAX\",\n # area fill\n \"AREA:valping1#00FF00:Average response time\",\n \"LINE1:valping2#FF0000:Maximum response time\")\n\n time.sleep(10)\n","sub_path":"Python/pingmonitor.py","file_name":"pingmonitor.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"441557036","text":"#1. Create a greeting for your program.\nprint(\"Welcome to the name generator.\")\n\n#2. Ask the user to input his/her first name\nfirst_name = input(\"Enter your first name \\n-> \")\n\n#3. Ask the user to input his/her last name\nlast_name = input(\"Enter your last name \\n-> \")\n\n#4. Combine both the names\nname = first_name + \" \" + last_name\n\n#5. 
print the final output\nprint(\"Your full name is \" + name)\n","sub_path":"programs/name_generator.py","file_name":"name_generator.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"225174163","text":"#!/usr/bin/env python\n\ndef next_prime(limit):\n non_prime = set() # <1>\n\n for i in range(2, limit):\n if i in non_prime:\n continue\n for j in range(2 * i, limit + 1, i):\n non_prime.add(j) # <2>\n yield i # <3>\n\n\nnp = next_prime(200) # <4>\n\nprint(np)\n\nfor prime in np: # <5>\n # prime = next(np)\n print(prime, end=' ')\n\ndef silly():\n yield \"fee\"\n yield \"fi\"\n yield \"fo\"\n yield \"fum\"\n\n\nsg = silly()\nfor s in sg:\n print(s)\nprint()\n\nsg = silly()\nprint(next(sg))\nprint(next(sg))\nprint(\"but wait, there's more...\")\nfor s in sg:\n print(s)\nprint()\n\nthings = ['a', 'b', 'c']\ni = iter(things)\nfor t in things:\n print(t)\n\n\n\n\n","sub_path":"EXAMPLES/sieve_generator.py","file_name":"sieve_generator.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"457831550","text":"# FIXME: poor error checking for this module\nimport click\nimport os\nimport sys\nimport subprocess\nimport signal\nimport filecmp\nimport termicoder.utils.display as display\nimport termicoder.utils.parse as parse\nimport termicoder.utils.style as style\nimport time\ndiff_strings = [\n '''\nHelp:\nshows the first 10 differing lines along with line numbers for each testcase\nnewlines are shown as \\\\n\nspaces are shown as _''']\n\nlang_map = {\n \".py\": \"python\",\n \".c\": \"c\",\n \".cpp\": \"cpp\",\n \".cc\": \"cpp\",\n \".c++\": \"cpp\",\n \".java\": \"java\"\n}\n\ninterpreted = [\"python\"]\n\n\ndef get_script_folder():\n # not strictly correct, but good enough here\n if \"nt\" in os.name:\n return \"bat\"\n else:\n return \"bash\"\n\n\ndef get_shell():\n if \"nt\" in os.name:\n return \"cmd.exe\"\n else:\n return \"bash\"\n\n\ndef is_same(ansfile, outfile):\n a = False\n try:\n a = filecmp.cmp(ansfile, outfile)\n except BaseException:\n display.error(\"error in infile/outfile\")\n return a\n\n\ndef diff(ansfile, outfile):\n pad = style.pad\n a = []\n error = False\n try:\n s1 = open(ansfile, \"r\").readlines()\n s2 = open(outfile, \"r\").readlines()\n except BaseException:\n error = True\n\n if(not error):\n lines = min(len(s1), len(s2))\n lno_pad = max(len(str(lines)), 3)\n else:\n lines = 0\n lno_pad = 3\n a.append('|' + pad(\"lno\", lno_pad) +\n '|' + pad(ansfile, 25) + '|'+pad(outfile, 25)+'|')\n a.append('+' + '-'*lno_pad +\n '+' + '-'*25 + '+' + '-'*25+'+')\n\n for i in range(lines):\n line1 = s1[i]\n line2 = s2[i]\n if(line1 != line2):\n line1 = line1.replace(' ', '_')\n line1 = line1.replace('\\n', '\\\\n')\n\n line2 = line2.replace(' ', '_')\n line2 = line2.replace('\\n', '\\\\n')\n\n sno = str(i+1)\n a.append('|' + pad(sno, lno_pad) +\n '|' + pad(line1, 25) + '|'+pad(line2, 25)+'|')\n\n if(len(a) >= 12):\n break\n\n if(error):\n a.append(\"error in opening files\")\n return '\\n'.join(a)\n\n\ndef test(code_file, time_limit, live):\n # TODO: check for testcase folder if not found then exit\n # TODO: check if the answer file exists\n # code file will exist; thanks to 'click'\n if(time_limit is None):\n time_limit = parse.get_time_limit()\n\n extension = os.path.splitext(code_file)[1]\n try:\n lang = lang_map[extension]\n except BaseException:\n click.echo(\"the following language extension is not 
supported:\"\n + extension)\n sys.exit()\n else:\n\n # retriving the correct bash and batch scripts #######################\n scripts_folder = os.path.join(os.path.dirname(__file__), \"scripts\")\n os_scripts = os.path.join(scripts_folder, get_script_folder())\n lang_folder = lang\n lang_folder = os.path.join(os_scripts, lang_folder)\n compile_script = None\n run_script = None\n for s in os.listdir(lang_folder):\n sname = os.path.splitext(s)[0]\n if(sname == \"compile\"):\n compile_script = os.path.join(lang_folder, s)\n elif(sname == \"run\"):\n run_script = os.path.join(lang_folder, s)\n ######################################################################\n\n # using subprocess.call to maintain backward compactiblity with py2\n # if language is compiled\n # call scripts to compile\n # TODO: then print time\n shell_command = get_shell()\n if(lang not in interpreted):\n executable_name = os.path.splitext(code_file)[0]\n compilecall = [shell_command, compile_script,\n code_file, executable_name]\n click.echo(\"compiling...\", nl=False)\n start = time.time()\n a = subprocess.call(compilecall)\n stop = time.time()\n tdiff = stop-start\n\n # if process return a non zero exit code\n if(a):\n click.echo(style.error(\"\\t COMPILATION ERROR\"), nl=False)\n click.echo(\"\\t compile time: %.4fs\\n\" % tdiff)\n sys.exit()\n # TODO: print status here instead of done\n click.echo(\"\\t Done\", nl=False)\n click.echo(\"\\t compile time: %.4fs\\n\" % tdiff)\n\n else:\n executable_name = code_file\n\n if(live):\n runcall = [shell_command, run_script, executable_name]\n click.echo(\"The program is now running:\")\n a = subprocess.call(runcall)\n click.echo(\n \"\\nTermicoder:\\nthe process exited with status code \" +\n str(a) +\n \"\\none \\\\n was added after your code's output by Termicoder\")\n else:\n # run on testcases\n # call scripts to output to a file ; then print time\n # script takes arguments as in_filename and out_filename\n global diff_strings\n\n testcase_dir = \"testcases\"\n testcase_files = sorted(os.listdir(testcase_dir))\n click.echo(\"running...\")\n for testcase_file in testcase_files:\n filename = os.path.splitext(testcase_file)[0]\n file_extension = os.path.splitext(testcase_file)[1]\n\n if(file_extension == \".in\"):\n status = None\n infile = os.path.join(\"testcases\", testcase_file)\n outfile = os.path.join(\"testcases\", filename+\".outx\")\n stdin = open(infile, \"r\")\n stdout = open(outfile, \"w\")\n runcall = [shell_command, run_script, executable_name]\n\n # TODO: use a better function then time.clock()\n start = time.time()\n try:\n a = subprocess.Popen(\n runcall,\n stdin=stdin, stdout=stdout,\n preexec_fn=os.setsid)\n a.wait(timeout=time_limit)\n except subprocess.TimeoutExpired:\n status = style.error(\"TLE\")\n except BaseException:\n click.echo(\"\")\n status = \"Internal Error\" #some other error!! 
in this\n return_code=a.poll()\n if(return_code==None): #process not terminated\n status = style.error(\"TLE\")\n os.killpg(os.getpgid(a.pid), signal.SIGTERM) #kills all\n elif(return_code!=0): # runtime error\n status = style.error(\"RTE\")\n\n stop = time.time()\n tdiff = stop-start\n\n if(status is None):\n # check output and append create diff strings\n ansfile = os.path.join(\"testcases\", filename+\".out\")\n if(is_same(ansfile, outfile)):\n status = style.correct(\"AC\")\n else:\n status = style.error(\"WA\")\n diff_strings.append(\"\\nTestcase \"+filename+\"\\n\")\n diff_strings.append(diff(ansfile, outfile))\n\n click.echo(\"testcase \"+filename, nl=False)\n click.echo(\"\\t %s\" % status, nl=False)\n click.echo(\"\\t Time: %.4fs\" % tdiff)\n\n if(len(diff_strings) > 1):\n click.confirm(\n \"There were some WA's\\n\" +\n \"Do you want to view the diff page?\",\n default=True,\n abort=True)\n click.echo_via_pager('\\n'.join(diff_strings))\n\n\ndef edit_scripts():\n click.confirm(\"This will open the scripts folder in file manager\\n\" +\n \"Where you can edit compile and run scripts\\n\"\n \"Do you want to contnue?\", default=True, abort=True)\n scripts_folder = os.path.join(os.path.dirname(__file__), \"scripts\")\n click.launch(scripts_folder)\n sys.exit()\n","sub_path":"termicoder/utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"282912588","text":"with open('/srv/var/secret_key.txt') as f:\n SECRET_KEY = f.read().strip()\n\nDEBUG = False\nTEMPLATE_DEBUG = False\nALLOWED_HOSTS = ['lubnik.cz','lubnik.pavelsilar.cz', 'test.lubnik.cz', 'lubnik-0217.rostiapp.cz']\nINSTALLED_APPS = (\n 'grappelli',\n 'filebrowser',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'pages',\n 'board',\n 'news',\n 'gallery',\n 'newsletter',\n 'django_markdown',\n)\n\n# DATABASE Configured by URL\nimport dj_database_url\n\nwith open('/srv/var/database_url.txt') as db:\n DATABASE_URL = db.read().strip()\n\nDATABASES = {'default': dj_database_url.parse(DATABASE_URL)}\n","sub_path":"lubnik/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"557969414","text":"'''\nThis code was largely inspired by this source:\n- Author: Jon Fincher\n- Date: 9/16/2019\n- Type: Python code/tutorial \n- Web address: https://realpython.com/pygame-a-primer/\n'''\n\nimport pygame\nimport random\n\n# Import necessary user/key commands.\nfrom pygame.locals import (\n K_SPACE,\n QUIT,\n K_RETURN,\n)\n\nprint(\"K_SPACE:\",K_SPACE)\n\nscreen_width = 500\nscreen_height = 500\n\npygame.init()\n\nwindow = pygame.display.set_mode((screen_width, screen_height))\n\n# Use these variables for square jumping.\nisJumping = False\ncount = 1\n\n# This class defines the user's character, a red square. 
\nclass Square(pygame.sprite.Sprite):\n def __init__(self):\n super(Square, self).__init__()\n self.surface = pygame.Surface((50, 50))\n self.surface.fill((255, 0, 0))\n self.rect = self.surface.get_rect(\n center=(50, 475)\n )\n self.jumpVelocity = 7\n\n # Move the square based on user actions (in this case, the only user action\n # that results in user movement is the space key, which causes the square\n # to jump).\n def update(self, actions):\n # Reference the global variables used for jumping.\n global isJumping\n global count\n\n if actions[K_SPACE]: \n isJumping = True\n\n if isJumping: \n # Ascend until the peak of the jump is reached (based on a vertical velocity\n # of 7 pixels/count, and an acceleration of \"gravity\" of 1 pixel/count).\n if count <= 7: \n self.rect.y -= int((0.5 * 1 * count ** 2) + (self.jumpVelocity * count))\n count += 1\n\n # Keep the square on the screen.\n if self.rect.bottom >= screen_height:\n isJumping = False\n count = 1\n \n # Descend until the square hits the bottom of the screen.\n if count > 7:\n self.rect.y += int((0.5 * 1 * count ** 2) + (self.jumpVelocity * count))\n count += 1 \n\n # Keep the player on the screen.\n if self.rect.bottom >= screen_height:\n self.rect.bottom = screen_height\n\n# This class defines an \"enemy\" in the game, which are small yellow squares that \n# our red square must avoid touching. \nclass Enemy(pygame.sprite.Sprite):\n def __init__(self): \n super(Enemy, self).__init__()\n self.surface = pygame.Surface((10, 10))\n self.surface.fill((255, 255, 0))\n self.rect = self.surface.get_rect(\n center=(\n random.randint(screen_width + 50, screen_width + 100),\n random.randint(425, screen_height)\n )\n )\n\n self.speed = 25\n\n # Continue to move each enemy left. \n def update(self):\n self.rect.move_ip(-self.speed, 0)\n if self.rect.right < 0: \n self.kill()\n\n# Function for the start/death messages on screen.\ndef startOrDeathScreen(screenRunning, death): \n font = pygame.font.Font(None, 30)\n\n # If your character \"died\", then prompt the user to try again.\n if death: \n text = font.render(\"Press the Enter key to try again.\", True, (255, 255, 255))\n\n # If loading up the game for the first time, then prompt the user to start.\n else:\n text = font.render(\"Press the Enter key to start.\", True, (255, 255, 255))\n\n text_container = text.get_rect(\n center=(\n screen_width/2, screen_height/2\n )\n )\n\n # Add the message to the screen.\n window.blit(text, text_container)\n\n while screenRunning: \n for event in pygame.event.get(): \n print(event.type)\n if event.type == QUIT:\n pygame.quit()\n\n actions = pygame.key.get_pressed()\n\n # If the user presses enter, then exit this screen and start playing the game. \n if actions[K_RETURN]: \n screenRunning = False\n\n # Update the screen.\n pygame.display.flip()\n\n # Start playing the game.\n gameRunning(True) \n\n# Function for while the game is running.\ndef gameRunning(windowOpen):\n # Create unique user event for adding\n # a new enemy every 800 milliseconds. \n ADDENEMY = pygame.USEREVENT + 1\n pygame.time.set_timer(ADDENEMY, 800)\n\n # Create the user's square character.\n square = Square()\n\n # Add the square and all enemies to \n # the game's group of Sprites. 
\n enemies = pygame.sprite.Group()\n allSprites = pygame.sprite.Group()\n allSprites.add(square)\n\n while windowOpen: \n # Slight time delay allows for more smoothness with movements.\n pygame.time.delay(50)\n\n # Check if the user wants to quit the game, or if a new enemy \n # should be added to the screen.\n for event in pygame.event.get(): \n if event.type == QUIT:\n pygame.quit()\n\n elif event.type == ADDENEMY:\n enemy = Enemy()\n enemies.add(enemy)\n allSprites.add(enemy)\n\n # See if the user wants the square to jump.\n actions = pygame.key.get_pressed()\n square.update(actions)\n\n # Keep moving enemies to the left.\n enemies.update()\n\n # Fill the window with a black background.\n window.fill((0, 0, 0))\n\n # Put all Sprites (the user's square and any enemies)\n # on the screen.\n for sprite in allSprites:\n window.blit(sprite.surface, sprite.rect)\n\n # If the user's square collides with an enemy, \n # end the game. \n if pygame.sprite.spritecollideany(square, enemies): \n square.kill()\n windowOpen = False\n\n # Update the screen.\n pygame.display.flip()\n\n startOrDeathScreen(True, True)\n\n# Begin with the start screen. \nstartOrDeathScreen(True, False)\n","sub_path":"squareJumpGame.py","file_name":"squareJumpGame.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"543559726","text":"import cfnresponse\nimport boto3\nimport json\n\n\ndef lowest_open_priority(event, context):\n\n try:\n cfn_request_type = event[u'RequestType']\n print(\"Handling CFN request of type {}\".format(cfn_request_type))\n cfn = True\n except KeyError:\n print(\"Handling non-CFN request\")\n cfn = False\n\n # Check for delete of lambda backed custom resource by CFN, because that is a valid reason for the absence of a\n # Listener-Arn in the body\n if cfn and cfn_request_type == u'Delete':\n print(\"Responding OK to CFN delete request. And exiting...\")\n cfnresponse.send(event, context, cfnresponse.SUCCESS)\n return\n\n try:\n if cfn:\n listener_arn = event[u'ResourceProperties'][u'Listener-Arn']\n else:\n request_body = json.loads(event[u'body'].encode('utf-8'))\n listener_arn = request_body['ResourceProperties']['Listener-Arn']\n print(\"Listener-Arn retrieved: '{}'\".format(listener_arn))\n except KeyError:\n print(\"No 'Listener-Arn' was supplied. 
Returning 400\")\n return {\n \"statusCode\": 400,\n \"body\": json.dumps(\"Request parameter 'Listener-Arn' needs to be supplied\")\n }\n\n elb = boto3.client('elbv2')\n\n def retrieve_rule_pages():\n resp = elb.describe_rules(\n ListenerArn=listener_arn,\n PageSize=10,\n )\n yield resp\n try:\n while 'NextMarker' in resp:\n resp = elb.describe_rules(\n ListenerArn=listener_arn,\n PageSize=10,\n Marker=resp['NextMarker']\n )\n yield resp\n except KeyError:\n # last page does not contain a 'NextMarker', even though\n # http://boto3.readthedocs.io/en/latest/reference/services/elbv2.html specifies an empty string\n pass\n\n def retrieve_rule_priorities():\n for page in retrieve_rule_pages():\n print(\"Page request: {}\".format(page))\n for rule in page['Rules']:\n print(\"Found rule with priority {}\".format(rule['Priority']))\n if rule['Priority'] != 'default':\n yield int(rule['Priority'])\n\n lowest_priority = 1\n while lowest_priority in retrieve_rule_priorities():\n lowest_priority = lowest_priority + 1\n\n response_data = {\n 'lowestOpenPriority': lowest_priority,\n }\n\n print(\"Responding with data {}\".format(response_data))\n\n if cfn:\n cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data=response_data)\n else:\n return {\n \"statusCode\": 200,\n \"body\": json.dumps(response_data),\n }\n","sub_path":"cloudformation/elb-info/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"439249068","text":"from app import app\nfrom flask import render_template, redirect, url_for, flash, request\n\nfrom app.forms import MyLoginForm\nfrom app.forms import MyRegistrationForm\nfrom app.forms import ShortMessageForm\n\nfrom app import db\n\nfrom werkzeug.urls import url_parse\nfrom app.models import User, Short_messages\nfrom flask_login import login_required, login_user, logout_user, current_user\n\nfrom flask_login import LoginManager, login_required, login_user, logout_user, current_user\n\n@app.route('/')\n@login_required\ndef main():\n return render_template('index.html')\n \n@app.route('/login', methods=['get', 'post'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('/'))\n \n form = MyLoginForm()\n if form.validate_on_submit(): # check if it is a POST request and if it is valid.\n user = User.query.filter_by(username=form.username.data).first()\n if user is None:\n flash('You are not registered yet', 'info')\n return redirect(url_for('login'))\n elif not user.check_password(form.password.data):\n flash('Invalid username or password', 'info')\n return redirect(url_for('login'))\n login_user(user)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('main')\n return redirect(next_page)\n else:\n return render_template('my_login_form.html', form = form)\n\n \n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('main'))\n form = MyRegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!', 'info')\n return redirect(url_for('login'))\n return render_template('my_registration_form.html', title='Register', form=form)\n\n\n@app.route('/drop_message', methods=['GET', 
'POST'])\n@login_required\ndef drop_message():\n form = ShortMessageForm()\n if form.validate_on_submit():\n short_text = Short_messages ( text=form.text.data, author = current_user )\n db.session.add(short_text)\n db.session.commit()\n flash('Congratulations, your message has been successfully registered !!! ', 'info')\n return redirect(url_for('main'))\n else:\n return render_template('drop_message.html', form = form)\n\n \n@app.route('/show_messages')\n@login_required\ndef show_messages():\n text_list = Short_messages.query.filter_by( users_id=current_user.get_id() ).all()\n return render_template('show_short_messages.html', text_list = text_list )\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('main'))\n\n@app.route('/page_a')\n@login_required\ndef page_a():\n return render_template('page_a.html')\n\n@app.route('/page_b')\n@login_required\ndef page_b():\n return render_template('page_b.html')\n\n","sub_path":"ExoSlides/Exo4 - Chap6/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"135229690","text":"import os.path\nimport fake_useragent\n\n\n_DEFAULT_RSSANT_USER_AGENT = (\n 'Mozilla/5.0 (X11; Linux x86_64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/67.0.3396.87 Safari/537.36 RSSAnt/1.0'\n)\n\n\n_DEFAULT_USER_AGENT = (\n 'Mozilla/5.0 (Linux; Android 8.0.0; TA-1053 Build/OPR1.170623.026) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3368.0 Mobile Safari/537.36'\n)\n\n\n_dir = os.path.dirname(__file__)\n_filename = 'fake_useragent_{}.json'.format(fake_useragent.VERSION)\n_useragent_path = os.path.join(_dir, _filename)\n\nuseragent = fake_useragent.UserAgent(\n path=_useragent_path, fallback=_DEFAULT_USER_AGENT)\n\n\ndef DEFAULT_USER_AGENT():\n return str(useragent.random)\n","sub_path":"rssant_feedlib/useragent/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"5604932","text":"import services.controlers.loggControler\nimport services.querys.reportQuery\nimport services.querys.countryQuery\nimport services.querys.warehouseQuery\nimport services.querys.maintenanceQuery\nimport services.querys.kindOfMaintenanceQuery\nimport services.querys.resourceQuery\nimport services.querys.resourceCategoryQuery\nimport services.querys.consumptionKindQuery\nimport services.querys.supplierQuery\nimport time\nimport datetime\nfrom datetime import datetime\nfrom services.exceptions import *\nimport os\nimport hashlib\nimport re\nimport cgi\nimport urllib\n\nclass ReportsControler:\n\n\tdef getUsersReport(self, initialDate, finalDate, companyId):\n\t\treportQuery = services.querys.reportQuery.ReportQuery()\n\t\tcountryQuery = services.querys.countryQuery.CountryQuery()\n\t\tusersList=[]\n\t\tinitialDate = datetime.strptime(initialDate, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\tfinalDate = datetime.strptime(finalDate, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\ttry:\n\t\t\tcontrol = \"\"\n\t\t\tif str(companyId) == str(\"-1\"):\n\t\t\t\tdata = reportQuery.getUsersReport(initialDate, finalDate)\n\t\t\telse:\n\t\t\t\tdata = reportQuery.getUsersReportForCompany(initialDate, finalDate, companyId)\n\t\t\tfor userObject in data:\n\t\t\t\tuser={}\n\t\t\t\tid = 
str(userObject.key().id())\n\t\t\t\tuser[\"id\"]=id\n\t\t\t\tuser[\"autoId\"]=userObject.autoId\n\t\t\t\tuser[\"userName\"]=userObject.userName\n\t\t\t\tuser[\"firstName\"]=userObject.firstName\n\t\t\t\tuser[\"lastName\"]=userObject.lastName\n\t\t\t\tuser[\"email\"]=userObject.email\n\t\t\t\tuser[\"isActive\"]=str(userObject.isActive)\n\t\t\t\tcountry = countryQuery.getCountryById(userObject.countryId)\n\t\t\t\tuser[\"country\"] = country.name\n\t\t\t\tif userObject.creationDate <= finalDate and userObject.creationDate >= initialDate:\n\t\t\t\t\tusersList.append(user)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn usersList\n\n\tdef getWarehousesReport(self, initialDate, finalDate):\n\t\twarehouseQuery = services.querys.warehouseQuery.WarehouseQuery()\n\t\twarehousesList=[]\n\t\tinitialDate = datetime.strptime(initialDate, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\tfinalDate = datetime.strptime(finalDate, \"%Y-%m-%d %H:%M:%S.%f\")\t\n\t\ttry:\n\t\t\tdata = warehouseQuery.getWarehouses()\n\t\t\tfor warehouseObject in data:\n\t\t\t\twarehouse={}\n\t\t\t\tid = str(warehouseObject.key().id())\n\t\t\t\twarehouse[\"id\"]=id\n\t\t\t\twarehouse[\"autoId\"]=warehouseObject.autoId\n\t\t\t\twarehouse[\"name\"]=warehouseObject.name\n\t\t\t\twarehouse[\"code\"]=warehouseObject.code\n\t\t\t\twarehouse[\"inCharge\"]=warehouseObject.inCharge\n\t\t\t\twarehouse[\"responsability\"]=warehouseObject.responsability\n\t\t\t\twarehouse[\"isActive\"]=str(warehouseObject.isActive)\n\t\t\t\tif warehouseObject.creationDate <= finalDate and warehouseObject.creationDate >= initialDate:\n\t\t\t\t\twarehousesList.append(warehouse)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn warehousesList\n\n\tdef getMaintenancesReport(self, initialDate, finalDate, elementId, tipeOfMaintenance):\n\t\tmaintenanceQuery = services.querys.maintenanceQuery.MaintenanceQuery()\n\t\tkindOfMaintenanceQuery = services.querys.kindOfMaintenanceQuery.KindOfMaintenanceQuery()\n\t\tmaintenanceList=[]\n\t\tinitialDate = datetime.strptime(initialDate, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\tfinalDate = datetime.strptime(finalDate, \"%Y-%m-%d %H:%M:%S.%f\")\t\t\n\t\ttry:\n\t\t\tif str(elementId) == str(\"-1\"):\n\t\t\t\tif str(tipeOfMaintenance) == str(\"-1\"):\n\t\t\t\t\tdata = maintenanceQuery.getMaintenancesReport() # no filter was provided\n\t\t\t\telse:\n\t\t\t\t\tdata = maintenanceQuery.getMaintenancesReportByTipeOfMaintenance(tipeOfMaintenance) # only the maintenance type was provided\n\t\t\telse:\n\t\t\t\tif str(tipeOfMaintenance) == str(\"-1\"):\n\t\t\t\t\tdata = maintenanceQuery.getMaintenancesReportByElementId(elementId) # only the element id was provided\n\t\t\t\telse:\n\t\t\t\t\tdata = maintenanceQuery.getMaintenancesReportByTipeOfMaintenanceAndElementId(tipeOfMaintenance, elementId) # both filters were provided\n\t\t\tfor maintenanceObject in data:\n\t\t\t\tmaintenance={}\n\t\t\t\tid = str(maintenanceObject.key().id())\n\t\t\t\tmaintenance[\"id\"]=id\n\t\t\t\tmaintenance[\"autoId\"]=maintenanceObject.autoId\n\t\t\t\tmaintenance[\"elementId\"]= maintenanceObject.elementId\n\t\t\t\telementObject = maintenanceQuery.getResourceById(int(maintenanceObject.elementId))\n\t\t\t\tmaintenance[\"code\"]=elementObject.code\n\t\t\t\tmaintenance[\"name\"]=elementObject.name\n\t\t\t\tmaintenance[\"ref\"]=elementObject.ref\n\t\t\t\ttypeOfMaintenanceObject = kindOfMaintenanceQuery.getKindOfMaintenanceById(int(maintenanceObject.tipeOfMaintenance))\n\t\t\t\tmaintenance[\"typeOfMaintenance\"] = typeOfMaintenanceObject.name\n\t\t\t\tmaintenance[\"lastMaintenanceDate\"]=maintenanceObject.lastMaintenanceDate\n\t\t\t\tmaintenance[\"responsable\"]=maintenanceObject.responsable\n\t\t\t\tmaintenance[\"Observations\"]=maintenanceObject.Observations\t\t\t\t\n\t\t\t\tif maintenanceObject.creationDate <= finalDate and maintenanceObject.creationDate >= initialDate:\n\t\t\t\t\tmaintenanceList.append(maintenance)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn maintenanceList\n\n\tdef getResourcesReport(self, fromDate, toDate, category):\n\t\tresourceQuery = services.querys.resourceQuery.ResourceQuery()\n\t\tresourceCategoryQuery = services.querys.resourceCategoryQuery.ResourceCategoryQuery()\n\t\tconsumptionKindQuery = services.querys.consumptionKindQuery.ConsumptionKindQuery()\n\t\tsupplierQuery = services.querys.supplierQuery.SupplierQuery()\n\t\tinitialDate = datetime.strptime(fromDate, \"%Y-%m-%d %H:%M:%S.%f\")\n\t\tfinalDate = datetime.strptime(toDate, \"%Y-%m-%d %H:%M:%S.%f\")\t\n\t\tresourceList=[]\n\t\ttry:\n\t\t\tif str(category)==str(\"-1\"):\n\t\t\t\tdata = resourceQuery.getResourcesReport(fromDate, toDate)\n\t\t\telse:\n\t\t\t\tdata = resourceQuery.getResourcesReportByCategory(fromDate, toDate, category)\n\t\t\t\n\t\t\tfor resourceObject in data:\n\t\t\t\tresource={}\n\t\t\t\tid = str(resourceObject.key().id())\n\t\t\t\tresource[\"id\"]=id\n\t\t\t\tresource[\"autoId\"]=resourceObject.autoId\n\t\t\t\tresource[\"code\"]=resourceObject.code\n\t\t\t\tresource[\"ref\"]=resourceObject.ref\n\t\t\t\tresource[\"name\"]=resourceObject.name\n\t\t\t\tcategoryObject = resourceCategoryQuery.getResourceCategoryById(resourceObject.category)\n\t\t\t\tresource[\"category\"]=categoryObject.name\n\t\t\t\tconsumeTypeObject = consumptionKindQuery.getConsumptionKindById(resourceObject.consumeType)\n\t\t\t\tresource[\"consumeType\"]=consumeTypeObject.name\n\t\t\t\tproviderObject = supplierQuery.getSupplierById(resourceObject.provider)\n\t\t\t\tresource[\"provider\"]=providerObject.name\n\t\t\t\tresource[\"amount\"]=resourceObject.amount\n\t\t\t\tresource[\"isActive\"]=resourceObject.isActive\n\t\t\t\tif resourceObject.creationDate <= finalDate and resourceObject.creationDate >= initialDate:\n\t\t\t\t\tresourceList.append(resource)\n\t\texcept Exception as e:\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\treturn resourceList\n\n\t","sub_path":"services/controlers/reportsControler.py","file_name":"reportsControler.py","file_ext":"py","file_size_in_byte":6875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"73133423","text":"from entities.count_vehicle_response import CountedVehiclesByArea\nfrom services.core_services.tracking_service import TrackingService, init_tracker\nfrom services.core_services.vehicle_recognition_image_service import VehicleRecognitionImageService\nimport numpy as np\nimport cv2\n\nfrom utils.deep_sort import 
preprocessing\nfrom utils import error_code\nfrom utils.application_properties import get_config_variable\nfrom utils.customized_exception import BadRequestException\nfrom utils.deep_sort.detection import Detection\nfrom entities.common_entity import Area, LicensePlate, Box\nfrom utils.draw_results_to_frame import draw_recognized_license_plate_frame\n\n\ndef get_image_from_box(bounding_box: Box, frame):\n frame_height, frame_width, _ = frame.shape\n x_min = max(0, int(bounding_box.x1))\n y_min = max(0, int(bounding_box.y1))\n x_max = min(frame_width, int(bounding_box.x2))\n y_max = min(frame_height, int(bounding_box.y2))\n return frame[y_min: y_max, x_min: x_max]\n\n\ndef count_vehicles_from_tracks(counted_vehicles_by_area, tracker):\n for track in tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 0:\n continue\n counted_vehicles_by_area.update_current_bounding_box(track.cls, track.bounding_box)\n if not track.is_counted:\n counted_vehicles_by_area.increase_vehicle(track.cls)\n track.is_counted = True\n\n\ndef validate_area(areas, frame_height, frame_width):\n for i, area in enumerate(areas):\n x_area_min, y_area_min, x_area_max, y_area_max = area.detection_area.get_left_top_right_bottom()\n if not (0 <= x_area_min < x_area_max and x_area_min < x_area_max < frame_width\n and 0 <= y_area_min < y_area_max and y_area_min < y_area_max < frame_height):\n raise BadRequestException(error_code.INVALID_AREA, \"areas\", x_area_min, y_area_min,\n area.detection_area.width, area.detection_area.height)\n\n\ndef init_vehicle_results_dict_by_area(areas):\n result_dict = {}\n for area in areas:\n result_dict[area.area_id] = []\n return result_dict\n\n\ndef parse_point_plate_to_tracker_input(point_plate_list, area):\n if point_plate_list[0] is None:\n tracker_input = np.empty((0, 5))\n else:\n tracker_input = np.array(point_plate_list) \\\n + np.array([area.detection_area.x1, area.detection_area.y1,\n area.detection_area.x1, area.detection_area.y1, 0])\n return tracker_input\n\n\nclass VehicleRecognitionVideoService:\n def __init__(self, vehicle_recognition_image_service: VehicleRecognitionImageService, tracking_service: TrackingService):\n self.vehicle_recognition_image_service = vehicle_recognition_image_service\n self.tracking_service = tracking_service\n self.nms_max_overlap = 1.0\n\n def __detect_and_track_vehicles(self, frame, tracker):\n bounding_boxes, confidences, label = self.vehicle_recognition_image_service.detect_vehicle(frame.copy())\n feature_vectors = self.tracking_service.extract_tracking_feature(frame, bounding_boxes)\n detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in\n zip(bounding_boxes, confidences, label, feature_vectors)]\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = preprocessing.non_max_suppression(boxes, self.nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n\n tracker.predict()\n tracker.update(detections)\n\n def recognize_license_plate(self, frame_generator, areas: [Area]):\n trackers = []\n recognized_license_plates = []\n is_validated_area = False\n for i, area in enumerate(areas):\n recognized_license_plates.append(CountedVehiclesByArea(area.area_id))\n trackers.append(init_tracker(\"sort\"))\n for frame, timestamp in frame_generator:\n license_plate_results = []\n frame_height, frame_width, _ = frame.shape\n\n for i, area in enumerate(areas):\n # validate area config\n if not is_validated_area:\n 
validate_area(areas, frame_height, frame_width)\n is_validated_area = True\n\n image = get_image_from_box(area.detection_area, frame)\n point_plate_list, license_plate_images = self.vehicle_recognition_image_service\\\n .detect_one_license_plate_per_image([image])\n tracker_input = parse_point_plate_to_tracker_input(point_plate_list, area)\n trackers[i].update(tracker_input)\n # expect only one license plate per area\n if license_plate_images[0] is not None and trackers[i].trackers:\n track = trackers[i].trackers[0]\n self.__update_license_plate_info_of_track(track, license_plate_images)\n bounding_box = Box(x=int(tracker_input[0, 0]), y=int(tracker_input[0, 1]),\n width=int(tracker_input[0, 2] - tracker_input[0, 0]),\n height=int(tracker_input[0, 3] - tracker_input[0, 1]))\n license_plate_results.append(LicensePlate(track_id=track.id,\n area_id=area.area_id,\n license_plate=track.license_plate,\n confidence=track.confidence,\n bounding_box=bounding_box,\n license_plate_image=track.license_plate_image))\n yield license_plate_results, timestamp, frame\n if get_config_variable(\"debug_mode\"):\n show_frame = draw_recognized_license_plate_frame(frame, areas, license_plate_results)\n cv2.imshow(\"debug\", show_frame)\n cv2.waitKey(1)\n\n def __update_license_plate_info_of_track(self, track, license_plate_images):\n if track.confidence < 95:\n labels, probs = self.vehicle_recognition_image_service \\\n .extract_text_from_license_plates(license_plate_images)\n if track.confidence <= probs[0]:\n track.license_plate = labels[0]\n track.license_plate_image = cv2.imencode('.jpg', license_plate_images[0])[1].tostring()\n track.confidence = probs[0]\n\n def detect_vehicle(self, frame_generator):\n tracker = init_tracker(\"deep_sort\")\n # used to record the time when we processed last frame\n for frame, timestamp in frame_generator:\n frame_height, frame_width, _ = frame.shape\n self.__detect_and_track_vehicles(frame, tracker)\n for track in tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 0:\n continue\n track.vehicle_image = get_image_from_box(track.bounding_box, frame)\n yield tracker.tracks, timestamp, frame\n\n def recognize_vehicle_and_license_plate(self, frame_generator):\n recognized_vehicles_generator = self.detect_vehicle(frame_generator)\n for tracks, timestamp, frame in recognized_vehicles_generator:\n for track in tracks:\n if not track.is_confirmed() or track.time_since_update > 0:\n continue\n if track.vehicle_image is not None and track.license_plate_text == '':\n track.license_plate_text = self.vehicle_recognition_image_service\\\n .extract_license_plate_from_vehicle_image(track.vehicle_image)\n yield tracks, timestamp, frame\n","sub_path":"services/core_services/vehicle_recognition_video_service.py","file_name":"vehicle_recognition_video_service.py","file_ext":"py","file_size_in_byte":7891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"112007209","text":"# random pivot implementation\nfrom random import randint\ndef quick_sort( array ):\n\n\tif len( array ) <= 1:\n\t\treturn array\n\trand = randint(0, len( array ) - 1)\n\tmid = array[rand]\n\tleft, right = [], []\n\tfor item in array[:rand] + array[rand+1:]:\n\t\tif item < mid:\n\t\t\tleft = left + [item]\n\t\telse:\n\t\t\tright = right + [item]\n\n\treturn quick_sort( left ) + [mid] + quick_sort( right 
)","sub_path":"sorts/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"220646169","text":"\"\"\"\nSink plot (noon and midnight), of a 1d soil \n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nname = \"singleroot\"\nstr_ = [\"sra\", \"ups\"]\n# str_ = [\"cyl\", \"sra\", \"agg\"]\n# str_ = [\"cyl\", \"sra\", \"agg\", \"ups\"]\n\n# name = \"rootsystem1d\"\n# str_ = [\"sra\", \"ups\"]\n\nfnames = np.array([\"sink_\" + name + \"_\" + s for s in str_ ])\n\ncmap = plt.get_cmap('Set1')\ncol = cmap([1, 0, 4, 3, 2, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) # adjust colors to jans plot\n\ndays = 21\nl = 150 # cm soil depth\ndx = 1 # cm resolution\nylim = 99.5\n\ncell_volume = 4 # cm3\nplot_times = [1., 5, 10, 15, 20]\npath = \"results/\"\n\nSMALL_SIZE = 16\nMEDIUM_SIZE = 16\nBIGGER_SIZE = 16\nplt.rc('font', size = SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize = SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize = MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize = SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize = SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize = SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize = BIGGER_SIZE) # fontsize of the figure title\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\n\n\"\"\" load data \"\"\"\nn = len(fnames)\ndata = [np.load(path + n_ + \".npy\") for n_ in fnames]\n\n\"\"\" sink plot \"\"\"\nfig, ax = plt.subplots(1, 2, figsize = (18, 10))\nax[0].set_ylabel(\"depth [cm]\")\nax[0].set_xlabel(\"sink term at noon [1/day]\")\nax[1].set_xlabel(\"sink term at night [1/day]\")\nax[0].plot([0, 0], [-l, 0.], \"k:\")\nax[1].plot([0, 0], [-l, 0.], \"k:\")\nls = [\"-\", \"--\", \"-.\", \":\"]\n\n\"\"\" noon \"\"\"\nfor i in range(0, n):\n\n sink_ = data[i]\n soil_z_ = np.linspace(-l + dx / 2., -dx / 2., sink_.shape[1]) # segment mids\n\n peak_id = np.round(sink_.shape[0] / days * np.array([0.5 + i for i in plot_times]))\n peak_id = peak_id.astype(int)\n\n for ind, j in enumerate(peak_id):\n lstr = \"{:g}d ({:s})\".format(plot_times[ind], str_[i])\n ax[0].plot(sink_[j,:] / cell_volume, soil_z_, label = lstr, color = col[ind], linestyle = ls[i])\n\n ax[0].set_ylim([-ylim, 0.])\n ax[0].legend()\n\n\"\"\" midnight \"\"\"\nfor i in range(0, n):\n\n sink_ = data[i]\n soil_z_ = np.linspace(-l + dx / 2., -dx / 2., sink_.shape[1]) # segment mids\n\n redistribution_id = np.round(sink_.shape[0] / days * np.array([i for i in plot_times]))\n redistribution_id = redistribution_id.astype(int)\n\n for ind, j in enumerate(redistribution_id):\n lstr = \"{:g}d ({:s})\".format(plot_times[ind], str_[i])\n ax[1].plot(sink_[j,:] / cell_volume, soil_z_, label = lstr, color = col[ind], linestyle = ls[i])\n\n ax[1].set_ylim([-ylim, 0.])\n ax[1].legend()\n\nplt.tight_layout()\nplt.show()\n\n","sub_path":"python/_upscaling/plot_sink1d.py","file_name":"plot_sink1d.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"344041718","text":"import pandas as pd\nimport csv\n\n# 기본 읽기\n# df=pd.read_csv('D:/atom_py/section4/csv_s1.csv')\n# print(df)\n\ndf2=pd.read_csv('D:/atom_py/section4/csv_s2.csv',sep=';',skiprows=[0],header=None,names=[\"First name\",\"Test1\",\"Test2\",\"Test3\",\"Final\",\"Grade\"]) #skiprows 원래는 줄을 
생략하고 읽겠다\n# print(df)\n\n# 원본 columns 변경\n# print(df2['Grade'])\ndf2['Grade']=df2['Grade'].str.replace('\"','')\n# print(df2)\n\n# 평균 컬럼 추가\ndf2['AVG']=df2[['Test1','Test2','Test3']].mean(axis=1)\n# print(df2)\n\n# 합계 컬럼 추가\ndf2['SUM']=df2[['Test1','Test2','Test3']].sum(axis=1)\nprint(df2)\n","sub_path":"section4/4-5-1.py","file_name":"4-5-1.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"632573493","text":"import time\nimport picamera\npicFine = False\nwhile picFine != True:\n with picamera.PiCamera() as camera:\n camera.start_preview()\n time.sleep(2)\n camera.capture\n camera.start_preview\n print('Captured %s' % filename)\n picFineInput = input(\"Is this Picture ok?\").lower()\n if picFineInput == yes or picFineInput == y:\n picFine = True\n else:\n picFine = False\n","sub_path":"PiCam/piCamTimelapse.py","file_name":"piCamTimelapse.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"497882833","text":"\ndef object_table_printer(classname = \"BaseOptimizer\", add_attr = dict()): \n head = \"_\" * 81 + \"\\n|%-80s|\\n|\" % (f\"{classname} : \") + \"_\" * 80 + \"|\\n\"\n content = \"\"\n footer = \"|\" + \"_\" * 80 + \"|\"\n\n for k, v in add_attr.items():\n if k in [\"-\"] and v:\n content += \"|%-15s : %-62s|\\n\" % (k * 15, k * 62)\n continue\n content += \"|%-15s : %-62s|\\n\" % (k, str(v))\n return head + content + footer","sub_path":"ravana/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"462606801","text":"import threading\nimport multiprocessing\nfrom testcase.api.login.login_all_api import LoginApi\nfrom utils.config import NewConfig\nfrom testcase.api.studyCenter.getServiceInfo_step1 import GetServiceInfo\nfrom testcase.api.studyCenter.getTaskInfo_step2 import GetTaskInfo\nfrom testcase.api.studyCenter.startLearning_step3 import StartLearning\nfrom testcase.api.studyCenter.getTaskInfo_step4 import GetTaskInfo2\n\nfrom testcase.api.studyCenter.words_lists.all_cihui import AllCihuiInterface\nfrom testcase.api.studyCenter.sysListening.all_listening_interface import AllListenInterface\nfrom testcase.api.studyCenter .reading.all_reading_interface import AllReadInterface\nfrom testcase.api.studyCenter.grammar.all_gra_interface import AllGraInterface\nfrom testcase.api.studyCenter.writing.all_wrt_interface import AllWrtInterface\n\n\ndef finish_rid(task, common, headers, access_token):\n practiceType = task.get(\"practiceType\")\n currStatus = task.get(\"currStatus\")\n taskID = task.get(\"taskID\")\n groupID = task.get(\"groupID\")\n if currStatus == 0:\n if practiceType == 13:\n # print(taskID, groupID)\n sa = AllReadInterface(common, headers, access_token)\n all_answer = sa.get_all_sen_analysis_answer(groupID, taskID)\n # print(\"句子分析\", all_answer)\n if len(all_answer) != 0:\n # print(\"a\", all_answer)\n for a in all_answer:\n # print(\"AAAA\", a)\n sa.post_all_sen_analysis_answer(taskID, a)\n sa_words = sa.get_sa_words(groupID, taskID, practiceType)\n for star in sa_words:\n r = sa.put_sa_words(star)\n # print(r)\n sa_done_data = sa.return_sa_done_data(groupID, practiceType)\n sa.put_sa_done(sa_done_data, taskID)\n if practiceType == 14:\n st = AllReadInterface(common, headers, access_token)\n all_answer = st.get_all_sec_train_answer(groupID, taskID)\n # 
print(\"all_answer\", all_answer)\n for a in all_answer:\n if \"newF\" in list(a.keys()):\n stars_3 = st.get_sec_train_words(groupID, taskID, practiceType)\n for star in stars_3:\n ne_re = st.put_sec_train_words(star)\n st_data = st.get_sec_train_word_done_data(groupID, taskID, practiceType)\n re = st.put_sec_train_words_done(taskID, st_data)\n else:\n st.post_all_sec_train_answer(taskID, a)\n if practiceType == 15:\n at = AllReadInterface(common, headers, access_token)\n all_answer = at.get_all_art_train_answer(groupID, taskID)\n # print(\"文章训练\", all_answer)\n for a in all_answer:\n print(\"A==============\", a, a.get('stepType'))\n # if \"newF\" not in list(a.keys()):\n # while True:\n if a.get(\"stepType\") == 3:\n at.post_all_art_train_answer(taskID, a)\n # break\n if a.get(\"stepType\") == None:\n if \"newF\" in list(a.keys()):\n stars_3 = at.get_article_train_words(groupID, taskID, practiceType)\n for star in stars_3:\n at.put_article_train_words(star)\n at_data = at.get_articleTrain_word_done_data(groupID, taskID, practiceType)\n at.put_article_train_done(taskID, at_data)\n # break\n if a.get(\"stepType\") == 2:\n at.post_all_art_train_answer(taskID, a)\n # break\n if a.get(\"stepType\") == 4:\n at.post_all_art_train_answer(taskID, a)\n # break\n\n if practiceType == 16:\n clozeTest = AllReadInterface(common, headers, access_token)\n all_answer = clozeTest.get_all_ClozeTest_answer(groupID, taskID)\n # print(all_answer)\n for a in all_answer:\n if \"newF\" in list(a.keys()):\n stars_3 = clozeTest.get_ClozeTest_words(groupID, taskID, practiceType)\n for star in stars_3:\n clozeTest.put_ClozeTest_words(star)\n data = clozeTest.get_ClozeTest_word_done_data(groupID, taskID, practiceType)\n clozeTest.put_ClozeTest_words_done(taskID, data)\n if \"newF\" not in list(a.keys()):\n clozeTest.post_all_clozeTest_answer(taskID, a)\n if practiceType == 17:\n cloze75 = AllReadInterface(common, headers, access_token)\n all_answer = cloze75.get_all_Cloze75_answer(groupID, taskID)\n for a in all_answer:\n if \"newF\" in list(a.keys()):\n stars_3 = cloze75.get_Cloze75_words(groupID, taskID, practiceType)\n for star in stars_3:\n cloze75.put_Cloze75_words(star)\n data = cloze75.get_Cloze75_word_done_data(groupID, taskID, practiceType)\n cloze75.put_Cloze75_words_done(taskID, data)\n if \"newF\" not in list(a.keys()):\n try:\n data = cloze75.get_Cloze75_word_done_data(groupID, taskID, practiceType)\n cloze75.put_Cloze75_words_done(taskID, data)\n except:\n pass\n cloze75.post_all_cloze75_answer(taskID, a)\n else:\n pass","sub_path":"testcase/api/common/finish_rid.py","file_name":"finish_rid.py","file_ext":"py","file_size_in_byte":5557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"165508858","text":"from hunspell import Hunspell\nimport csv\nh=Hunspell()\ndef correction(num):\n if h.spell(str(num)) is False:\n new=h.suggest(str(num))[0]\n return new\n else:\n return num\nres=[]\nwith open('/Users/hhy/Desktop/raw1.csv') as csvfile1:\n rows = csv.reader(csvfile1)\n with open('/Users/hhy/Desktop/write.csv','a', encoding='utf-8' , newline='') as f:\n writer = csv.writer(f)\n for row in rows:\n res = []\n for num in row:\n try:\n if num == ' ':\n continue\n else:\n res.append(correction(num))\n except:\n continue\n writer.writerow(res)\n","sub_path":"sentiment/correction.py","file_name":"correction.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} 
+{"seq_id":"33860299","text":"from django.db import models\r\nfrom datetime import date\r\n\r\n\r\n# Create your models here.\r\nclass Aluno(models.Model):\r\n id_aluno = models.AutoField('Id do Aluno', primary_key=True)\r\n num_matricula = models.CharField('Matricula', max_length=50, blank=True)\r\n nome = models.CharField('Nome', max_length=30, blank=False)\r\n sobrenome = models.CharField('Sobre Nome', max_length=100, blank=False)\r\n nascimento = models.DateField('Nascimento', auto_now_add=False) \r\n data_cadastro = models.DateTimeField('Cadastrado em', auto_now_add=True)\r\n data_atualização = models.DateTimeField('Atualizado em', auto_now=True)\r\n\r\n\r\n @property\r\n def idade(self):\r\n hoje = date.today()\r\n idade = hoje.year - self.nascimento.year - ((hoje.month, hoje.day) < (self.nascimento.month, self.nascimento.day))\r\n return idade\r\n \r\n @property\r\n def nome_completo(self):\r\n \"Retorna o nome completo do aluno\"\r\n return f'{self.nome} {self.sobrenome}'\r\n\r\n class Meta:\r\n ordering = ['nome']\r\n verbose_name = 'Alunos'\r\n verbose_name_plural = 'Alunos'\r\n \r\n def __str__(self):\r\n return self.nome_completo","sub_path":"aluno/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"204807691","text":"from slack_sdk import WebClient\nfrom slack_sdk.errors import SlackApiError\n\nfrom petisco.legacy.notifier.domain.interface_notifier import INotifier\nfrom petisco.legacy.notifier.domain.notifier_message import NotifierMessage\nfrom petisco.legacy.notifier.infrastructure.slack.errors import SlackError\nfrom petisco.legacy.notifier.infrastructure.slack.interface_slack_notifier_message_converter import (\n ISlackNotifierMessageConverter,\n)\nfrom petisco.legacy.notifier.infrastructure.slack.slack_notifier_message_converter import (\n SlackNotifierMessageConverter,\n)\n\n\nclass SlackNotifier(INotifier):\n def __init__(\n self,\n token: str,\n channel: str,\n converter: ISlackNotifierMessageConverter = SlackNotifierMessageConverter(),\n ):\n self.token = token\n self.channel = channel\n self.converter = converter\n\n def publish(self, notifier_message: NotifierMessage):\n\n client = WebClient(token=self.token)\n\n try:\n client.chat_postMessage(\n channel=self.channel,\n blocks=self.converter.convert(notifier_message=notifier_message),\n )\n except SlackApiError as e:\n raise SlackError(e.response[\"error\"])\n","sub_path":"data/codefile/alice-biometrics@petisco__9abf7b1__petisco$legacy$notifier$infrastructure$slack$slack_notifier.py.target.py","file_name":"alice-biometrics@petisco__9abf7b1__petisco$legacy$notifier$infrastructure$slack$slack_notifier.py.target.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"461576287","text":"from flask import (\n Flask,\n request,\n redirect,\n render_template\n)\n\napp = Flask(__name__)\n\ntodos = []\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', todos=todos)\n\n\n@app.route('/add', methods=['POST'])\ndef add():\n t = request.form.get('title', '')\n\n l = len(todos)\n if l == 0:\n id = 1\n else:\n id = l + 1\n data = {\n 'title': t,\n 'id': id,\n }\n todos.append(data)\n return redirect('/')\n\n\n@app.route('/edit', methods=['GET'])\ndef edit():\n return render_template('edit.html')\n\n\n@app.route('/update', methods=['POST'])\ndef update():\n id = int(request.form.get('id'))\n 
title = request.form.get('title')\n index = -1\n for i, n in enumerate(todos):\n if n['id'] == id:\n index = i\n if index != -1:\n todos[index]['title'] = title\n return redirect('/')\nprint('a111fsadf')\n\n\n@app.route('/remove', methods=['GET'])\ndef remove():\n id = request.args.get('id')\n index = -1\n print('id',type(id), id)\n for i, n in enumerate(todos):\n if n['id'] == int(id):\n index = i\n if index != -1:\n todos.pop(index)\n print('todos', todos)\n return redirect('/')\n\n\n\nif __name__ == '__main__':\n config = dict(\n host = '',\n port = 3000,\n debug = True,\n )\n app.run(**config)\n","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"419565731","text":"import sys\n\ndef main():\n if len(sys.argv) !=2:\n print(\"Correct usage:\\n python vigenere.py cipher\")\n exit(1)\n for letter in sys.argv[1]:\n if ord(letter)<65 or ord(letter)>122:\n print(\"Please insert a string as the cipher\")\n exit(1)\n \n plain = input(\"plaintext: \")\n cipherString = sys.argv[1]\n encrypted= []\n j=0\n l=len(cipherString)\n \n for i in range(len(plain)):\n if ord(plain[i]) >= 65 and ord(plain[i]) <= 122:\n encrypted.append(cipher(plain[i],cipherString[j%l]))\n j+=1\n else:\n encrypted.append(plain[i])\n \n for i in range(len(encrypted)):\n if encrypted[i] == ' ':\n pass\n else:\n encrypted[i] = chr(encrypted[i])\n \n print(''.join(encrypted))\n \ndef cipher(initial, cipher):\n k = ord(cipher.upper())-65\n if initial.islower():\n initial = (ord(initial)-97+k)%26+97\n elif initial.isupper():\n initial = (ord(initial)-65+k)%26+65\n return initial\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Course - Introduction to Computer Science - CS50/problemSet-6/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"279978346","text":"#!/usr/bin/env python\n\nimport test_cli.osn_unittest as test\n\n\nclass StorageSystemBasics(test.OsnTestCase):\n\n SLEEP_TIME = 0.5\n\n def setUp(self):\n \"\"\"Activate a system and delete grid before running 'StorageSystemManagement.test_...'.\"\"\"\n _ = self.activate()\n try:\n _, _ = self.grid_get()\n except test.env.GridQSError: # if no grid exists\n pass\n else: # a grid exists; delete it.\n self.grid_delete()\n\n def tearDown(self):\n \"\"\"Deactivate the system after running 'StorageSystemManagement.test_...()'.\"\"\"\n self.deactivate() # deactivate all systems\n\n def test_system_get_list(self):\n \"\"\"Test 'system-get' and 'system-list' functionality.\"\"\"\n # test system-list\n systems = self.system_list()\n # confirm there is a system id that is not None\n assert_msg = \"Expected 1 system\"\n self.assertEqual(1, len(systems), assert_msg)\n\n # use system_get to obtain system_id\n system_id, _, _ = self.system_get()\n # confirm same system_id\n assert_msg = \"Values don't match\"\n self.assertEqual(systems[0], system_id, assert_msg)\n\n def test_system_modify(self):\n \"\"\"Test 'system-modify' functionality.\"\"\"\n\n qs_command = test.env.qs_api.system_get()\n system_info = self.qs_output(qs_command)\n assert_msg = \"Values don't match\"\n\n # initial values\n initial_sys_values = dict([(test.env.xml.STORAGE_SYSTEM_ID, system_info.findtext(test.env.xml.ID))])\n initial_sys_values[test.env.xml.NAME] = system_info.findtext(test.env.xml.NAME)\n initial_sys_values[test.env.xml.DESCRIPTION] 
= system_info.findtext(test.env.xml.DESCRIPTION)\n initial_sys_values[test.env.xml.LOCATION] = system_info.findtext(test.env.xml.LOCATION)\n # modified values\n mod_sys_values = dict(initial_sys_values)\n mod_sys_values[test.env.xml.NAME] = 'system-modify-' + test.env.time_stamp()\n mod_sys_values[test.env.xml.DESCRIPTION] = 'modified functional test system'\n mod_sys_values[test.env.xml.LOCATION] = 'DC-02, Floor-03, Row-02, Rack-05, Level-23'\n\n # test system-modify\n qs_command = test.env.qs_api.system_modify(mod_sys_values[test.env.xml.STORAGE_SYSTEM_ID],\n name=mod_sys_values[test.env.xml.NAME],\n desc=mod_sys_values[test.env.xml.DESCRIPTION],\n location=mod_sys_values[test.env.xml.LOCATION])\n _ = self.qs_output(qs_command)\n # use system-get to get modified system info\n qs_command = test.env.qs_api.system_get()\n modified_system_info = self.qs_output(qs_command)\n\n # confirm modified values\n self.assertEqual(mod_sys_values[test.env.xml.NAME], modified_system_info.findtext(test.env.xml.NAME), assert_msg)\n self.assertEqual(mod_sys_values[test.env.xml.DESCRIPTION], modified_system_info.findtext(test.env.xml.DESCRIPTION), assert_msg)\n self.assertEqual(mod_sys_values[test.env.xml.LOCATION], modified_system_info.findtext(test.env.xml.LOCATION), assert_msg)\n\n # reset system to initial values\n qs_command = test.env.qs_api.system_modify(initial_sys_values[test.env.xml.STORAGE_SYSTEM_ID],\n name=initial_sys_values[test.env.xml.NAME],\n desc=initial_sys_values[test.env.xml.DESCRIPTION],\n location=initial_sys_values[test.env.xml.LOCATION])\n _ = self.qs_output(qs_command)\n qs_command = test.env.qs_api.system_get()\n reset_system_info = self.qs_env.qs_output(self.server(), qs_command)\n\n # confirm reset values\n self.assertEqual(initial_sys_values[test.env.xml.NAME], reset_system_info.findtext(test.env.xml.NAME), assert_msg)\n self.assertEqual(initial_sys_values[test.env.xml.DESCRIPTION], reset_system_info.findtext(test.env.xml.DESCRIPTION), assert_msg)\n self.assertEqual(initial_sys_values[test.env.xml.LOCATION], reset_system_info.findtext(test.env.xml.LOCATION), assert_msg)\n\n @test.unittest.skip('test not yet developed')\n def test_system_platform_event(self):\n event_type = 0\n event_msg = \"functional test platform event\"\n qs_command = test.env.qs_api.system_platform_event(event_type, event_msg)\n event_result = self.qs_output(qs_command)\n","sub_path":"test/osnpy/test_cli/qs_test_cases/test_system_basic.py","file_name":"test_system_basic.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"215054289","text":"#Import the required plugins\r\nfrom flask import Flask, request, jsonify\r\nfrom flask_restful import Api, Resource \r\nimport pickle\r\nimport pandas as pd\r\n\r\n#Step 1: Wrapping our app\r\napp = Flask(__name__)\r\napi = Api(app) #Wrapping o ur app in a restful API\r\n\r\n#Step 1.5: Load the model\r\nmodel = pickle.load(open('model.pkl', 'rb'))\r\n\r\n\r\n#Step 2: Define our API Resources\r\nclass HelloWorld(Resource):\r\n \r\n def get(self):\r\n return {\"hello\":\"world\"}\r\n\r\nclass Predict(Resource):\r\n\r\n def post(self): #post request\r\n json_data = request.get_json()\r\n\r\n # For 1 observation \r\n df = pd.DataFrame(json_data.values(), \r\n index = json_data.keys()).transpose()\r\n\r\n # How to take multiple observation\r\n df = pd.DataFrame(json_data)\r\n \r\n result = model.predict(df)\r\n return result.tolist()\r\n\r\n\r\n#Step 3: Assign our 
endpoints\r\napi.add_resource(HelloWorld, '/helloworld')\r\napi.add_resource(Predict,'/predict')\r\n\r\n\r\n\r\n#Step 4: Runing our api app\r\nif __name__ == '__main__': \r\n app.run(debug=True)\r\n # app.run(host='0.0.0.0')","sub_path":"w7/d2_api_deploy_aws/lecture/andrews_docs/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"373060054","text":"#!/usr/bin/env python3 \n# -*- coding:utf-8 _*- \n# Author: Wengs\n# Time : 3/6/2019 3:10 PM \n# File : plot_solow_steady.py \n# IDE : PyCharm\n\nfrom ClassSolow import Solow\nimport matplotlib.pyplot as plt\n\ns1 = Solow()\ns2 = Solow(k=8.0)\n\nT = 60\nfig,ax = plt.subplots(figsize=(10, 6))\n\n# Plot the common steady state value of capital\nax.plot([s1.steady_state()] * T, 'k-', label='steady state')\n\n# Plot time series for each economy\nfor s in s1, s2:\n lb = f'capital series from initial state {s.k}'\n ax.plot(s.generate_sequence(T), 'o-', lw=2, alpha=0.6, label=lb)\n\nax.legend()\nplt.show()\n","sub_path":"L3.2_puthon_oop/plot_solow_steady.py","file_name":"plot_solow_steady.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"556180938","text":"# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Log status events to a file on disk or event collection endpoint.\"\"\"\n\n\nimport json\nimport logging\nimport logging.handlers\nimport os\nimport time\n\nfrom logging.handlers import TimedRotatingFileHandler\n\nimport buildbot.status.results\n\nfrom buildbot.status.base import StatusReceiverMultiService\nfrom twisted.python import log as twisted_log\n\nfrom common import chromium_utils\n\n\nclass StatusEventLogger(StatusReceiverMultiService):\n \"\"\"Log status events to a file on disk or event collection endpoint.\n\n Files on disk are rotated, while the event collection endpoint is contacted\n through a script in the infra/infra repository (separate checkout).\n\n A file, .logstatus, is used to configure the logger. If it exists then\n file logging is enabled. If it parses as json, the keys event_logging,\n file_logging, logging_ignore_basedir, logfile, can be used to configure the\n logger at runtime.\n \"\"\"\n\n DEFAULT_LOGGING_IGNORE_BASEDIR = False\n\n def __init__(self, logfile='status.log', configfile='.logstatus',\n basedir=None, event_logging_dir=None):\n \"\"\"Create a StatusEventLogger.\n\n Args:\n logfile: base filename for events to be written to.\n configfile: the name of the configuration file.\n basedir: the basedir of the configuration and log files. Set to the\n service's parent directory by default, mainly overridden for\n testing.\n event_logging_dir: directory where to write events. This object adds the\n master name to the path. 
Mainly overridden for testing.\n \"\"\"\n self._logfile = self._original_logfile = logfile\n self._configfile = configfile\n self._basedir = basedir\n self.master_dir = os.path.basename(os.path.abspath(os.curdir))\n\n self._event_logging_dir = os.path.join(\n event_logging_dir or '/var/log/chrome-infra',\n 'status_logger-' + self.master_dir)\n\n self._event_logfile = os.path.join(self._event_logging_dir, 'events.log')\n self._ts_mon_logfile = os.path.join(self._event_logging_dir, 'ts_mon.log')\n\n # These are defaults which may be overridden.\n self.logging_ignore_basedir = self.DEFAULT_LOGGING_IGNORE_BASEDIR\n\n # Will be initialized in startService.\n self.logger = None\n self.event_logger = None\n self.ts_mon_logger = None\n self.status = None\n self._active = False\n self._last_checked_active = 0\n self._logging = False\n self._event_logging = False\n self._ts_mon_logging = False\n # Can't use super because StatusReceiverMultiService is an old-style class.\n StatusReceiverMultiService.__init__(self)\n\n def as_dict(self):\n return {\n 'basedir': self.basedir,\n 'configfile': self.configfile,\n 'file_logging': self._logging,\n 'event_logging': self._event_logging,\n 'ts_mon_logging': self._ts_mon_logging,\n 'logfile': self.logfile,\n 'logging_ignore_basedir': self.logging_ignore_basedir,\n }\n\n def _configure(self, config_data):\n old_config = self.as_dict()\n\n self._logging = config_data.get(\n 'file_logging', True) # Preserve old behavior.\n self._event_logging = config_data.get('event_logging',\n self._event_logging)\n self._ts_mon_logging = config_data.get('ts_mon_logging',\n self._ts_mon_logging)\n self._logfile = config_data.get(\n 'logfile', self._original_logfile)\n self.logging_ignore_basedir = config_data.get(\n 'logging_ignore_basedir', self.DEFAULT_LOGGING_IGNORE_BASEDIR)\n\n new_config = self.as_dict()\n if new_config != old_config:\n twisted_log.msg(\n 'Configuration change detected. 
Old:\\n%s\\n\\nNew:\\n%s\\n' % (\n json.dumps(old_config, sort_keys=True, indent=2),\n json.dumps(new_config, sort_keys=True, indent=2)))\n\n # Clean up if needed.\n if not old_config['file_logging'] and new_config['file_logging']:\n self._create_logger()\n\n if not old_config['event_logging'] and new_config['event_logging']:\n self._create_event_logger()\n\n if not old_config['ts_mon_logging'] and new_config['ts_mon_logging']:\n self._create_ts_mon_logger()\n\n @staticmethod\n def _get_requested_at_millis(build):\n return int(build.getProperty('requestedAt') * 1000)\n\n @property\n def basedir(self):\n \"\"\"Returns dynamic or preset basedir.\n\n self.parent doesn't exist until the service is running, so this has to be\n here instead of precomputing the logfile and configfile in __init__.\n \"\"\"\n return self._basedir or self.parent.basedir\n\n def _canonical_file(self, filename, ignore_basedir=False):\n \"\"\"Returns an absolute path for a config or log file.\"\"\"\n if ignore_basedir:\n full_filename = filename\n else:\n full_filename = os.path.join(self.basedir, filename)\n return chromium_utils.AbsoluteCanonicalPath(full_filename)\n\n @property\n def configfile(self):\n return self._canonical_file(self._configfile)\n\n @property\n def logfile(self):\n return self._canonical_file(\n self._logfile, ignore_basedir=self.logging_ignore_basedir)\n\n @property\n def active(self):\n now = time.time()\n # Cache the value for self._active for one minute.\n if now - self._last_checked_active > 60:\n active_before = self._active\n self._active = os.path.isfile(self.configfile)\n\n if not self._active and active_before:\n twisted_log.msg('Disabling status_logger.')\n\n if self._active:\n # Test if it parses as json, otherwise use defaults.\n data = {}\n try:\n with open(self.configfile) as f:\n data = json.load(f)\n except ValueError as err:\n twisted_log.msg(\"status_logger config file parsing failed: %s\\n%s\"\n % (self.configfile, err), logLevel=logging.ERROR)\n self._configure(data)\n\n if not active_before:\n twisted_log.msg(\n 'Enabling status_logger. 
file_logger: %s / event_logging: %s '\n '/ ts_mon_logging: %s' % (\n self._logging, self._event_logging, self._ts_mon_logging))\n else:\n self._configure({'file_logging': False,\n 'event_logging': False,\n 'ts_mon_logging': False}) # Reset to defaults.\n\n self._last_checked_active = now\n return self._active\n\n\n def send_build_result(\n self, scheduled, started, finished, builder_name, bot_name, result,\n project_id=None, subproject_tag=None, steps=None, pre_test_time_s=None):\n \"\"\"Log a build result for ts_mon.\n\n This allows computing metrics for builds in mastermon.\n \"\"\"\n d = {\n 'timestamp_ms': finished * 1000,\n 'builder': builder_name,\n 'slave': bot_name,\n 'result': result.lower(),\n 'duration_s': finished - started,\n 'pending_s': started - scheduled,\n 'total_s': finished - scheduled,\n }\n if project_id:\n d['project_id'] = project_id\n if subproject_tag:\n d['subproject_tag'] = subproject_tag\n if steps:\n d['steps'] = steps\n if pre_test_time_s is not None:\n d['pre_test_time_s'] = pre_test_time_s\n self.ts_mon_logger.info(json.dumps(d))\n\n def send_build_event(self, timestamp_kind, timestamp, build_event_type,\n bot_name, builder_name, build_number, build_scheduled_ts,\n step_name=None, step_number=None, result=None,\n extra_result_code=None, patch_url=None):\n \"\"\"Log a build/step event for event_mon.\"\"\"\n\n if self.active and self._event_logging:\n # List options to pass to send_monitoring_event, without the --, to save\n # a bit of space.\n d = {'event-mon-timestamp-kind': timestamp_kind,\n 'event-mon-event-timestamp': timestamp,\n 'event-mon-service-name': 'buildbot/master/%s' % self.master_dir,\n 'build-event-type': build_event_type,\n 'build-event-hostname': bot_name,\n 'build-event-build-name': builder_name,\n 'build-event-build-number': build_number,\n 'build-event-build-scheduling-time': build_scheduled_ts,\n }\n if step_name:\n d['build-event-step-name'] = step_name\n d['build-event-step-number'] = step_number\n if result:\n d['build-event-result'] = result.upper()\n if extra_result_code:\n d['build-event-extra-result-code'] = extra_result_code\n if patch_url:\n d['build-event-patch-url'] = patch_url\n\n self.event_logger.info(json.dumps(d))\n\n def _create_logging_dir(self):\n \"\"\"Make sure the logging directory exists.\n\n Try to create the directory if it doesn't exist, returns False if it\n fails.\n\n Returns:\n logs_dir_exists(bool): True is the directory is available\n\n \"\"\"\n event_logging_dir_exists = os.path.isdir(self._event_logging_dir)\n if not event_logging_dir_exists:\n try:\n os.mkdir(self._event_logging_dir)\n except OSError:\n twisted_log.msg('Logging directory cannot be created, no events will '\n 'be written:', self._event_logging_dir)\n else:\n event_logging_dir_exists = True\n\n return event_logging_dir_exists\n\n def _create_ts_mon_logger(self):\n \"\"\"Set up a logger for ts_mon events.\n\n If the destination directory does not exist, ignore data sent to\n ts_mon_logger.\n \"\"\"\n\n event_logging_dir_exists = self._create_logging_dir()\n logger = logging.getLogger(__name__ + '_ts_mon')\n # Remove handlers that may already exist. 
This is useful when changing the\n # log file name.\n for handler in logger.handlers:\n handler.flush()\n logger.handlers = []\n\n logger.propagate = False\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(message)s')\n\n if event_logging_dir_exists:\n # Use delay=True so we don't open an empty file while self.active=False.\n # Also use WatchedFileHandler because it'll be rotated by an external\n # process.\n handler = logging.handlers.WatchedFileHandler(self._ts_mon_logfile,\n encoding='utf-8',\n delay=True)\n else:\n handler = logging.NullHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n self.ts_mon_logger = logger\n\n def _create_event_logger(self):\n \"\"\"Set up a logger for monitoring events.\n\n If the destination directory does not exist, ignore data sent to\n event_logger.\n \"\"\"\n event_logging_dir_exists = self._create_logging_dir()\n\n logger = logging.getLogger(__name__ + '_event')\n # Remove handlers that may already exist. This is useful when changing the\n # log file name.\n for handler in logger.handlers:\n handler.flush()\n logger.handlers = []\n\n logger.propagate = False\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(message)s')\n\n if event_logging_dir_exists:\n # Use delay=True so we don't open an empty file while self.active=False.\n handler = TimedRotatingFileHandler(self._event_logfile, backupCount=120,\n when='M', interval=1, delay=True)\n else:\n handler = logging.NullHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n self.event_logger = logger\n\n def _create_logger(self):\n logger = logging.getLogger(__name__)\n # Remove handlers that may already exist. This is useful when changing the\n # log file name.\n for handler in logger.handlers:\n handler.flush()\n logger.handlers = []\n\n logger.propagate = False\n logger.setLevel(logging.INFO)\n # %(bbEvent)19s because builderChangedState is 19 characters long\n formatter = logging.Formatter('%(asctime)s - %(bbEvent)19s - %(message)s')\n # Use delay=True so we don't open an empty file while self.active=False.\n handler = TimedRotatingFileHandler(\n self._canonical_file(self.logfile),\n when='H', interval=1, delay=True)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n self.logger = logger\n\n def _get_patch_url(self, build_properties):\n # TODO(sergiyb): Add support for Gerrit.\n patch_url = None\n if ('issue' in build_properties and 'patchset' in build_properties and\n 'rietveld' in build_properties):\n patch_url = '%s/%s#%s' % (\n build_properties.getProperty('rietveld'),\n build_properties.getProperty('issue'),\n build_properties.getProperty('patchset'))\n return patch_url\n\n def startService(self):\n \"\"\"Start the service and subscribe for updates.\"\"\"\n self._create_logger()\n self._create_event_logger()\n self._create_ts_mon_logger()\n\n StatusReceiverMultiService.startService(self)\n self.status = self.parent.getStatus()\n self.status.subscribe(self)\n\n def log(self, event, message, *args):\n \"\"\"Simple wrapper for log. 
Passes string formatting args through.\"\"\"\n if self.active and self._logging:\n self.logger.info(message, *args, extra={'bbEvent': event})\n\n def requestSubmitted(self, request):\n builderName = request.getBuilderName()\n self.log('requestSubmitted', '%s, %r', builderName, request)\n\n def requestCancelled(self, builder, request):\n builderName = builder.getName()\n self.log('requestCancelled', '%s, %r', builderName, request)\n\n def buildsetSubmitted(self, buildset):\n reason = buildset.getReason()\n self.log('buildsetSubmitted', '%r, %s', buildset, reason)\n\n def builderAdded(self, builderName, builder):\n # Use slavenames rather than getSlaves() to just get strings.\n slaves = builder.slavenames\n self.log('builderAdded', '%s, %r', builderName, slaves)\n # Must return self in order to subscribe to builderChangedState and\n # buildStarted/Finished events.\n return self\n\n def builderChangedState(self, builderName, state):\n self.log('builderChangedState', '%s, %r', builderName, state)\n\n def buildStarted(self, builderName, build):\n build_number = build.getNumber()\n bot = build.getSlavename()\n self.log('buildStarted', '%s, %d, %s', builderName, build_number, bot)\n started, _ = build.getTimes()\n self.send_build_event(\n 'BEGIN', started * 1000, 'BUILD', bot, builderName, build_number,\n self._get_requested_at_millis(build),\n patch_url=self._get_patch_url(build.getProperties()))\n # Must return self in order to subscribe to stepStarted/Finished events.\n return self\n\n def buildETAUpdate(self, build, ETA):\n # We don't actually care about ETA updates; they happen on a periodic clock.\n pass\n\n def changeAdded(self, change):\n self.log('changeAdded', '%r', change)\n\n def stepStarted(self, build, step):\n bot = build.getSlavename()\n builder_name = build.getBuilder().name\n build_number = build.getNumber()\n step_name = step.getName()\n self.log('stepStarted', '%s, %d, %s', builder_name, build_number, step_name)\n started, _ = step.getTimes()\n self.send_build_event(\n 'BEGIN', started * 1000, 'STEP', bot, builder_name, build_number,\n self._get_requested_at_millis(build),\n step_name=step_name, step_number=step.step_number,\n patch_url=self._get_patch_url(build.getProperties()))\n # Must return self in order to subscribe to logStarted/Finished events.\n return self\n\n def stepTextChanged(self, build, step, text):\n build_name = build.getBuilder().name\n build_number = build.getNumber()\n step_name = step.getName()\n self.log('stepTextChanged', '%s, %d, %s, %s',\n build_name, build_number, step_name, text)\n\n def stepText2Changed(self, build, step, text2):\n build_name = build.getBuilder().name\n build_number = build.getNumber()\n step_name = step.getName()\n self.log('stepText2Changed', '%s, %d, %s, %s',\n build_name, build_number, step_name, text2)\n\n def stepETAUpdate(self, build, step, ETA, expectations):\n # We don't actually care about ETA updates; they happen on a periodic clock.\n pass\n\n def logStarted(self, build, step, log):\n build_name = build.getBuilder().name\n build_number = build.getNumber()\n step_name = step.getName()\n log_name = log.getName()\n log_file = log.filename\n self.log('logStarted', '%s, %d, %s, %s, %s',\n build_name, build_number, step_name, log_name, log_file)\n # Create an attr on the stateful log object to count its chunks.\n # pylint: disable=protected-access\n log.__num_chunks = 0\n # pylint: enable=protected-access\n # Must return self in order to subscribe to logChunk events.\n return self\n\n def logChunk(self, _build, _step, log, 
_channel, _text):\n # Like the NSA, we only want to process metadata.\n log.__num_chunks += 1\n\n def logFinished(self, build, step, log):\n build_name = build.getBuilder().name\n build_number = build.getNumber()\n step_name = step.getName()\n log_name = log.getName()\n log_file = log.filename\n # Access to protected member __num_chunks. pylint: disable=W0212\n log_chunks = log.__num_chunks\n self.log('logFinished', '%s, %d, %s, %s, %s, %d',\n build_name, build_number, step_name,\n log_name, log_file, log_chunks)\n\n def stepFinished(self, build, step, results):\n builder_name = build.getBuilder().name\n build_number = build.getNumber()\n bot = build.getSlavename()\n step_name = step.getName()\n self.log('stepFinished', '%s, %d, %s, %r',\n builder_name, build_number, step_name, results)\n _, finished = step.getTimes()\n self.send_build_event(\n 'END', finished * 1000, 'STEP', bot, builder_name, build_number,\n self._get_requested_at_millis(build),\n step_name=step_name, step_number=step.step_number,\n result=buildbot.status.results.Results[results[0]],\n patch_url=self._get_patch_url(build.getProperties()))\n\n def buildFinished(self, builderName, build, results):\n build_number = build.getNumber()\n bot = build.getSlavename()\n self.log('buildFinished', '%s, %d, %s, %r',\n builderName, build_number, bot, results)\n started, finished = build.getTimes()\n\n # Calculate when build was scheduled if possible. Use build started\n # timestamp as initial approximation.\n scheduled = started\n source_stamp = build.getSourceStamp()\n if source_stamp and source_stamp.changes:\n scheduled = source_stamp.changes[0].when\n\n properties = build.getProperties()\n extra_result_code = properties.getProperty('extra_result_code')\n\n self.send_build_event(\n 'END', finished * 1000, 'BUILD', bot, builderName, build_number,\n self._get_requested_at_millis(build),\n result=buildbot.status.results.Results[results],\n extra_result_code=extra_result_code,\n patch_url=self._get_patch_url(properties))\n\n pre_test_time_s = None\n for step in build.getSteps():\n if step.getName() == 'mark: before_tests':\n step_started, _ = step.getTimes()\n pre_test_time_s = step_started - started\n\n # It's important that the recipe does not generate unbounded number\n # of step names (e.g. one for each git revision), to avoid stream\n # explosion in the monitoring system. Another alternative is for the recipe\n # to clearly mark such dynamic steps - e.g. 
add \"(dynamic)\" to the name,\n # and exclude such steps here.\n WHITELISTED_RECIPES = [\n 'chromium_trybot',\n ]\n steps_to_send = []\n if properties.getProperty('recipe') in WHITELISTED_RECIPES:\n for step in build.getSteps():\n step_started, step_finished = step.getTimes()\n steps_to_send.append({\n 'step_name': step.getName(),\n 'duration_s': step_finished - step_started,\n 'result': buildbot.status.results.Results[step.getResults()[0]],\n })\n\n # If property doesn't exist, this function returns None.\n # Note: this is not true for build.getProperty(), it raises KeyError.\n project_id = properties.getProperty('patch_project')\n subproject_tag = properties.getProperty('subproject_tag')\n self.send_build_result(\n scheduled, started, finished, builderName, bot,\n buildbot.status.results.Results[results],\n project_id, subproject_tag, steps=steps_to_send,\n pre_test_time_s=pre_test_time_s)\n\n def builderRemoved(self, builderName):\n self.log('builderRemoved', '%s', builderName)\n\n def slaveConnected(self, slaveName):\n self.log('slaveConnected', '%s', slaveName)\n\n def slaveDisconnected(self, slaveName):\n self.log('slaveDisconnected', '%s', slaveName)\n","sub_path":"scripts/master/status_logger.py","file_name":"status_logger.py","file_ext":"py","file_size_in_byte":20698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"271789971","text":"import cv2\nimport numpy as np\n\n\"\"\"\nReference version of the phase correlate code\n\"\"\"\n\n\ndef preprocessForPhaseCorrelate(G_a):\n N1 = cv2.getOptimalDFTSize(G_a.shape[0])\n N2 = cv2.getOptimalDFTSize(G_a.shape[1])\n G_a_padded = cv2.copyMakeBorder(G_a, (N1-G_a.shape[0])/2, (N1-G_a.shape[0])/2, (N2-G_a.shape[1])/2, (N2-G_a.shape[1])/2, cv2.BORDER_CONSTANT, value=0)\n result = np.fft.fft2(G_a_padded)\n return result\n\n\ndef phaseCorrelate(G_a, G_b):\n conj_b = np.ma.conjugate(G_b)\n R = G_a*conj_b\n R /= np.absolute(R)\n Ri = np.fft.ifft2(R)\n Ri = np.fft.fftshift(Ri)\n result = np.unravel_index(np.argmax(Ri.real), Ri.shape) \n result = -np.array([result[1]-Ri.shape[1]/2, result[0]-Ri.shape[0]/2])\n result = np.append(result, np.max(Ri.real))\n return result\n\n\nif __name__==\"__main__\":\n cam = cv2.VideoCapture(-1)\n ret, pframe = cam.read()\n pframe = cv2.resize(pframe, dsize=(128, 128))\n pframe = cv2.cvtColor(pframe, cv2.COLOR_BGR2GRAY)\n fft0 = preprocessForPhaseCorrelate(pframe)\n while True:\n ret, frame = cam.read()\n frame = cv2.resize(frame, dsize=(128, 128))\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n fft = preprocessForPhaseCorrelate(frame)\n result = phaseCorrelate(fft, fft0)\n\n frame_rolled = np.roll(frame, result[0], axis=0)\n frame_rolled = np.roll(frame_rolled, result[1], axis=1)\n\n cv2.imshow(\"frame\", frame)\n cv2.imshow(\"initial frame\", pframe)\n cv2.imshow(\"frame rolled\", frame_rolled)\n cv2.imshow(\"Diff\", cv2.absdiff(pframe, frame_rolled))\n key = cv2.waitKey(1) & 0xFF\n if key == 27:\n break\n","sub_path":"salmon/phase_correlate_src/phase_cor.py","file_name":"phase_cor.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"275988699","text":"import telebot\nimport update_db\n\ntoken = \"996571238:AAHzd8EWNZltHc2ttQBANU3Le5bOf-9Cv90\"\nbot = telebot.TeleBot(token)\n\nyes_or_no = telebot.types.InlineKeyboardMarkup(row_width=2)\ny = telebot.types.InlineKeyboardButton('Yes', callback_data='yes')\nn = telebot.types.InlineKeyboardButton('No', 
callback_data='no')\nyes_or_no.add(y, n)\n\nselect = telebot.types.InlineKeyboardMarkup(row_width=2)\nnow = telebot.types.InlineKeyboardButton('Price now', callback_data='now')\nyesterday = telebot.types.InlineKeyboardButton('Price yesterday', callback_data='yesterday')\nselect.add(now, yesterday)\n\nconvert_by = telebot.types.InlineKeyboardMarkup(row_width=2)\nusd = telebot.types.InlineKeyboardButton('convert by USD', callback_data='USD')\neur = telebot.types.InlineKeyboardButton('convert by EUR', callback_data='EUR')\nconvert_by.add(usd, eur)\n\ncrypto_name = telebot.types.InlineKeyboardMarkup(row_width=1)\nbtc = telebot.types.InlineKeyboardButton('Bitcoin', callback_data='BTC')\neth = telebot.types.InlineKeyboardButton('Ethereum', callback_data='ETH')\nbch = telebot.types.InlineKeyboardButton('Bitcoin Cash', callback_data='BCH')\nltc = telebot.types.InlineKeyboardButton('Litecoin', callback_data='LTC')\netc = telebot.types.InlineKeyboardButton('Ethereum Classic', callback_data='ETC')\ncrypto_name.add(btc, eth, bch, ltc, etc)\n\ndef get_time():\n with open('DataBases/index.db', 'r') as i:\n time = i.readlines()[0]\n time = time.strip()\n return time\n\ndef get_convert():\n with open('DataBases/index.db', 'r') as i:\n convert = i.readlines()[1]\n convert = convert.strip()\n return convert\n\ndef get_usd():\n time = get_time()\n data = ''\n if time == 'now':\n with open('DataBases/now.db', 'r') as n:\n data = n.readlines()[0]\n elif time == 'yesterday':\n with open('DataBases/history.db', 'r') as h:\n data = h.readlines()[2]\n arr = data.split('&')\n last = arr[-1].strip()\n arr.pop()\n arr.append(last)\n keys = ['BTC', 'ETH', 'BCH', 'LTC', 'ETC']\n values = []\n dct = {}\n for i in arr:\n values.append(i)\n for i in range(len(keys)):\n dct[keys[i]] = values[i]\n return dct\n\ndef get_eur():\n time = get_time()\n data = ''\n if time == 'now':\n with open('DataBases/now.db', 'r') as n:\n data = n.readlines()[1]\n elif time == 'yesterday':\n with open('DataBases/history.db', 'r') as h:\n data = h.readlines()[3]\n arr = data.split('&')\n last = arr[-1].strip()\n arr.pop()\n arr.append(last)\n keys = ['BTC', 'ETH', 'BCH', 'LTC', 'ETC']\n values = []\n dct = {}\n for i in arr:\n values.append(i)\n for i in range(len(keys)):\n dct[keys[i]] = values[i]\n return dct\n\ndef get_answer(name):\n convert = get_convert()\n if convert == 'USD':\n return get_usd()[name]\n elif convert == 'EUR':\n return get_eur()[name]\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n bot.send_message(chat_id=message.chat.id, text=\"Hello \"+str(message.from_user.first_name)+\"\\n\\nDo you want to know the cryptocurrency rates by USD or EUR?\\n\", parse_mode=\"HTML\", reply_markup=yes_or_no)\n\n@bot.callback_query_handler(func=lambda call: True)\ndef query_handler(call):\n if call.data == 'yes':\n bot.send_message(chat_id=call.from_user.id, text=\"Please select time\", parse_mode=\"HTML\", reply_markup=select)\n elif call.data == 'no':\n bot.send_message(chat_id=call.from_user.id, text=\"Good by\", parse_mode=\"HTML\")\n if call.data == 'now':\n time = call.data\n with open('DataBases/index.db', 'w') as i:\n i.write(time+'\\n')\n bot.send_message(chat_id=call.from_user.id, text=\"OK\\n\\nNow please select with what convert cryptocurrency?\", parse_mode=\"HTML\", reply_markup=convert_by)\n elif call.data == 'yesterday':\n time = call.data\n with open('DataBases/index.db', 'w') as i:\n i.write(time+'\\n')\n bot.send_message(chat_id=call.from_user.id, text=\"OK\\n\\nNow please select with what 
convert cryptocurrency?\", parse_mode=\"HTML\", reply_markup=convert_by)\n if call.data == 'USD':\n name = call.data\n with open('DataBases/index.db', 'a') as i:\n i.write(name)\n bot.send_message(chat_id=call.from_user.id, text=\"You selected USD option\\nNow please select cryptocurrency\", parse_mode=\"HTML\", reply_markup=crypto_name)\n elif call.data == 'EUR':\n name = call.data\n with open('DataBases/index.db', 'a') as i:\n i.write(name)\n bot.send_message(chat_id=call.from_user.id, text=\"You selected EUR option\\nNow please select cryptocurrency\", parse_mode=\"HTML\", reply_markup=crypto_name)\n if call.data == 'BTC':\n BTC = get_answer('BTC')\n bot.send_message(chat_id=call.from_user.id, text=BTC)\n elif call.data == 'ETH':\n ETH = get_answer('ETH')\n bot.send_message(chat_id=call.from_user.id, text=ETH)\n elif call.data == 'BCH':\n BCH = get_answer('BCH')\n bot.send_message(chat_id=call.from_user.id, text=BCH)\n elif call.data == 'LTC':\n LTC = get_answer('LTC')\n bot.send_message(chat_id=call.from_user.id, text=LTC)\n elif call.data == 'ETC':\n ETC = get_answer('ETC')\n bot.send_message(chat_id=call.from_user.id, text=ETC)\n\nbot.polling(timeout=3.5)\n","sub_path":"telebot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"421313039","text":"# Write a script that takes a word as its input, and returns a dictionary containing the tally of how many times each letter in the alphabet was used in the word.\n\nword = input(\"Input a word: \")\n\ndictionary = {}\n\nfor i in set(word): # for characters in inputted word\n dictionary[i] = word.count(i) # use .count to tally number of individual characters\n\nprint(f\"The word {word} is broken down like this: \")\n\nfor key, value in sorted(dictionary.items(), key=lambda item : item[1]):\n print(\"%s: %s\" % (key, value))\n","sub_path":"02-week/1-tuesday/labs/j-ckie/03_letter_summary_j-ckie.py","file_name":"03_letter_summary_j-ckie.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"532661501","text":"# Licensed to Cloudera, Inc. under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. Cloudera, Inc. licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom desktop.lib.python_util import force_dict_to_strings\n\n\nclass Form(object):\n \"\"\"\n Represents a form in sqoop.\n\n Example sqoop form dictionary received by server: [\n {\n \"id\": 1,\n \"inputs\": [\n {\n \"id\": 1,\n \"name\": \"connection.jdbcDriver\",\n \"value\": \"org.apache.derby.jdbc.EmbeddedDriver\",\n \"type\": \"STRING\",\n \"size\": 128,\n \"sensitive\": false\n },\n {\n \"id\": 2,\n \"name\": \"connection.connectionString\",\n \"value\": \"jdbc%3Aderby%3A%2Ftmp%2Ftest\",\n \"type\": \"STRING\",\n \"size\": 128,\n \"sensitive\": false\n },\n {\n \"id\": 3,\n \"name\": \"connection.username\",\n \"type\": \"STRING\",\n \"size\": 40,\n \"sensitive\": false\n },\n {\n \"id\": 4,\n \"name\": \"connection.password\",\n \"type\": \"STRING\",\n \"size\": 40,\n \"sensitive\": true\n },\n {\n \"id\": 5,\n \"name\": \"connection.jdbcProperties\",\n \"type\": \"MAP\",\n \"value\": {\n \"key\": \"value\"\n },\n \"sensitive\": false\n }\n ],\n \"name\": \"connection\",\n \"type\": \"CONNECTION\"\n }\n ],\n\n These forms are relatively unstructured. They will always have an ID, name, type, and inputs.\n The number of inputs will vary.\n Their definitions are dynamically generated from annotations on classes in sqoop.\n The ID identifies the form in the sqoop metadata reprository.\n The ID could vary.\n The ID is unique per type.\n \"\"\"\n def __init__(self, id, name, type, inputs=[]):\n self.id = id\n self.name = name\n self.type = type\n self.inputs = inputs\n\n @staticmethod\n def from_dict(form_dict):\n form_dict['inputs'] = [Input.from_dict(input_dict) for input_dict in form_dict.setdefault('inputs', [])]\n return Form(**force_dict_to_strings(form_dict))\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'type': self.type,\n 'name': self.name,\n 'inputs': [input.to_dict() for input in self.inputs]\n }\n\n\nclass Input(object):\n \"\"\"\n Represents an input in a sqoop form.\n\n Example sqoop input dictionary received by server: {\n \"id\": 2,\n \"name\": \"connection.connectionString\",\n \"values\": \"jdbc%3Aderby%3A%2Ftmp%2Ftest\",\n \"type\": \"STRING\",\n \"size\": 128,\n \"sensitive\": false\n }\n\n The ID identifies the input in the sqoop metadata repository.\n The ID could vary.\n The ID is unique per type and per form.\n \"\"\"\n def __init__(self, id, type, name, value=None, values=None, sensitive=False, size=-1):\n self.id = id\n self.type = type\n self.name = name\n self.value = value\n self.values = values\n self.sensitive = sensitive\n self.size = size\n\n @staticmethod\n def from_dict(input_dict):\n if 'values' in input_dict and isinstance(input_dict['values'], basestring):\n input_dict['values'] = input_dict['values'].split(',')\n return Input(**force_dict_to_strings(input_dict))\n\n def to_dict(self):\n d = {\n 'id': self.id,\n 'type': self.type,\n 'name': self.name,\n 'sensitive': self.sensitive\n }\n if self.value:\n d['value'] = self.value\n if self.values:\n d['values'] = ','.join(self.values)\n if self.size != -1:\n d['size'] = self.size\n return 
d\n","sub_path":"apps/sqoop/src/sqoop/client/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"190024250","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\nVmFromWfs\n A QGIS plugin\nValue Map from WFS\n -------------------\n begin : 2014-09-24\n git sha : $Format:%H$\n copyright : (C) 2014 by Camptocamp SA\n email : info@camptocamp.com\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n# Import the PyQt and QGIS libraries\nimport os\nimport qgis\nfrom qgis.gui import QgsMessageBar\nfrom PyQt4.QtGui import QAction, QIcon, QMessageBox, QDialog\nimport urllib\nfrom PyQt4 import QtXml\n\n# Initialize Qt resources from file resources.py\nimport resources\n\nclass VmFromWfs:\n def __init__(self, iface):\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n\n def initGui(self):\n # Create action that will start plugin configuration\n self.action = QAction(\n QIcon(\":/plugins/VMfromWFS/icon.png\"),\n u\"Set Value Maps for selected layer\", self.iface.mainWindow())\n # connect the action to the run method\n self.action.triggered.connect(self.run)\n\n # Add toolbar button and menu item\n self.iface.addToolBarIcon(self.action)\n self.iface.addPluginToWebMenu(u\"&VM_WFS\", self.action)\n\n def unload(self):\n # Remove the plugin menu item and icon\n self.iface.removePluginWebMenu(u\"&VM_WFS\", self.action)\n self.iface.removeToolBarIcon(self.action)\n\n def run(self):\n \"\"\"For getting an automated updates of value map,\n the user select a layer in the legend tree and click on\n the plugin button. The present run function in launched\n and follow these steps:\n 1. check that a wfs layer is selected\n 2. get the DescribeFeatureType service and retrieve the layer element name\n 3. From the name, getting the layer type element\n 4. in the layer type element, getting all enumeration present\n 5. 
for all attributes having an enumeration in the WFS, setting the valueMap\n        widget with appropriate values\n        \"\"\"\n\n        # get the current layer\n        clayer = qgis.utils.iface.mapCanvas().currentLayer()\n        if clayer == None:\n            # check that a layer is selected\n            self.iface.messageBar().pushMessage(\"VM from WFS\", 'No layer selected', QgsMessageBar.WARNING, 5)\n            return\n        if clayer.providerType () != 'WFS':\n            # check that the layer is a web feature service\n            self.iface.messageBar().pushMessage(\"VM from WFS\", 'Layer not from WFS', QgsMessageBar.WARNING, 5)\n            return\n        source = clayer.source()\n        try:\n            # get the typeName, useful in the DescribeFeatureType\n            layerName = source.split('TYPENAME')[1].split('&')[0].split(':')[1]\n        except:\n            self.iface.messageBar().pushMessage(\"VM from WFS\", 'Cannot retrieve source name\\nSet layer name as source name', QgsMessageBar.WARNING, 5)\n            layerName = clayer.name()\n\n        # Get the DescribeFeatureType\n        urlPart1 = source.split('REQUEST')[0]\n        urlPart2 = 'REQUEST=DescribeFeatureType'\n        url = urlPart1 + urlPart2\n        xmlconnect = urllib.urlopen(url)\n        xmlbuffer = xmlconnect.read()\n        xmlconnect.close()\n\n        # parse the DescribeFeatureType\n        doc = QtXml.QDomDocument(\"EnvironmentML\");\n        if(not doc.setContent(xmlbuffer)):\n            QMessageBox.warning(self, \"Error\", \"Could not parse xml from DescribeFeatureType.\")\n        root = doc.documentElement()\n        self.particle = root.tagName().split(':')[0]\n\n        # First, we retrieve the name of the Type element of the layer\n        #\n        # Here, the logic is not easy to grasp. In the DescribeFeatureType,\n        # the layerType is linked to the layer by an element containing\n        # the name of the layer and the name of the type.\n        # For example, the osm_buildings layer is linked to the osm_buildingsType:\n        # <element name=\"osm_buildings\" type=\"ms:osm_buildingsType\"/>\n        # ...\n        # <complexType name=\"osm_buildingsType\">\n        # </complexType>\n        PFD = doc.elementsByTagName (self.particle + \":element\")\n        typeName = None\n        for index in range(PFD.length()):\n            element = PFD.item(index)\n            if element.isNull ():\n                break\n            nodeAttributes = element.attributes()\n            for indexAtt in range(nodeAttributes.length()):\n                attributeItem = nodeAttributes.item(indexAtt) # pulls out first item\n                attribute = attributeItem.toAttr()\n                if attribute.name() == 'name':\n                    if attribute.value() == layerName:\n                        typeAttribute = nodeAttributes.namedItem('type')\n                        typeName = typeAttribute.toAttr().value().split(':')[1]\n                        break\n        if typeName == None:\n            return\n\n        # Once we have the name of Type element, we look for it.\n        focusType = QtXml.QDomNode()\n        complexElmts = doc.elementsByTagName (self.particle + \":complexType\")\n        for index in range(complexElmts.length()):\n            if not focusType.isNull():\n                break\n            element = complexElmts.item(index)\n            if element.isNull():\n                break\n            nodeAttributes = element.attributes()\n            for indexAtt in range(nodeAttributes.length()):\n                attributeItem = nodeAttributes.item(indexAtt)\n                attribute = attributeItem.toAttr()\n                if attribute.name() == 'name':\n                    if attribute.value() == typeName:\n                        focusType = element\n                        break\n\n        # Once we have the Type element, we can extract the enumeration values\n        # if there are some.\n        dicEnum = {}\n        self.getEnumeration(focusType, dicEnum)\n        # And then apply the modification to the qgis MapLayer.\n        self.applyEnumeration(clayer, dicEnum)\n\n        QMessageBox.information(QDialog(), \"Value Map widgets\", \"Value map widgets are successfully updated\\nfor data layer %s\" % layerName)\n\n    def applyEnumeration(self, layer, dicEnum):\n        fields = layer.pendingFields()\n        id = 0\n        for field in fields:\n            if field.name() in dicEnum:\n                values = dicEnum[field.name()]\n                
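# The 'ValueMap' widget maps displayed text to stored values; building the config\n                # with dict(zip(values, values)) shows each enumeration literal verbatim,\n                # e.g. values ['a', 'b'] become the config {'a': 'a', 'b': 'b'}.\n                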
layer.setEditorWidgetV2(id, 'ValueMap')\n config = dict(zip(values, values))\n layer.setEditorWidgetV2Config(id, config)\n id += 1\n\n def getEnumeration(self, Node, dicEnum):\n if self.hasEnumeration(Node): # just a quick check for performance\n n = Node.firstChild()\n while (not n.isNull()):\n if n.nodeName() == self.particle + ':element': # attribute of the layer\n if n.hasChildNodes(): # it will be a special type...\n if self.hasEnumeration(n): # ... and it is enumeration\n listValue = []\n self.getEnumerationValues(n, listValue)\n att = n.attributes().namedItem('name').toAttr().value()\n dicEnum[att] = listValue\n else:\n self.getEnumeration(n, dicEnum)\n n = n.nextSibling()\n\n def getEnumerationValues(self, node, listValue):\n n = node.firstChild()\n while (not n.isNull()):\n if n.nodeName() == self.particle + ':enumeration':\n nodeAttributes = n.attributes()\n for indexAtt in range(nodeAttributes.length()):\n attributeItem = nodeAttributes.item(indexAtt)\n attribute = attributeItem.toAttr()\n if attribute.name() == 'value':\n listValue.append(attribute.value())\n else:\n test = self.getEnumerationValues(n, listValue)\n n = n.nextSibling()\n\n def hasEnumeration(self, Node):\n test = False\n if Node.hasChildNodes():\n n = Node.firstChild()\n while (not n.isNull()) and not test:\n if n.nodeName() == self.particle + ':enumeration':\n test = True\n break\n else:\n test = self.hasEnumeration(n)\n n = n.nextSibling()\n return test\n\n\n\n\n\n\n","sub_path":"VmfromWFS/VMfromWFS_plugin.py","file_name":"VMfromWFS_plugin.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"421092379","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom hwt.code import If\nfrom hwt.hdl.types.bits import Bits\nfrom hwt.synthesizer.rtlLevel.netlist import RtlNetlist\nfrom hwtLib.examples.rtlLvl.netlistToRtl import netlistToVhdlStr\n\n\ndef Counter():\n t = Bits(8)\n n = RtlNetlist(\"LeadingZero\")\n\n en = n.sig(\"en\")\n rst = n.sig(\"rst\")\n clk = n.sig(\"clk\")\n s_out = n.sig(\"s_out\", t)\n cnt = n.sig(\"cnt\", t, clk=clk, syncRst=rst, defVal=0)\n\n If(en,\n cnt(cnt + 1)\n )\n\n s_out(cnt)\n\n interf = [rst, clk, s_out, en]\n\n return n, interf\n\ncounterExpected = \\\n\"\"\"library IEEE;\nuse IEEE.std_logic_1164.all;\nuse IEEE.numeric_std.all;\n\nENTITY Counter IS\n PORT (clk: IN STD_LOGIC;\n en: IN STD_LOGIC;\n rst: IN STD_LOGIC;\n s_out: OUT STD_LOGIC_VECTOR(7 DOWNTO 0)\n );\nEND Counter;\n\nARCHITECTURE rtl OF Counter IS\n SIGNAL cnt: STD_LOGIC_VECTOR(7 DOWNTO 0) := X\"00\";\n SIGNAL cnt_next: STD_LOGIC_VECTOR(7 DOWNTO 0);\nBEGIN\n assig_process_cnt: PROCESS (clk)\n BEGIN\n IF RISING_EDGE(clk) THEN\n IF rst = '1' THEN\n cnt <= X\"00\";\n ELSE\n cnt <= cnt_next;\n END IF;\n END IF;\n END PROCESS;\n\n assig_process_cnt_next: PROCESS (cnt, en)\n BEGIN\n IF en = '1' THEN\n cnt_next <= STD_LOGIC_VECTOR(UNSIGNED(cnt) + 1);\n ELSE\n cnt_next <= cnt;\n END IF;\n END PROCESS;\n\n s_out <= cnt;\nEND ARCHITECTURE rtl;\"\"\"\n\nif __name__ == \"__main__\":\n netlist, interfaces = Counter()\n print(netlistToVhdlStr(\"Counter\", netlist, interfaces))\n","sub_path":"hwtLib/examples/rtlLvl/arithmetic/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"340228243","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in 
compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nExample script to show how to use UpdateServerProfile\n\"\"\"\nfrom smc import session\nfrom smc.administration.certificates.tls import TLSProfile\nfrom smc.elements.other import UpdateServerProfile\nfrom smc_info import *\n\nname = \"test_update_server_profile\"\nmessage = \"Testing of update server profile.\"\ncreation_error = \"Failed to create update server profile with attribute.\"\nupdate_error = \"Failed to update server profile with retry attribute.\"\nRETRY = 4\nTIMEOUT = 30\nURL = \"https://autoupdate.ngfw.forcepoint.com/dynup.rss\"\n\nif __name__ == '__main__':\n session.login(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120, api_version=API_VERSION)\n print(\"session OK\")\n\ntry:\n print(\"Check and delete if UpdateServerProfile is present.\")\n if UpdateServerProfile.objects.filter(name=name, exact_match=True):\n UpdateServerProfile(name).delete()\n print(\"Successfully deleted UpdateServerProfile.\")\n tls_profile = list(TLSProfile.objects.all())[0]\n # create update server profile\n update_server_profile = UpdateServerProfile.create(name, retry=RETRY, timeout=TIMEOUT,\n urls=[URL], tls_profile_ref=tls_profile,\n comment=message)\n assert update_server_profile.retry == RETRY and update_server_profile.timeout == TIMEOUT,\\\n creation_error\n print(\"Successfully created UpdateServerProfile.\")\n update_server_profile.update(retry=RETRY + 1)\n assert update_server_profile.retry == RETRY + 1, update_error\n print(\"Successfully updated UpdateServerProfile.\")\n\nexcept Exception as e:\n print(\"Exception is: {}\".format(str(e)))\n exit(1)\nfinally:\n UpdateServerProfile(name).delete()\n print(\"Deleted UpdateServerProfile successfully.\")\n session.logout()\n","sub_path":"smc/examples/update_server_profile.py","file_name":"update_server_profile.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"104044673","text":"\nimport unittest\n\nfrom online_stock_span import StockSpanner\n\n\nclass OnlineStockSpanTests(unittest.TestCase):\n \"\"\"Tests for online stock span challenge.\"\"\"\n\n def test_case_1(self):\n spanner = StockSpanner()\n result = []\n for value in [100, 80, 60, 70, 60, 75, 85]:\n result.append(spanner.next(value))\n\n self.assertEqual(result, [1, 1, 1, 2, 1, 4, 6])\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"src/test_online_stock_span.py","file_name":"test_online_stock_span.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"257722658","text":"import logging\nimport configparser\nimport argparse\nimport subprocess\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom normalcall import NormalCall\nfrom transcodedcall import TranscodedCall\nfrom ssh_handler import ShellHandler\nfrom collections import deque\nimport threading\n\n# Required params for the linphone handling\nLINPHONE_ARGS = ['ssh_linphone1', 'ssh_linphone2', 'linphone_time', 'record_filename']\n\nlog_levels 
= {\n 'debug': logging.DEBUG, \n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL\n}\n\nrtp_processes = []\n\n# This will parse the config file\ndef load_config(conf):\n try:\n logging.info(\"Started!\")\n parser = configparser.ConfigParser()\n if not parser.read(conf):\n raise Exception\n except Exception:\n logging.error(\"Cannot read or parse the configuration file.\")\n return None\n config = parser['client']\n logging.info(\"Configuration file loaded!\")\n return config\n\n# You can wait a given amount of time before the linphone streams ends\ndef linphone_sleep(client1, client2, linphone_time):\n logging.info(f'sleep time: {linphone_time * 60}')\n time.sleep(linphone_time * 60)\n client1.execute(chr(3))\n client2.execute(chr(3))\n\n# Initialize calls and store the rtp stream processes\ndef call(call):\n global rtp_processes\n r = call.generate_call(config.getfloat('wait', 0))\n if isinstance(r, Exception):\n return r\n rtp_processes += r\n logging.info(f'{int(len(rtp_processes)/2)} calls running')\n return None\n\n# Start creating calls with a configurable worker number\ndef threaded_calls(calls): \n # calls: A list of call objects\n try:\n with ThreadPoolExecutor(max_workers=config.getint('max_workers', 1)) as executor:\n for c in calls:\n future = executor.submit(call, c)\n if isinstance(future.result(), Exception):\n logging.error(future.result())\n break\n except KeyboardInterrupt:\n return\n\nif __name__ == '__main__':\n try:\n # Parse command arguments, parse config arguments, setup logging\n parser = argparse.ArgumentParser(description='RTPengine controller.')\n parser.add_argument('--config-file', '-c', type=str, dest='config',\n help='Location of configuration file.')\n parser.add_argument('--log-level', '-l', type=str, dest='log_level',\n help='Log level, default is info', default='info')\n args = parser.parse_args()\n \n logging.basicConfig(\n format='%(asctime)s.%(msecs)03d [%(levelname)s] [%(filename)s:%(lineno)s - %(funcName)s()] %(message)s',\n datefmt='%H:%M:%S', \n level=log_levels[args.log_level.lower()])\n\n config = load_config(args.config)\n logging.debug(config)\n\n # Generate ports for streams and create call objects\n calls = []\n ports = deque()\n number_of_calls = config.getint('number_of_calls', 0) + config.getint('transcoding_calls', 0)\n for i in range(3002, (3000 + number_of_calls * 4) + 1, 2):\n ports.append(i)\n\n for t in range(config.getint('transcoding_calls', 0)):\n calls.append(TranscodedCall(ports.popleft(), ports.popleft(), **config))\n for n in range(config.getint('number_of_calls', 0)):\n calls.append(NormalCall(ports.popleft(), ports.popleft(), **config))\n\n # Start linphone clients on two separate vm\n if config.get('linphone', 'no') == 'yes':\n for i in LINPHONE_ARGS:\n if not config.get(i, None):\n logging.exception(f'Config parameter: {i} not found!')\n # ssh into clients username == password\n client1_config = config.get('ssh_linphone1').split('@')\n client2_config = config.get('ssh_linphone2').split('@')\n \n client1 = ShellHandler(client1_config[1], client1_config[0], client1_config[0])\n client2 = ShellHandler(client2_config[1], client2_config[0], client2_config[0])\n \n cmd1 = f'python app.py -p /home/user/shanty.wav -r /home/user/{config.get(\"record_filename\")} -c \"call 456\" -pr 10.0.1.6:8000'\n cmd2 = f'python app.py -p /home/user/shanty.wav -r /home/user/{config.get(\"record_filename\")} -c \"answer 1\" -pr 10.0.1.7:8000'\n \n logging.info(cmd1)\n 
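# cmd2 makes the second client answer the call that cmd1 places from the first\n            # (see the 'call 456' / 'answer 1' arguments assembled above)\n            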
logging.info(cmd2)\n\n            # Execute commands on clients\n            client1.execute(cmd1)\n            time.sleep(0.5)\n            client2.execute(cmd2)\n\n            time.sleep(20)\n\n            if len(calls) > 0: # if calls were also specified, run them alongside the linphone timer\n                threaded_calls(calls)\n                threading.Thread(target=linphone_sleep, args=(client1, client2, config.getint('linphone_time'), ), daemon=True).start()\n            else:\n                linphone_sleep(client1, client2, config.getint('linphone_time'))\n        else:\n            threaded_calls(calls)\n\n        # Needed to be able to stop the subprocesses\n        for r in rtp_processes:\n            r.communicate()\n    except KeyboardInterrupt:\n        for c in calls:\n            if c.running:\n                c.delete()\n    except Exception as e:\n        logging.exception(e)\n    except:\n        logging.exception(\"Got exception on main handler\")\n        raise\n\n","sub_path":"client/new_client.py","file_name":"new_client.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"517028687","text":"\nimport pyblish.api\n\n\nclass ValidateSubsetUnique(pyblish.api.ContextPlugin):\n    \"\"\"No duplicated subset\n\n    Ensure every object (subset instance) being published has a unique name;\n    you can not publish multiple subsets with the same subset name at the\n    same time.\n\n    \"\"\"\n\n    label = \"No duplicated subset\"\n    order = pyblish.api.ValidatorOrder - 0.44\n\n    def process(self, context):\n        invalid = self.get_invalid(context)\n\n        if not len(invalid) == 0:\n            msg = (\"Instances have duplicated subsets:\\n    \" +\n                   \"\\n    \".join(invalid) +\n                   \"\\n\")\n\n            self.log.error(msg)\n            raise AssertionError(msg)\n\n    @classmethod\n    def get_invalid(cls, context):\n        invalid = list()\n        subsets = dict()\n\n        for instance in context:\n            # Same subset but different 'extractType' (representation)\n            # will be processed as a different thing.\n            asset = instance.data[\"asset\"]\n            extract_type = \"@\" + instance.data.get(\"extractType\", \"*\")\n            subset = instance.data[\"subset\"] + extract_type\n            if asset in subsets:\n                if subset in subsets[asset]:\n                    invalid.append(instance.data[\"objectName\"])\n                else:\n                    subsets[asset].append(subset)\n            else:\n                subsets[asset] = [subset]\n\n        return invalid\n","sub_path":"plugins/global/publish/validate_subset_unique.py","file_name":"validate_subset_unique.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"147257560","text":"#!/usr/bin/env python2.7\n\n# Copyright 2015 Cisco Systems, Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations under the License.\n\n''' Sample usage of composite of functions 'device_mount' and 'connected'.\n\n Mount any one device that is configured and not mounted.\n Pause while the Controller connects to the device.\n \n An unreachable device will never connect, so a 'time out' is enforced.\n Repeat the demonstration on a different device if 'time out' occurs.\n \n Exit code is zero when one device is both mounted and connected, otherwise non-zero.\n'''\n\nfrom __future__ import print_function as _print_function\n\nfrom basics.inventory import device_mount, inventory_unmounted, connected\nfrom settings import config\nimport time\nfrom basics.interpreter import sys_exit, EX_OK, EX_TEMPFAIL\n\ntime_out = 10.0\n'''Number of seconds to time out.'''\n\ntime_interval = 0.2\n'''Initial time interval between checks.'''\n\ndef mount_from_settings(device_name):\n \"\"\"Mount the specified device with the configured settings.\"\"\"\n device_config = config['network_device'][device_name]\n print('device_mount(' + device_name, *device_config.values(), sep=', ', end=')\\n')\n device_mount(\n device_name,\n device_config['address'],\n device_config['port'],\n device_config['username'],\n device_config['password'])\n\ndef demonstrate(device_name):\n \"\"\" Mount *and* connect the specified device.\n\n The device must not be mounted already.\n The Controller will attempt connection to the device.\n Return True if connection succeeds before the time-out period elapses.\n \"\"\"\n mount_from_settings(device_name)\n time_accum = 0.0\n num_checks = 0\n while time_accum < time_out:\n num_checks += 1\n expanding_interval = time_interval * num_checks\n time_accum += expanding_interval \n # Don't hammer the Controller or it will crash.\n # This not a denial-of-service (DOS) attack ;-)\n time.sleep(expanding_interval)\n print('connected(' + device_name, sep='', end='): ')\n if connected(device_name):\n print(True, 'after %s checks and %s seconds.' % (num_checks, time_accum))\n return True\n else:\n print(False)\n continue\n print('Unconnected after %s checks and %s seconds.' % (num_checks, time_accum))\n return False\n\ndef main():\n \"\"\"Demonstrate on the unmounted devices, stopping when a connection to any device is established.\"\"\"\n unmounted_list = inventory_unmounted()\n if not unmounted_list:\n print('All configured devices are mounted. 
Demonstration cancelled.')\n else:\n for device_name in unmounted_list:\n if demonstrate(device_name):\n return EX_OK\n return EX_TEMPFAIL\n\nif __name__ == \"__main__\":\n sys_exit(main())\n","sub_path":"src/learning_lab/01_device_connect.py","file_name":"01_device_connect.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"176266590","text":"#!/usr/bin/env python\nimport sys\nfrom os.path import join, isfile, abspath, dirname\n\nimport pip\nfrom setuptools import setup, find_packages\n\n\nname = 'bed_annotation'\npackage_name = 'ensembl'\nversion = '0.1'\n\n\nprint('Upgrading pip and setuptools...')\ntry:\n pip.main(['install', '--upgrade', 'setuptools', 'pip'])\nexcept StandardError:\n sys.stderr.write('Cannot update pip and setuptools, that might cause errors '\n 'during the following intallation\\n')\n\ntry:\n import ngs_utils\nexcept ImportError:\n print('Installing NGS_Utils...')\n pip.main(['install', 'git+git://github.com/vladsaveliev/NGS_Utils.git'])\n import ngs_utils\n\n\nsetup(\n name=name,\n version=version,\n author='Vlad Saveliev',\n author_email='vladislav.sav@gmail.com',\n description='Annotation of BED files',\n keywords='bioinformatics',\n license='GPLv3',\n packages=[\n 'ensembl',\n 'bed_annotation',\n ],\n package_data={\n package_name: [\n 'hg19/ensembl.bed.gz',\n 'hg19/ensembl.bed.gz.tbi',\n 'hg38/ensembl.bed.gz',\n 'hg38/ensembl.bed.gz.tbi',\n 'mm10/ensembl.bed.gz',\n 'mm10/ensembl.bed.gz.tbi',\n ],\n },\n include_package_data=True,\n zip_safe=False,\n scripts=[\n 'scripts/annotate_bed.py',\n ],\n install_requires=[\n 'pybedtools',\n ],\n setup_requires=[\n 'numpy',\n ],\n classifiers=[\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: JavaScript',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n ],\n)\n","sub_path":"bed_annotation/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"524269417","text":"class Student:\n def __init__(self, name, roll):\n self.n = name\n self.r = roll\n\n list=[\"\"]\n list1=[\"\"]\n flist=[\"\"]\n\n def display(self):\n list.append(\"self.n\")\n list.append(\"self.r\")\n flist=dict(zip(list, list1))\n return flist\n\n def setAge(self,age):\n self.a = age\n\n def setMarks(self,marks):\n self.m = marks\n\nn=0\ntotalm=0\n\nfor i in range(3):\n a=str(input(\"enter name\"))\n b=str(input(\"enter roll\"))\n c=int(input(\"enter age\"))\n d=int(input(\"enter marks\"))\n student1 = Student(\"a\", \"b\")\n print(student1.display())\n student1.setAge(c)\n student1.setMarks(d)\n totalm = totalm + d\n if d>40:\n n=n+1\n\n\navgmarks= (totalm/10)\nprint(\"average marks is\" , avgmarks)\nprint(\"no of passed students \" , n)\n\n\n\n\n\n\n\n\n","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"257330220","text":"#!/usr/bin/env python3\n\nimport cv2\nimport sys\nimport os\nimport subprocess\n\nsnap = os.environ[\"SNAP\"]\n\n# force 720p 
as default\nwidth = 1280\nheight = 720\n\ncpuinfo = open('/proc/cpuinfo', 'r').readlines()[-1]\n\nif 'Raspberry Pi' in cpuinfo:\n # pi is to slow for 720p, go to a lower resolution\n width = 640\n height = 360\n subprocess.run(['logger', 'On a Pi3, switching to lower resolution !'])\n\n\ncascPath = snap + \"/usr/share/opencv4/haarcascades/haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\nvideo_capture = cv2.VideoCapture(0)\n\nvideo_capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)\nvideo_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n\ncv2.namedWindow('Video',cv2.WINDOW_NORMAL)\ncv2.setWindowProperty('Video', cv2.WND_PROP_ASPECT_RATIO,\n cv2.WINDOW_FULLSCREEN)\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"demos/bin/facedetect.py","file_name":"facedetect.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"411712084","text":"# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\nimport os\nimport time\nimport sys\nimport torch\nUSE_TENSORBOARD = True\ntry:\n  import tensorboardX\n  print('Using tensorboardX')\nexcept:\n  USE_TENSORBOARD = False\n\nclass Logger(object):\n  def __init__(self, opt):\n    \"\"\"Create a summary writer logging to log_dir.\"\"\"\n    if not os.path.exists(opt.save_dir):\n      os.makedirs(opt.save_dir)\n    if not os.path.exists(opt.debug_dir):\n      os.makedirs(opt.debug_dir)\n    \n    time_str = time.strftime('%Y-%m-%d-%H-%M')\n\n    args = dict((name, getattr(opt, name)) for name in dir(opt)\n                if not name.startswith('_'))\n    file_name = os.path.join(opt.save_dir, 'opt.txt')\n    with open(file_name, 'wt') as opt_file:\n      opt_file.write('==> torch version: {}\\n'.format(torch.__version__))\n      opt_file.write('==> cudnn version: {}\\n'.format(\n        torch.backends.cudnn.version()))\n      opt_file.write('==> Cmd:\\n')\n      opt_file.write(str(sys.argv))\n      opt_file.write('\\n==> Opt:\\n')\n      for k, v in sorted(args.items()):\n        opt_file.write('  %s: %s\\n' % (str(k), str(v)))\n    \n    log_dir = opt.save_dir + '/logs_{}'.format(time_str)\n    if USE_TENSORBOARD:\n      self.writer = tensorboardX.SummaryWriter(log_dir=log_dir)\n    else:\n      if not os.path.exists(os.path.dirname(log_dir)):\n        os.mkdir(os.path.dirname(log_dir))\n      if not os.path.exists(log_dir):\n        os.mkdir(log_dir)\n    self.log = open(log_dir + '/log.txt', 'w')\n    try:\n      os.system('cp {}/opt.txt {}/'.format(opt.save_dir, log_dir))\n    except:\n      pass\n    self.start_line = True\n\n  def write(self, txt):\n    if self.start_line:\n      time_str = time.strftime('%Y-%m-%d-%H-%M')\n      self.log.write('{}: {}'.format(time_str, txt))\n    else:\n      self.log.write(txt) \n    self.start_line = False\n    if '\\n' in txt:\n      self.start_line = True\n    self.log.flush()\n  \n  def close(self):\n    self.log.close()\n  \n  def scalar_summary(self, tag, value, step):\n    \"\"\"Log a scalar variable.\"\"\"\n    if USE_TENSORBOARD:\n      self.writer.add_scalar(tag, value, step)\n","sub_path":"PyTorch/contrib/cv/detection/CenterNet/src/lib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"348673529","text":"import matplotlib.pyplot as plt\nx = []\ny = []\ndatset = open('teste.txt','w')\ndatset.write(\"18,120\\n\")\ndatset.write(\"21,50\\n\")\ndatset.write(\"30,30\\n\")\ndatset.close()\n\narquivo = open('teste.txt','r')\nfor line in arquivo:\n    line = line.strip()\n    X, Y = line.split(',')\n    x.append(X)\n    
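# note: X and Y are still strings at this point; wrap them in float() if numeric axes are needed\n    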
y.append(Y)\narquivo.close()\nplt.plot(x,y)\nplt.title('Testes')\nplt.xlabel('dominio')\nplt.ylabel('ordenada')\n\nplt.show()","sub_path":"Python-exercicios/grafico.py","file_name":"grafico.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"393619830","text":"#! /usr/bin/python3\nimport sys\nsys.path.append(\"..\")\n\nimport random\nfrom airmash.client import Client\nfrom airmash.player import Player\nfrom airmash.ships import ships\nfrom airmash.types import ship_types\nfrom airmash import packets\nimport threading\nimport time\nimport names\nimport math\n\nUP = 'UP'\nDOWN = 'DOWN'\nLEFT = 'LEFT'\nRIGHT = 'RIGHT'\nFIRE = 'FIRE'\nSPECIAL = 'SPECIAL'\n\nANGLE_FUZZ = math.pi / 15.\nSHOOT_CUTOFF = 800.\nFLEE_CUTOFF = 90.\nMAX_TRIES=50\nFPS = 60.\n\n# This is a null player who can be used for debugging.\nZERO_PLAYER = Player(99999999)\nZERO_PLAYER.posX=0\nZERO_PLAYER.posX=0\n\ndef rare():\n return random.randrange(0, 10) == 0\n\ntypey=list(ship_types.keys())[random.randrange(0,5)]\nif len(sys.argv) > 1:\n typey = sys.argv[1]\nprint(\"Type is \", typey)\n\nname = names.get_first_name()\nif len(sys.argv) > 2:\n name = \" \".join(sys.argv[2:])\npreferredTarget = name[4:]\nif rare():\n name = \"Robot \" + name\nname = name[:20]\nprint(\"Name is \", name)\n\n\nme = None\n\ndef timeNear(x):\n return random.normalvariate(x, x/4.)\n\ndef get_nearest_player():\n #return ZERO_PLAYER\n minDist = float(\"inf\")\n nearestPlayer = None\n for uid in client.players:\n p = client.players[uid]\n if p == client.player:\n continue\n if p.flag == client.player.flag:\n continue\n if preferredTarget in p.name:\n return p\n dist = p.dist_from(client.player)\n if (dist < minDist):\n minDist = dist\n nearestPlayer = p\n return nearestPlayer \n\nclass StoppableThread(threading.Thread):\n def __init__(self, *args, **kwargs):\n super(StoppableThread, self).__init__(*args, **kwargs)\n self._event = threading.Event()\n\n def stop(self):\n self._event.set()\n\n def wait(self, timeout=1):\n return self._event.wait(timeout=timeout)\n\nclass ClientUpdate(StoppableThread):\n def __init__(self, *args, **kwargs):\n StoppableThread.__init__(self, *args, **kwargs)\n \n def target_player(self, player):\n startrot = client.player.rotation\n wrongness = client.player.angle_to(player) - client.player.rotation;\n going = None\n if (client.player.dist_from(player) > SHOOT_CUTOFF):\n going = UP\n if (client.player.dist_from(player) < FLEE_CUTOFF):\n going = DOWN \n if going:\n self.send_keydown(going) \n keypress = None\n if (wrongness < -ANGLE_FUZZ ):\n keypress = LEFT\n if wrongness > ANGLE_FUZZ:\n keypress = RIGHT\n if keypress is not None:\n self.send_keydown(keypress)\n turntime = wrongness/ships[ship_types[client.player.type]].turnFactor/FPS\n print(\"Turning for {} s to correct {} wrongness\".format(turntime, wrongness))\n self.wait(abs(turntime))\n self.send_keyup(keypress)\n #self.wait(4)\n #print(\"{} is now the wrongness.\".format(wrongness))\n if going:\n self.send_keyup(going)\n\n def charge_or_shoot(self, player):\n orig_health = client.player.health;\n dist = client.player.dist_from(player)\n cooldown = 0;\n rounds = 0 \n while client.player.dist_from(player) <= dist and rounds < 100:\n print (\"My location: {0}, {1}\".format(client.player.posX, client.player.posY))\n dist = client.player.dist_from(player)\n if (dist > SHOOT_CUTOFF) or cooldown > 0:\n keypress = (DOWN if rare() and rare() else UP)\n self.send_keydown(keypress)\n 
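# rough travel time: half the remaining distance at maxSpeed units/frame, over FPS frames/second;\n                # the result is capped at 1 s, jittered with timeNear() and floored at 0.05 s below before sleeping\n                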
distTime = (dist / 2.) / ships[ship_types[client.player.type]].maxSpeed / FPS\n distTime = min(1, distTime)\n distTime = max(timeNear(distTime), .05)\n self.wait(distTime)\n self.send_keyup(keypress)\n else:\n keypress = FIRE\n # If neither prowler nor mohawk\n if not client.player.type in ['Mohawk', 'Prowler']:\n if (not (random.randrange(0, 3) == 0)) or (abs(client.player.angle_to(player)-client.player.rotation) > ANGLE_FUZZ * 3):\n keypress = SPECIAL\n #elif (abs(client.player.angle_to(player)-client.player.rotation) > ANGLE_FUZZ * 3):\n # Retarget.\n # return\n if client.player.type == 'Predator':\n if rare():\n keypress = SPECIAL\n self.send_keydown(UP)\n if client.player.type == 'Mohawk':\n self.send_keyup(FIRE)\n self.wait(.2)\n self.send_keydown(FIRE)\n else:\n self.send_keydown(keypress)\n self.wait(.05)\n self.send_keyup(UP)\n self.wait(.05)\n cooldown = 3\n self.send_keyup(keypress)\n if (client.player.type == 'Prowler'):\n if (client.player.health < orig_health) or (cooldown == 1) or rare():\n self.send_keyup(SPECIAL)\n self.send_keydown(SPECIAL)\n cooldown-=1\n if (client.player.dist_from(get_nearest_player()) < dist/2.):\n # Someone is much closer. Abort, deal with the immediate threat.\n print(\"Punting to deal with near threat\")\n return;\n \n def react_to_nearest(self):\n nearestPlayer = get_nearest_player()\n if (nearestPlayer is None):\n print(\"Nobody detected\")\n return\n print(\"Aiming at {0} who is {1} away and {2} off\".format(nearestPlayer.name,\n nearestPlayer.dist_from(client.player),\n client.player.angle_to(nearestPlayer)))\n print(\"Targetting {0}\".format(nearestPlayer.name))\n # Attack them until someone dies or we're too badly shot at.\n # Are we pointed vaguely near them?\n self.target_player(nearestPlayer)\n # Attack.\n self.charge_or_shoot(nearestPlayer)\n \n def send_keydown(self, key):\n client.key(key=key, state=True)\n\n def send_keyup(self, key):\n client.key(key=key, state=False)\n\n def run(self):\n while not self.wait():\n if client.connected:\n break\n print(\"Players\")\n for p in client.players:\n print(\" \", client.players[p].name)\n packet = packets.build_player_command('COMMAND', com='respawn', data=str(ship_types[typey]))\n client.send(packet)\n if False: #rare(): \n packet = packets.build_player_command('CHAT', text = \"All hail the robot overlords!\")\n client.send(packet)\n self.wait(2)\n if client.player.type == 'Mohawk':\n self.send_keydown(\"FIRE\")\n while True:\n self.react_to_nearest()\n #my_status = client.players[me].status\n\n\nclient = Client()\n\n@client.on('LOGIN')\ndef on_login(client, message):\n print(\"Client has logged in!\")\n\n_t_update = ClientUpdate()\n_t_update.start()\n\nclient.connect(\n name=name,\n flag='HU',\n region='eu',\n room='ffa1',\n)\n\n_t_update.stop()\n_t_update.join()\n\n","sub_path":"homing-bot.py","file_name":"homing-bot.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"294838335","text":"import torch.nn.functional as F\nfrom torch import nn\nimport sys\nimport torch\n\n# Convolutional neural network (two convolutional layers)\nimport torch.nn.functional as F\nclass ConvNet(nn.Module):\n\tdef __init__(self, input_dim, output_dim, num_classes=10, dropout=0.0):\n\t\tsuper(ConvNet, self).__init__()\n\t\tself.layer1 = nn.Sequential(\n\t\t\t# nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),\n\t\t\tnn.Conv2d(1, 16, kernel_size=5, stride=1, 
padding=1),\n\t\t\tnn.BatchNorm2d(16),\n\t\t\tnn.ReLU(),\n\t\t\tnn.MaxPool2d(kernel_size=2, stride=2))\n\t\tself.layer2 = nn.Sequential(\n\t\t\t# nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),\n\t\t\tnn.Conv2d(16, 32, kernel_size=5, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(32),\n\t\t\tnn.ReLU(),\n\t\t\tnn.MaxPool2d(kernel_size=2, stride=2))\n\n\n\t\tself.fc1 = nn.Linear( 32*5*5, 100, bias=False)\n\t\tself.fc2 = nn.Linear(100, num_classes, bias=False)\n\t\t\t\t\n\n\tdef forward(self, x):\n\t\tx = x.transpose(1,2).unsqueeze(1) # Turn (batch_size x seq_len x input_size) into (batch_size x input_size x seq_len) for CNN \n\t\tout = self.layer1(x)\n\t\tout = self.layer2(out)\n\t\tout = out.reshape(out.size(0), -1)\n\t\tout = F.relu(self.fc1(out))\n\t\tout = self.fc2(out)\n\t\t# return F.log_softmax(out, dim=1) \n\t\treturn out\n\n\nclass LSTM(nn.Module):\n\tdef __init__(self, input_dim, hidden_dim, output_dim, num_layers, dropout=0.0, bidirectional=False, use_cnn=True, featrep='rescale'):\n\t\tsuper(LSTM, self).__init__()\n\n\t\tself.featrep = featrep\n\t\tself.bidirectional = bidirectional\n\t\tself.use_cnn = use_cnn\n\t\tself.feature_size = hidden_dim * 2 if self.bidirectional else hidden_dim\n\n\t\tif self.use_cnn: \n\t\t\tself.c1 = nn.Conv1d(input_dim, hidden_dim, 2)\n\t\t\tself.p1 = nn.AvgPool1d(2)\n\t\t\n\t\t# Initialize the LSTM, Dropout, Output layers \n\t\tself.hidden_dim = hidden_dim\n\t\tself.num_layers = num_layers \n\t\tif self.use_cnn: #TODO: shorten expression\n\t\t\tself.lstm = nn.LSTM(hidden_dim, hidden_dim, num_layers, dropout=dropout, batch_first=True)\n\t\telse:\n\t\t\tself.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, dropout=dropout, batch_first=True)\n\n\t\tself.fc = nn.Linear(hidden_dim, output_dim)\n\n\tdef init_hidden(self, batch_size):\n\t\t# The axes semantics are (num_layers, minibatch_size, hidden_dim) \n\t\treturn (torch.zeros(self.num_layers, batch_size, self.hidden_dim).cuda(),\n\t\t\t\ttorch.zeros(self.num_layers, batch_size, self.hidden_dim).cuda())\n\n\tdef forward(self, x, lengths):\n\t\tbatch_size = lengths.shape[0] \n\t\t\n\t\thidden = self.init_hidden(batch_size)\n\t\t# TODO: what if featrep == rescale ()\n\t\tif self.use_cnn: # In case we use cnn\n\t\t\tc = self.c1(x)\n\t\t\tp = self.p1(c)\n\t\t\t\n\t\t\tp = p.transpose(1, 2) # Turn (batch_size x hidden_size x seq_len) back into (batch_size x seq_len x hidden_size) for LSTM\n\t\t\tlengths = torch.tensor( [p.size(1)]*batch_size ) # constant length(=p.size(1)=hidden_size) for all the mel-specs of our batch \n\t\t\tlstm_out, hidden = self.lstm(p, hidden) \n\t\telse:\n\t\t\tlstm_out, hidden = self.lstm(x.transpose(1,2), hidden) # remember that: batch_first=True (check init_hidden). 
Also, lstm_out.size() = x = (batch_size x seq_len x hidden_size)\n\t\t\n\t\tif self.featrep == 'zeropad':\n\t\t\tlstm_out = self.last_timestep(lstm_out, lengths) \n\t\telse:\n\t\t\tlstm_out = lstm_out[:,-1,:] \n\n\t\tfc_outputs = self.fc(lstm_out) \n\t\treturn fc_outputs\n\n\tdef last_timestep(self, outputs, lengths):\n\t\t\"\"\"\n\t\t\tReturns the last output of the LSTM taking into account the zero padding\n\t\t\"\"\" \n\t\t\n\t\tif self.bidirectional:\n\t\t\tforward, backward = self.split_directions(outputs)\n\t\t\tlast_forward = self.last_by_index(forward, lengths, self.use_cnn)\n\t\t\tlast_backward = backward[:, 0, :]\n\t\t\t# Concatenate and return - maybe add more functionalities like average\n\t\t\treturn torch.cat((last_forward, last_backward), dim=-1)\n\t\telse:\n\t\t\treturn self.last_by_index(outputs, lengths, self.use_cnn)\n\n\t@staticmethod\n\tdef split_directions(outputs):\n\t\tdirection_size = int(outputs.size(-1) / 2)\n\t\tforward = outputs[:, :, :direction_size]\n\t\tbackward = outputs[:, :, direction_size:]\n\t\treturn forward, backward\n\n\t@staticmethod\n\tdef last_by_index(outputs, lengths, use_cnn):\n\t\t# if use_cnn: # If we've used use_cnn cnn\n\t\t# \treturn outputs[:, -1, :]\n\t\t# else:\n\t\t\t# Index of the last output for each sequence.\n\t\t\tidx = (lengths - 1).view(-1, 1).expand(outputs.size(0),\n\t\t\t\t\t\t\t\t\t\t\t\t outputs.size(2)).unsqueeze(1)\n\t\t\treturn outputs.gather(1, idx).squeeze() \n\n\n\nclass FUZZY(nn.Module):\n\tdef __init__(self, input_dim, hidden_dim, output_dim, num_layers, dropout=0.0, bidirectional=False, use_cnn=True, featrep='rescale'):\n\t\tsuper(FUZZY, self).__init__()\n\t\tself.cnn = ConvNet(input_dim=input_dim, output_dim=output_dim, num_classes=10, dropout=0.0) \n\t\tself.lstm = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, use_cnn=use_cnn, featrep=featrep)\n\t\n\t\tself.fc = nn.Linear(hidden_dim, output_dim)\n\n\t\tself.linear = nn.Sequential(nn.Linear(20, 100), nn.ReLU())\n\n\t\tself.olinear = nn.Linear(100, output_dim, nn.ReLU())\n\n\tdef forward(self, static_inputs, trail_inputs, lengths):\n\t\t\"\"\"Inputs have to have dimension (N, C_in, L_in)\"\"\"\n\t\t### CNN ###\n\t\to1 = self.cnn(static_inputs) # input should have dimension (N, C, L)\n\n\t\t# ### LSTM ###\n\t\to2 = self.lstm(trail_inputs, lengths)\n\n\t\t### COMBINED ###\n\t\to = torch.cat((o1, o2),1)\n\t\to = self.linear(o)\n\t\tout = self.olinear(o) \n\t\treturn F.log_softmax(out, dim=1)\n\n","sub_path":"exp1/fuzzy_LSTMWithCNN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"427451911","text":"\"\"\" game_greenie.py - GolfGame class.\"\"\"\nfrom .game import GolfGame\n\n\nclass GreenieGame(GolfGame):\n \"\"\"Basic Par 3 games.\"\"\"\n\n short_description = \"Greenie\"\n description = \"\"\"\nClosest shot to the pin on par 3 (on the green) and makes a par or better wins the hole.\n\nOptions:\n double_birdie: Birdies are worth double points. \n carry_over: If nobody wins a par 3 then carries over to next par 3.\n last_par_3_carry: If nobody wins last par 3 then carry over to next hole and on green in regulation quailifies. 
\n\"\"\"\n\n def __init__(self, golf_round, scores, **kwargs):\n super(GreenieGame, self).__init__(golf_round, scores, **kwargs)\n self._carry_over = kwargs.get(\"carry_over\", True)\n self._double_birdie = kwargs.get(\"double_birdie\", True)\n self._last_par_3_carry = kwargs.get(\"last_par_3_carry\", True)\n\n def start(self):\n \"\"\"Start the game.\"\"\"\n for pl in self.scores:\n # points won\n pl.dct_points = self._init_dict()\n pl.dct_money = self._init_dict(score_type=float) if self._wager else None\n self._carry = 0\n self._next_hole = 0\n self._use_green_in_regulation = False\n self._thru = 0\n # add header to scorecard\n self.dctScorecard[\"header\"] = \"{0:*^98}\".format(\n \" \" + self.short_description + \" \"\n )\n\n def setOptions(self, options):\n \"\"\"Additional options set for each add score.\"\"\"\n super(GreenieGame, self).setOptions(options)\n self._closest_to_pin = options.get(\"closest_to_pin\")\n\n def addScore(self, index, lstGross):\n \"\"\"add scores for a hole.\n \n Args:\n index: hole index [0..holes-1]\n lstGross: list of gross scores for all players.\n \"\"\"\n self._thru = index + 1\n if (\n self.golf_round.course.holes[index].isPar(3)\n or self._use_green_in_regulation\n ):\n par = self.golf_round.course.holes[index].par\n if (\n self._closest_to_pin is not None\n and lstGross[self._closest_to_pin] <= par\n ):\n winner = self.scores[self._closest_to_pin]\n # only get points on par 3\n value = 1 if par == 3 else 0\n winner.dct_points[\"holes\"][index] = value + self._carry\n self._carry = 0\n if self._double_birdie and lstGross[self._closest_to_pin] < par:\n # birdie or better\n winner.dct_points[\"holes\"][index] *= 2\n self._update_totals(winner.dct_points)\n if self._wager:\n winner.dct_money[\"holes\"][index] = winner.dct_points[\"holes\"][\n index\n ] * len(self.scores)\n self._update_totals(winner.dct_money)\n else:\n if self._carry_over and par == 3:\n self._carry += 1\n if (\n self._last_par_3_carry\n and self.golf_round.course.lastPar(3) == index\n ):\n self._use_green_in_regulation = True\n\n self._next_hole += 1\n if self._next_hole == 18:\n self._next_hole = None\n\n def getScorecard(self, **kwargs):\n \"\"\"Scorecard with all players.\"\"\"\n lstPlayers = []\n for n, score in enumerate(self.scores):\n dct = {\"player\": score.player}\n dct[\"in\"] = score.dct_points[\"in\"]\n dct[\"out\"] = score.dct_points[\"out\"]\n dct[\"total\"] = score.dct_points[\"total\"]\n # build line for stdout\n line = \"{:<6}\".format(score.player.nick_name)\n for point in score.dct_points[\"holes\"][:9]:\n line += \" {:>3}\".format(point) if point is not None else \" \"\n line += \" {:>4}\".format(dct[\"out\"])\n for point in score.dct_points[\"holes\"][9:]:\n line += \" {:>3}\".format(point) if point is not None else \" \"\n line += \" {:>4} {:>4}\".format(dct[\"in\"], dct[\"total\"])\n dct[\"line\"] = line\n lstPlayers.append(dct)\n self.dctScorecard[\"players\"] = lstPlayers\n return self.dctScorecard\n\n def getLeaderboard(self, **kwargs):\n board = []\n sort_type = kwargs.get(\"sort_type\", \"points\")\n if sort_type == \"money\" and self._wager:\n self.dctLeaderboard[\"hdr\"] = \"Pos Name Money Thru\"\n scores = sorted(\n self.scores, key=lambda score: score.dct_money[\"total\"], reverse=True\n )\n sort_by = \"money\"\n else:\n self.dctLeaderboard[\"hdr\"] = \"Pos Name Points Thru\"\n scores = sorted(\n self.scores, key=lambda score: score.dct_points[\"total\"], reverse=True\n )\n sort_by = \"total\"\n pos = 1\n prev_total = None\n for sc in scores:\n 
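# one leaderboard row per player; pos only advances when the sort key changes,\n            # so tied players share a position (dense ranking)\n            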
score_dct = {\n \"player\": sc.player,\n \"total\": sc.dct_points[\"total\"],\n \"money\": sc.dct_money[\"total\"] if self._wager else None,\n }\n if prev_total != None and score_dct[sort_by] != prev_total:\n pos += 1\n prev_total = score_dct[sort_by]\n score_dct[\"pos\"] = pos\n score_dct[\"thru\"] = self._thru\n if sort_by == \"money\":\n money = (\n \"--\"\n if score_dct[\"money\"] == 0.0\n else \"${:<2g}\".format(score_dct[\"money\"])\n )\n score_dct[\"line\"] = \"{:<3} {:<6} {:^5} {:>4}\".format(\n score_dct[\"pos\"],\n score_dct[\"player\"].nick_name,\n money,\n score_dct[\"thru\"],\n )\n else:\n score_dct[\"line\"] = \"{:<3} {:<6} {:>5} {:>4}\".format(\n score_dct[\"pos\"],\n score_dct[\"player\"].nick_name,\n score_dct[\"total\"],\n score_dct[\"thru\"],\n )\n board.append(score_dct)\n self.dctLeaderboard[\"leaderboard\"] = board\n return self.dctLeaderboard\n\n def getStatus(self, **kwargs):\n \"\"\"Scorecard with all players.\"\"\"\n if self._next_hole is None:\n self.dctStatus[\"next_hole\"] = None\n self.dctStatus[\"line\"] = \"Round complete\"\n else:\n self.dctStatus[\"next_hole\"] = self._next_hole + 1\n line = \"\"\n if self.golf_round.course.holes[self._next_hole].isPar(3):\n self.dctStatus[\"par\"] = 3\n self.dctStatus[\"handicap\"] = self.golf_round.course.holes[\n self._next_hole\n ].handicap\n line = \"Hole {} Par {} Hdcp {} \".format(\n self.dctStatus[\"next_hole\"],\n self.dctStatus[\"par\"],\n self.dctStatus[\"handicap\"],\n )\n line += \"Carry:{}\".format(self._carry)\n if self._wager and self._carry:\n line += \" ${:<6g}\".format(self._carry * self._wager * len(self.scores))\n if self._use_green_in_regulation:\n line += \" Use all greens\"\n self.dctStatus[\"line\"] = line\n return self.dctStatus\n\n @property\n def total_payout(self):\n \"\"\"Overload to only count Par 3 holes.\"\"\"\n # calc total payout, game only uses Par 3\n if self._wager:\n holes = [hole for hole in self.golf_round.course.holes if hole.par == 3]\n return len(holes) * self._wager * len(self.scores)\n return None\n","sub_path":"golf_db/game_greenie.py","file_name":"game_greenie.py","file_ext":"py","file_size_in_byte":7662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"99057655","text":"\"\"\"\nStand alone samples for IXN package functionality.\n\nSetup:\nTwo IXN ports connected back to back.\n\n@author yoram@ignissoft.com\n\"\"\"\n\nimport sys\nfrom os import path\nimport unittest\nimport logging\nimport time\n\nfrom ixnetwork.ixn_app import init_ixn\nfrom ixnetwork.ixn_statistics_view import IxnPortStatistics, IxnTrafficItemStatistics\nfrom trafficgenerator.tgn_utils import ApiType\n\n\n# API type = tcl, python or rest. 
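The position bookkeeping in `getLeaderboard` in the golf game above implements dense ranking: tied totals share a position and the next distinct total advances it by exactly one. A self-contained sketch of that rule (the sample scores are made up):

```python
def dense_rank(scores):
    """Return (pos, name, total) rows sorted by total; equal totals share a position."""
    rows, pos, prev = [], 0, None
    for name, total in sorted(scores, key=lambda s: s[1], reverse=True):
        if total != prev:
            pos += 1
            prev = total
        rows.append((pos, name, total))
    return rows

print(dense_rank([("al", 3), ("bo", 5), ("cy", 3)]))
# [(1, 'bo', 5), (2, 'al', 3), (2, 'cy', 3)]
```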
The default is tcl with DEBUG log messages for best visibility.\napi = ApiType.tcl\ntcl_port = 8009\nlog_level = logging.DEBUG\n\ninstall_dir = 'C:/Program Files (x86)/Ixia/IxNetwork/8.01-GA'\n\nport1_location = '192.168.42.61/2/1'\nport2_location = '192.168.42.61/2/2'\n\nixn_config_file = path.join(path.dirname(__file__), 'configs/test_config.ixncfg')\n\n\nclass IxnSamples(unittest.TestCase):\n\n def setUp(self):\n super(IxnSamples, self).setUp()\n logger = logging.getLogger('log')\n logger.setLevel(log_level)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n self.ixn = init_ixn(api, logger, install_dir)\n self.ixn.connect(tcl_port=tcl_port)\n\n def tearDown(self):\n self.ixn.disconnect()\n super(IxnSamples, self).tearDown()\n\n def load_config(self):\n self.ixn.new_config()\n self.ixn.load_config(ixn_config_file)\n self.ixn.commit()\n\n def objects_access(self):\n self.load_config()\n\n # You can read all objects by calling the general method get_children\n ports = self.ixn.root.get_children('vport')\n assert(len(ports) == 2)\n # After the objects have been read from IXN you can retrieve them from memory (much faster)\n ports = self.ixn.root.get_objects_by_type('vport')\n assert(len(ports) == 2)\n # If you are not sure if objects have been read from IXN yet (best method for static configurations)\n ports = self.ixn.root.get_objects_or_children_by_type('vport')\n assert(len(ports) == 2)\n\n # Now we can iterate and print all objects:\n print('Name\\tObject Reference\\tPython Object')\n for port in ports:\n print('{}\\t{}\\t{}'.format(port.obj_name(), port.obj_ref(), port))\n\n # But... frequently used objects (like ports...) can be accessed specifically:\n ports = self.ixn.root.get_ports()\n assert(len(ports) == 2)\n\n # Now we can iterate and print all objects:\n print('Name\\tObject Reference\\tPython Object')\n for name, obj in ports.items():\n print('{}\\t{}\\t{}'.format(name, obj.obj_ref(), obj))\n\n def get_set_attribute(self):\n self.load_config()\n interface = self.ixn.root.get_ports()['Port 1'].get_interfaces()['Int 1-1']\n\n # Get all attributes\n print(interface.get_attributes())\n\n # Get group of attributes\n print(interface.get_attributes('type', 'mtu'))\n\n # Get specific attribute\n print('mtu: ' + interface.get_attribute('mtu'))\n\n # Special cases - name and enabled:\n print('name: ' + interface.get_name())\n print('enabled: ' + str(interface.get_enabled()))\n\n # Set attribute\n interface.set_attributes(mtu=1234)\n assert(int(interface.get_attribute('mtu')) == 1234)\n\n # And again, special case for enabled\n interface.set_enabled(False)\n assert(not interface.get_enabled())\n\n def reserve_ports(self):\n self.load_config()\n self.ports = self.ixn.root.get_children('vport')\n self.ixn.reserve({self.ixn.root.get_object_by_name('Port 1'): port1_location,\n self.ixn.root.get_object_by_name('Port 2'): port2_location}, force=True)\n\n def protocols(self):\n self.reserve_ports()\n self.ixn.send_arp_ns()\n self.ixn.protocols_start()\n\n def traffic(self):\n self.reserve_ports()\n self.ixn.traffic_apply()\n self.ixn.l23_traffic_start()\n time.sleep(8)\n self.ixn.l23_traffic_stop()\n time.sleep(2)\n port_stats = IxnPortStatistics(self.ixn.root)\n port_stats.read_stats()\n ti_stats = IxnTrafficItemStatistics(self.ixn.root)\n ti_stats.read_stats()\n print(port_stats.get_object_stats('Port 1'))\n print(port_stats.get_counters('Frames Tx.'))\n assert(ti_stats.get_counter('Traffic Item 1', 'Rx Frames') == 1600)\n\n def quick_test(self):\n global ixn_config_file\n 
ixn_config_file = path.join(path.dirname(__file__), 'configs/quick_tests.ixncfg')\n self.reserve_ports()\n print(self.ixn.root.get_quick_tests())\n self.ixn.quick_test_apply('QuickTest3')\n print(self.ixn.quick_test_start('QuickTest3', blocking=True))\n\n def inventory(self):\n\n chassis = self.ixn.root.hw.get_chassis(port1_location.split('/')[0])\n chassis.get_inventory()\n\n print('Full Inventory')\n print('=' * len('Full Inventory'))\n for module_name, module in chassis.cards.items():\n print('Card ' + str(module_name))\n for port_name in module.ports:\n print('Port ' + str(port_name))\n","sub_path":"ixnetwork/samples/ixn_samples.py","file_name":"ixn_samples.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"366711507","text":"import asyncio\r\nimport aiohttp\r\nimport time\r\n'''\r\nrequest['url']\r\nrequest['method']\r\nrequest['data']\r\n'''\r\n@asyncio.coroutine\r\ndef get_page(url,postdata=None):\r\n\theadersDict={\"User-Agent\":\"Mozilla/5.0 (Windows NT 5.1; rv:37.0) Gecko/20100101 Firefox/37.0\"}\r\n\tif postdata == None:\r\n\t\tresponse = yield from aiohttp.request('GET', url,headers=headersDict)\r\n\telse:\r\n\t\tresponse = yield from aiohttp.request('POST', url,data=postdata)\r\n\treturn(yield from response.text())\r\n\r\n\r\ndef request_list(requestList,maxNum=20):\r\n\tloop = asyncio.get_event_loop()\r\n\tf = asyncio.wait([get_page(request) for request in requestList])\r\n\tloop.run_until_complete(f)\r\n","sub_path":"http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"50720633","text":"#!/usr/bin/env python\n\nKEEPALIVED_CONFIG_FILE = \"/etc/keepalived/keepalived.conf\"\n\nCONSOLE_DEBUG = False\n\nVERBOSE = False\n\nPARAM_DEBUG = False\n\nSETUP_LOCK = \"/tmp/setup-lock\"\n\nMIN_CPU_CORES = 2\nMIN_MEM_IN_MB = 2048\n\nLESS_PROMPT = \"Press [UP],[DOWN],[PAGE UP],[PAGE DOWN] to scroll\\. 
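Looking back at the `http.py` record above: it uses the pre-Python-3.5 `@asyncio.coroutine`/`yield from` style and the module-level `aiohttp.request` call that modern aiohttp no longer provides, and its `request_list` passes whole request dicts where URLs are expected. A hedged sketch of the same fetch-many helper in current `async`/`await` form, simplified to plain GET/POST on URL strings (URLs and headers are placeholders; assumes aiohttp 3.x):

```python
import asyncio
import aiohttp

HEADERS = {"User-Agent": "Mozilla/5.0"}

async def get_page(session, url, postdata=None):
    # POST when a payload is given, otherwise GET -- mirroring the original helper.
    if postdata is None:
        async with session.get(url, headers=HEADERS) as resp:
            return await resp.text()
    async with session.post(url, data=postdata) as resp:
        return await resp.text()

async def request_list(urls):
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(get_page(session, u) for u in urls))

# asyncio.run(request_list(["https://example.com"]))
```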
Press 'Q' to quit\\.\"\nTEXT_EULA = \"data/eula.txt\"\nTEXT_WELCOME = \"data/welcome.txt\"\n","sub_path":"appliance/appliance-package/src/main/python/firstboot/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"520816114","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import Customer,Menu,Category,Order\n\n\n#class CustomerAdmin(admin.ModelAdmin):\n #fields = ['fname','oname','emp_id','mob','email','image','password','order_id','reg_id']\n #readonly_fields = ['reg_id']\n\n\n #class Meta:\n #model= Customer\n\n\n\n\n#class ProductInline(admin.TabularInline):\n #model = Menu\n #extra = 3\n\n#class CategoryAdmin(admin.ModelAdmin):\n #list_display = ['name','product_count']\n\n #fieldsets = (\n #(\n #None,{\n #'fields':('name',)\n #}\n #),\n #)\n #inlines = (ProductInline,)\n\n #def product_count(self,obj):\n #return obj.product_set.count()\n\n #def get_ordering(self,request):\n #return ('name')\n\nclass MenuAdmin(admin.ModelAdmin):\n list_display = ['name','price','slug','category','image']\n\n class Meta:\n model= Menu\n\nadmin.site.register(Customer)#,CustomerAdmin)\n#admin.site.register(Category,CategoryAdmin)\nadmin.site.register(Category)\nadmin.site.register(Menu,MenuAdmin)\n\n\n\n\n\n\nadmin.site.register(Order)","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"231770723","text":"# import packages\nfrom selenium import webdriver\nfrom lxml import html\nimport requests\nimport pandas as pd\n\n##-----------------------------------------------------------------------------------------------------------\n## Crawl for company names\n# disable picture\nchrome_options = webdriver.ChromeOptions()\nprefs = {\"profile.managed_default_content_settings.images\": 2}\nchrome_options.add_experimental_option(\"prefs\", prefs)\nbrowser = webdriver.Chrome(options=chrome_options)\n# browser = webdriver.Chrome() # open web page\nbrowser.implicitly_wait(10) # wait for web page to load\n\ncompany_names = [] # save company names in a list\n# crawl all company names from A-Z\nfor i in range(65, 91):\n url = 'https://www.thestar.com.my/business/marketwatch/stock-list/?alphabet=' + chr(i)\n browser.get(url)\n\n name_list = browser.find_elements_by_xpath('//table[@class=\"market-trans\"]//tr[@class=\"linedlist\"]/td/a')\n for name in name_list:\n if name.text != '':\n name_text = name.text.replace(\"&\", \"%26\")\n company_names.append(name_text)\n\n# crawl all company names in 0-9\nurl1 = 'https://www.thestar.com.my/business/marketwatch/stock-list/?alphabet=0-9'\nbrowser.get(url1)\nname1_list = browser.find_elements_by_xpath('//table[@class=\"market-trans\"]//tr[@class=\"linedlist\"]/td/a')\nfor name1 in name1_list:\n if name1.text != '':\n name1_text = name1.text.replace(\"&\", \"%26\")\n company_names.append(name1_text)\n\n# print(company_names)\nbrowser.close()\n\n# save as links for crawling all the information\ncompany_links = []\nfor n in company_names:\n link = 'https://www.thestar.com.my/business/marketwatch/stocks/?qcounter=' + n\n company_links.append(link)\n\n##-----------------------------------------------------------------------------------------------------------\n## Crawl for stock price\n\nheader={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/72.0.3626.121 Safari/537.36',\n 'Referer':'https://googleads.g.doubleclick.net/pagead/ads?client=ca-pub-2305666475781689&output=html&h=90&slotname=9390552809&adk=3116781417&adf=1662620477&w=970&lmt=1552753083&guci=2.2.0.0.2.2.0.0&format=970x90&url=http%3A%2F%2Fwww.investalks.com%2Fforum%2Fforum.php%3Fmod%3Dforumdisplay%26fid%3D7%26filter%3Dtypeid%26typeid%3D17&flash=0&wgl=1&dt=1552753083912&bpp=42&bdt=108&fdt=46&idt=22&shv=r20190313&cbv=r20190131&saldr=aa&abxe=1&correlator=6683659723222&frm=20&pv=2&ga_vid=691979923.1552569252&ga_sid=1552752744&ga_hid=1232295072&ga_fc=1&iag=0&icsg=12206&dssz=10&mdo=0&mso=0&u_tz=480&u_his=3&u_java=0&u_h=864&u_w=1536&u_ah=824&u_aw=1536&u_cd=24&u_nplug=3&u_nmime=4&adx=34&ady=100&biw=1026&bih=350&scr_x=0&scr_y=0&eid=21060853&oid=3&rx=0&eae=0&fc=656&brdim=426%2C33%2C426%2C33%2C1536%2C0%2C1057%2C735%2C1042%2C350&vis=1&rsz=%7C%7CeE%7C&abl=CS&ppjl=f&pfx=0&fu=16&bc=7&ifi=1&uci=1.afatrway4czq&fsb=1&xpc=kkAgITwzNl&p=http%3A//www.investalks.com&dtd=90'}\n\n\nclass AppCrawler:\n def __init__(self, starting_url, depth):\n self.starting_url = starting_url\n self.depth = depth\n self.apps = []\n\n def crawl(self):\n self.get_app_from_link(self.starting_url)\n return\n\n def get_app_from_link(self, link):\n start_page = requests.get(link,headers = header)\n tree = html.fromstring(start_page.text)\n\n name = tree.xpath('//h1[@class=\"stock-profile f16\"]/text()')[0]\n price = tree.xpath('//td[@id=\"slcontent_0_ileft_0_lastdonetext\"]/text()')[0]\n code = tree.xpath('//li[@class=\"f14\"]/text()')[1]\n date = tree.xpath('//span[@id=\"slcontent_0_ileft_0_datetxt\"]/text()')[0]\n time = tree.xpath('//span[@id=\"slcontent_0_ileft_0_timetxt\"]/text()')[0]\n open_price = tree.xpath('//td[@id=\"slcontent_0_ileft_0_opentext\"]/text()')[0]\n low = tree.xpath('//td[@id=\"slcontent_0_ileft_0_lowtext\"]/text()')[0]\n high = tree.xpath('//td[@id=\"slcontent_0_ileft_0_hightext\"]/text()')[0]\n vol = tree.xpath('//td[@id=\"slcontent_0_ileft_0_voltext\"]/text()')[0]\n buy_vol = tree.xpath('//td[@id=\"slcontent_0_ileft_0_buyvol\"]/text()')[0]\n sell_vol = tree.xpath('//td[@id=\"slcontent_0_ileft_0_sellvol\"]/text()')[0]\n\n name_list.append(name)\n print(name)\n code_list.append(code[3:])\n # print(code[3:])\n price_list.append(price)\n # print(price)\n date_list.append(date[10:21])\n # print(date[10:21])\n time_list.append(time)\n # print(time)\n open_price_list.append(open_price)\n # print(open_price)\n low_list.append(low)\n # print(low)\n high_list.append(high)\n # print(high)\n vol_list.append(vol)\n # print(vol)\n buy_vol_list.append(buy_vol)\n # print(buy_vol)\n sell_vol_list.append(sell_vol)\n # print(sell_vol)\n\n return\n\n\nclass App:\n\n def __init__(self, name, code, price, links):\n self.name = name\n self.code = code\n self.price = price\n self.links = links\n\n def __str__(self):\n return (\"Name: \" + self.name.encode('UTF-8') +\n \"\\r\\nCode: \" + self.developer.encode('UTF-8') +\n \"\\r\\nPrice: \" + self.price.encode('UTF-8') + \"\\r\\n\")\n\n\n# create list for all the variables\nname_list = []\ncode_list = []\nprice_list = []\ndate_list = []\ntime_list = []\nopen_price_list = []\nlow_list = []\nhigh_list = []\nvol_list = []\nbuy_vol_list = []\nsell_vol_list = []\n\nfor l in company_links:\n crawler = AppCrawler(l, 0)\n crawler.crawl()\n\n##-----------------------------------------------------------------------------------------------------------\n## Save data as .csv file\nna = name_list\nco = code_list\nda = date_list\nti = time_list\nop = open_price_list\nlo = 
low_list\nhi = high_list\npr = price_list\nvo = vol_list\nbv = buy_vol_list\nsv = sell_vol_list\n\ndataframe = pd.DataFrame({'name':na,'code':co,'date':da,'time':ti,'open':op,'low':lo,'high':hi,'price':pr,'volume':vo,'buy/volum':bv,'sell/volum':sv})\n\n# save data\nprint(dataframe)\n# dataframe.to_csv(\"Day_9.csv\",index=False,sep=',')\n","sub_path":"Milestone 1_Acquisition of data/stock_crawl.py","file_name":"stock_crawl.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"124879331","text":"import argparse\nimport asyncio\nimport logging\nimport time\n\nfrom pokeretriever.PokeObjectRequest import PokeObjectRequest\n\n\nclass Request:\n \"\"\"\n Represents a Request to get Pokemon-related data from an API.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize Request details.\n \"\"\"\n self.mode = None\n self.is_input_file = None\n self.input_content = None\n self.expanded = None\n self.output = None\n\n def __str__(self):\n return f\"Mode: {self.mode}\\n\" \\\n f\"is_input_file: {self.is_input_file}\\n\" \\\n f\"input_content: {self.input_content}\\n\" \\\n f\"expanded: {self.expanded}\\n\" \\\n f\"output: {self.output}\"\n\n\ndef setup_request_commandline():\n \"\"\"\n Uses argparse module to accept arguments provided through command\n line. Provided arguments are parsed and passed into a Request object.\n :return: a Request with provided arguments\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"mode\",\n choices=(\"pokemon\", \"ability\", \"move\"))\n\n input_group = parser.add_mutually_exclusive_group(required=True)\n input_group.required = True\n input_group.add_argument(\"--inputfile\")\n input_group.add_argument(\"--inputdata\")\n\n parser.add_argument(\"--expanded\",\n action=\"store_true\")\n\n parser.add_argument(\"--output\")\n\n try:\n args = parser.parse_args()\n request = Request()\n request.mode = args.mode\n request.is_input_file = True if args.inputdata is None else False\n request.input_content = args.inputfile if args.inputdata is None else args.inputdata\n request.expanded = args.expanded\n request.output = args.output\n return request\n except Exception as e:\n print(f\"Error! 
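The stock crawler above percent-encodes only `&` by hand (`name.replace("&", "%26")`) before building query URLs. `urllib.parse.quote` handles the general case, including spaces and other reserved characters; a small sketch (the company names are made up):

```python
from urllib.parse import quote

BASE = "https://www.thestar.com.my/business/marketwatch/stocks/?qcounter="

for name in ["AEON CO. (M) BHD", "P&O", "YTL POWER"]:
    # quote() percent-encodes '&', spaces, parentheses, and other reserved characters.
    print(BASE + quote(name))
```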
Could not read arguments.\\n{e}\")\n quit()\n\n\nclass Pokedex:\n \"\"\"\n Represents a Pokedex, executing the request and outputting the\n requested details.\n \"\"\"\n def execute_request(self, request):\n \"\"\"\n Executes the request and passes result to a print output method.\n :param request: a Request\n :return: None\n \"\"\"\n request.input_content = self.read_file(request.input_content) if request.is_input_file else [\n request.input_content]\n poke_request = PokeObjectRequest(request.mode, request.input_content, request.expanded)\n loop = asyncio.get_event_loop()\n result = loop.run_until_complete(poke_request.execute())\n self.print_output(result, request.output)\n\n @staticmethod\n def read_file(file_path):\n \"\"\"\n Reads a file from a file path and splits data from the file into\n a list.\n :param file_path: a str\n :return: data as a list of strings\n \"\"\"\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n data = f.read()\n return data.split(\"\\n\")\n\n @staticmethod\n def print_output(pokedex_items, file_path):\n \"\"\"\n Print requested Pokedex items.\n :param pokedex_items: list of PokedexObject items\n :param file_path: a str\n :return: None\n \"\"\"\n timestamp = time.strftime(\"%d/%m/%Y %H:%M\")\n if file_path is None:\n print(f\"Timestamp: {timestamp}\")\n print(f\"Number of requests: {len(pokedex_items)}\")\n for item in pokedex_items:\n print(item.__str__())\n return\n with open(file_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(f\"Timestamp: {timestamp}\\n\")\n f.write(f\"Number of requests: {len(pokedex_items)}\\n\")\n for item in pokedex_items:\n f.write(item.__str__())\n f.write(\"\\n\")\n\n\ndef main(request):\n \"\"\"\n Drives the program.\n :param request: a Request\n :return: None\n \"\"\"\n logging.basicConfig(level=logging.DEBUG)\n pokedex = Pokedex()\n pokedex.execute_request(request)\n\n\nif __name__ == '__main__':\n request = setup_request_commandline()\n main(request)\n","sub_path":"pokedex/pokedex.py","file_name":"pokedex.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"128266105","text":"import discord\nimport random\nimport time\nfrom translate import Translator\nfrom discord.ext import commands\nfrom discord.ext.commands import has_role\n\n\nbot = commands.Bot(command_prefix = '!')\n\n@bot.event #on_ready\nasync def on_ready():\n print(\"working\")\n\n\n@bot.command() #help\nasync def h(ctx):\n await ctx.channel.purge(limit=1)\n await ctx.send(\"\"\"Use '!' to start a command\\n\n Available commands:\\n\n !cluck (eg !cluck hello) transforms text into 'cluck' format\\n\n !roll (eg !roll 100) rolls a random number in range of value\\n\n !gamble (eg !gamble Meatball cook 100 gold) will roll random and pick a winner\\n\n !cointoss will flip a coin\\n\n !reverse (eg !reverse hello) will reverse your text\\n\n !eightball (eg !eightball will i win) tell your fortune on yes/no questions\\n\n !translate (eg !translate french you french pig!) 
translates your text\\n\n !rps (eg !rps rock) play rock, paper, scissors with the bot\n \"\"\")\n\n\n@bot.command() #!cluck\nasync def cluck(ctx, *, userString):\n await ctx.channel.purge(limit=1)\n ret = \"\"\n i = True\n for char in userString:\n if i:\n ret += char.upper()\n else:\n ret += char.lower()\n if char != ' ':\n i = not i\n await ctx.send(ret)\n\n\n@bot.command() #!clear\n@commands.has_role(\"botMaster\")\nasync def clear(ctx, amount=1):\n await ctx.channel.purge(limit=1)\n await ctx.channel.purge(limit=amount)\n\n\n@bot.command() #!spamMove\n@commands.has_role(\"botMaster\")\nasync def spamMove(ctx, member: discord.Member, number=2):\n await ctx.channel.purge(limit=1)\n channelList = [] #gets list of all channels\n for guild in bot.guilds:\n for channel in guild.voice_channels:\n channelList.append(channel)\n\n i = 0\n while i < number: \n await member.edit(voice_channel=random.choice(channelList)) #move user to random channel n times\n i += 1\n\n\n@bot.command() #!dm\n@commands.has_role(\"botMaster\")\nasync def dm(ctx, member: discord.Member, amount=1, sleep=0, *, userString):\n await ctx.channel.purge(limit=1)\n i = 0\n while i < amount:\n await member.send(f\"**{userString}**\")\n time.sleep(sleep)\n i += 1\n \n \n@bot.command() #!roll\nasync def roll(ctx, value):\n await ctx.channel.purge(limit=1)\n roll = random.randint(0, int(value))\n await ctx.send(\"You rolled \" + str(roll))\n\n\n@bot.command() #!gamble\nasync def gamble(ctx, members: commands.Greedy[discord.Member], *, userString):\n players = [[\"\", 0] for u in range(len(members))]\n i = 0\n for m in members:\n roll = random.randint(0, 100)\n await ctx.send(f\"{m} rolled: \" + str(roll))\n players[i] = [f\"{m}\", roll]\n i += 1\n \n winner = players[0][0]\n i = 0\n for w,p in players:\n if p > players[i][1]:\n winner = w\n elif p == players[i][1] and w != players[i][1]:\n winner += \" drew with \" + w\n i += 1\n \n await ctx.send(\"The winner is \" + winner + \"!\")\n \n \n@bot.command() #!cointoss\nasync def cointoss(ctx):\n coin = [\"heads\", \"tails\"]\n await ctx.send(random.choice(coin))\n \n \n@bot.command() #!reverse\nasync def reverse(ctx, *, userString):\n await ctx.channel.purge(limit=1)\n string = str(userString) [::-1]\n await ctx.send(string)\n \n \n@bot.command() #!eightball\nasync def eightball(ctx):\n responses = [\"It is certain\",\n \"Without a doubt\",\n \"You may rely on it\",\n \"Yes definitely\",\n \"It is decidedly so\",\n \"As I see it, yes\",\n \"Most likely\",\n \"Yes\",\n \"Outlook good\",\n \"Signs point to yes\",\n \"Reply hazy try again\",\n \"Better not tell you now\",\n \"Ask again later\",\n \"Cannot predict now\",\n \"Concentrate and ask again\",\n \"Don’t count on it\",\n \"Outlook not so good\",\n \"My sources say no\",\n \"Very doubtful\",\n \"My reply is no\"]\n \n await ctx.send(random.choice(responses))\n \n@bot.command() #!translate\nasync def translate(ctx, lang, *, userString):\n translator = Translator(to_lang=f\"{lang}\")\n translation = translator.translate(f\"{userString}\")\n await ctx.send(translation)\n\n@bot.command() #!rps\nasync def rps(ctx, userChoice):\n options = [\"rock\",\n \"paper\",\n \"scissors\"]\n\n cpuChoice = random.choice(options)\n\n await ctx.send(\"Rock...\")\n time.sleep(1)\n await ctx.send(\"Paper...\")\n time.sleep(1)\n await ctx.send(\"Scissors...\")\n time.sleep(1)\n\n if userChoice == \"rock\" and cpuChoice == \"rock\":\n await ctx.send(\"cpu chose rock too! 
it's a draw!\")\n elif userChoice == \"rock\" and cpuChoice == \"paper\":\n await ctx.send(\"cpu chose paper! you lose!\")\n elif userChoice == \"rock\" and cpuChoice == \"scissors\":\n await ctx.send(\"cpu chose scissors! you win!\")\n elif userChoice == \"paper\" and cpuChoice == \"paper\":\n await ctx.send(\"cpu choose paper too! it's a draw!\")\n elif userChoice == \"paper\" and cpuChoice == \"rock\":\n await ctx.send(\"cpu chose rock! you lose!\")\n elif userChoice == \"paper\" and cpuChoice == \"scissors\":\n await ctx.send(\"cpu chose scissors! you win!\")\n elif userChoice == \"scissors\" and cpuChoice == \"scissors\":\n await ctx.send(\"cpu choose scissors too! it's a draw!\")\n elif userChoice == \"scissors\" and cpuChoice == \"rock\":\n await ctx.send(\"cpu chose scissors! you lose!\")\n elif userChoice == \"scissors\" and cpuChoice == \"paper\":\n await ctx.send(\"cpu chose paper! you win!\")\n else:\n await ctx.send(\"you didn't pick a valid choice\")\n\n\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"385465287","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nfrom google.protobuf import timestamp_pb2 # type: ignore\nimport proto # type: ignore\n\n__protobuf__ = proto.module(\n package=\"google.cloud.osconfig.v1\",\n manifest={\n \"GetOSPolicyAssignmentReportRequest\",\n \"ListOSPolicyAssignmentReportsRequest\",\n \"ListOSPolicyAssignmentReportsResponse\",\n \"OSPolicyAssignmentReport\",\n },\n)\n\n\nclass GetOSPolicyAssignmentReportRequest(proto.Message):\n r\"\"\"Get a report of the OS policy assignment for a VM instance.\n\n Attributes:\n name (str):\n Required. API resource name for OS policy assignment report.\n\n Format:\n ``/projects/{project}/locations/{location}/instances/{instance}/osPolicyAssignments/{assignment}/report``\n\n For ``{project}``, either ``project-number`` or\n ``project-id`` can be provided. For ``{instance_id}``,\n either Compute Engine ``instance-id`` or ``instance-name``\n can be provided. For ``{assignment_id}``, the\n OSPolicyAssignment id must be provided.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass ListOSPolicyAssignmentReportsRequest(proto.Message):\n r\"\"\"List the OS policy assignment reports for VM instances.\n\n Attributes:\n parent (str):\n Required. The parent resource name.\n\n Format:\n ``projects/{project}/locations/{location}/instances/{instance}/osPolicyAssignments/{assignment}/reports``\n\n For ``{project}``, either ``project-number`` or\n ``project-id`` can be provided. For ``{instance}``, either\n ``instance-name``, ``instance-id``, or ``-`` can be\n provided. If '-' is provided, the response will include\n OSPolicyAssignmentReports for all instances in the\n project/location. 
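Stepping back to the Discord bot above: its `!rps` command enumerates every outcome with a long `elif` chain, and one branch even mislabels the cpu's move (rock beating scissors is reported as "cpu chose scissors"). The same logic collapses to a single "what beats what" lookup; a hedged standalone sketch:

```python
import random

BEATS = {"rock": "scissors", "paper": "rock", "scissors": "paper"}

def rps(user_choice):
    if user_choice not in BEATS:
        return "you didn't pick a valid choice"
    cpu = random.choice(list(BEATS))
    if cpu == user_choice:
        return f"cpu chose {cpu} too! it's a draw!"
    if BEATS[user_choice] == cpu:
        return f"cpu chose {cpu}! you win!"
    return f"cpu chose {cpu}! you lose!"

print(rps("rock"))
```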
For ``{assignment}``, either\n ``assignment-id`` or ``-`` can be provided. If '-' is\n provided, the response will include\n OSPolicyAssignmentReports for all OSPolicyAssignments in the\n project/location. Either {instance} or {assignment} must be\n ``-``.\n\n For example:\n ``projects/{project}/locations/{location}/instances/{instance}/osPolicyAssignments/-/reports``\n returns all reports for the instance\n ``projects/{project}/locations/{location}/instances/-/osPolicyAssignments/{assignment-id}/reports``\n returns all the reports for the given assignment across all\n instances.\n ``projects/{project}/locations/{location}/instances/-/osPolicyAssignments/-/reports``\n returns all the reports for all assignments across all\n instances.\n page_size (int):\n The maximum number of results to return.\n filter (str):\n If provided, this field specifies the criteria that must be\n met by the ``OSPolicyAssignmentReport`` API resource that is\n included in the response.\n page_token (str):\n A pagination token returned from a previous call to the\n ``ListOSPolicyAssignmentReports`` method that indicates\n where this listing should continue from.\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size: int = proto.Field(\n proto.INT32,\n number=2,\n )\n filter: str = proto.Field(\n proto.STRING,\n number=3,\n )\n page_token: str = proto.Field(\n proto.STRING,\n number=4,\n )\n\n\nclass ListOSPolicyAssignmentReportsResponse(proto.Message):\n r\"\"\"A response message for listing OS Policy assignment reports\n including the page of results and page token.\n\n Attributes:\n os_policy_assignment_reports (MutableSequence[google.cloud.osconfig_v1.types.OSPolicyAssignmentReport]):\n List of OS policy assignment reports.\n next_page_token (str):\n The pagination token to retrieve the next\n page of OS policy assignment report objects.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n os_policy_assignment_reports: MutableSequence[\n \"OSPolicyAssignmentReport\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"OSPolicyAssignmentReport\",\n )\n next_page_token: str = proto.Field(\n proto.STRING,\n number=2,\n )\n\n\nclass OSPolicyAssignmentReport(proto.Message):\n r\"\"\"A report of the OS policy assignment status for a given\n instance.\n\n Attributes:\n name (str):\n The ``OSPolicyAssignmentReport`` API resource name.\n\n Format:\n ``projects/{project_number}/locations/{location}/instances/{instance_id}/osPolicyAssignments/{os_policy_assignment_id}/report``\n instance (str):\n The Compute Engine VM instance name.\n os_policy_assignment (str):\n Reference to the ``OSPolicyAssignment`` API resource that\n the ``OSPolicy`` belongs to.\n\n Format:\n ``projects/{project_number}/locations/{location}/osPolicyAssignments/{os_policy_assignment_id@revision_id}``\n os_policy_compliances (MutableSequence[google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance]):\n Compliance data for each ``OSPolicy`` that is applied to the\n VM.\n update_time (google.protobuf.timestamp_pb2.Timestamp):\n Timestamp for when the report was last\n generated.\n last_run_id (str):\n Unique identifier of the last attempted run\n to apply the OS policies associated with this\n assignment on the VM.\n This ID is logged by the OS Config agent while\n applying the OS policies associated with this\n assignment on the VM. 
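These request messages are plain proto-plus classes, so they can be built with keyword arguments. A sketch of the wildcard listing described above (project/zone/assignment values are placeholders; assumes the standard generated-package layout referenced in the docstrings):

```python
from google.cloud.osconfig_v1 import types

# '-' in the instance segment asks for reports across all instances,
# as documented for ListOSPolicyAssignmentReportsRequest.parent.
request = types.ListOSPolicyAssignmentReportsRequest(
    parent="projects/my-project/locations/us-central1-a/instances/-"
           "/osPolicyAssignments/my-assignment/reports",
    page_size=50,
)
print(request.parent)
```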
NOTE: If the service is\n unable to successfully connect to the agent for\n this run, then this id will not be available in\n the agent logs.\n \"\"\"\n\n class OSPolicyCompliance(proto.Message):\n r\"\"\"Compliance data for an OS policy\n\n Attributes:\n os_policy_id (str):\n The OS policy id\n compliance_state (google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance.ComplianceState):\n The compliance state of the OS policy.\n compliance_state_reason (str):\n The reason for the OS policy to be in an unknown compliance\n state. This field is always populated when\n ``compliance_state`` is ``UNKNOWN``.\n\n If populated, the field can contain one of the following\n values:\n\n - ``vm-not-running``: The VM was not running.\n - ``os-policies-not-supported-by-agent``: The version of\n the OS Config agent running on the VM does not support\n running OS policies.\n - ``no-agent-detected``: The OS Config agent is not\n detected for the VM.\n - ``resource-execution-errors``: The OS Config agent\n encountered errors while executing one or more resources\n in the policy. See ``os_policy_resource_compliances`` for\n details.\n - ``task-timeout``: The task sent to the agent to apply the\n policy timed out.\n - ``unexpected-agent-state``: The OS Config agent did not\n report the final status of the task that attempted to\n apply the policy. Instead, the agent unexpectedly started\n working on a different task. This mostly happens when the\n agent or VM unexpectedly restarts while applying OS\n policies.\n - ``internal-service-errors``: Internal service errors were\n encountered while attempting to apply the policy.\n os_policy_resource_compliances (MutableSequence[google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance]):\n Compliance data for each resource within the\n policy that is applied to the VM.\n \"\"\"\n\n class ComplianceState(proto.Enum):\n r\"\"\"Possible compliance states for an os policy.\n\n Values:\n UNKNOWN (0):\n The policy is in an unknown compliance state.\n\n Refer to the field ``compliance_state_reason`` to learn the\n exact reason for the policy to be in this compliance state.\n COMPLIANT (1):\n Policy is compliant.\n The policy is compliant if all the underlying\n resources are also compliant.\n NON_COMPLIANT (2):\n Policy is non-compliant.\n The policy is non-compliant if one or more\n underlying resources are non-compliant.\n \"\"\"\n UNKNOWN = 0\n COMPLIANT = 1\n NON_COMPLIANT = 2\n\n class OSPolicyResourceCompliance(proto.Message):\n r\"\"\"Compliance data for an OS policy resource.\n\n .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields\n\n Attributes:\n os_policy_resource_id (str):\n The ID of the OS policy resource.\n config_steps (MutableSequence[google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.OSPolicyResourceConfigStep]):\n Ordered list of configuration completed by\n the agent for the OS policy resource.\n compliance_state (google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.ComplianceState):\n The compliance state of the resource.\n compliance_state_reason (str):\n A reason for the resource to be in the given compliance\n state. 
This field is always populated when\n ``compliance_state`` is ``UNKNOWN``.\n\n The following values are supported when\n ``compliance_state == UNKNOWN``\n\n - ``execution-errors``: Errors were encountered by the\n agent while executing the resource and the compliance\n state couldn't be determined.\n - ``execution-skipped-by-agent``: Resource execution was\n skipped by the agent because errors were encountered\n while executing prior resources in the OS policy.\n - ``os-policy-execution-attempt-failed``: The execution of\n the OS policy containing this resource failed and the\n compliance state couldn't be determined.\n exec_resource_output (google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.ExecResourceOutput):\n ExecResource specific output.\n\n This field is a member of `oneof`_ ``output``.\n \"\"\"\n\n class ComplianceState(proto.Enum):\n r\"\"\"Possible compliance states for a resource.\n\n Values:\n UNKNOWN (0):\n The resource is in an unknown compliance state.\n\n To get more details about why the policy is in this state,\n review the output of the ``compliance_state_reason`` field.\n COMPLIANT (1):\n Resource is compliant.\n NON_COMPLIANT (2):\n Resource is non-compliant.\n \"\"\"\n UNKNOWN = 0\n COMPLIANT = 1\n NON_COMPLIANT = 2\n\n class OSPolicyResourceConfigStep(proto.Message):\n r\"\"\"Step performed by the OS Config agent for configuring an\n ``OSPolicy`` resource to its desired state.\n\n Attributes:\n type_ (google.cloud.osconfig_v1.types.OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.OSPolicyResourceConfigStep.Type):\n Configuration step type.\n error_message (str):\n An error message recorded during the\n execution of this step. Only populated if errors\n were encountered during this step execution.\n \"\"\"\n\n class Type(proto.Enum):\n r\"\"\"Supported configuration step types\n\n Values:\n TYPE_UNSPECIFIED (0):\n Default value. This value is unused.\n VALIDATION (1):\n Checks for resource conflicts such as schema\n errors.\n DESIRED_STATE_CHECK (2):\n Checks the current status of the desired\n state for a resource.\n DESIRED_STATE_ENFORCEMENT (3):\n Enforces the desired state for a resource\n that is not in desired state.\n DESIRED_STATE_CHECK_POST_ENFORCEMENT (4):\n Re-checks the status of the desired state.\n This check is done for a resource after the\n enforcement of all OS policies.\n This step is used to determine the final desired\n state status for the resource. It accounts for\n any resources that might have drifted from their\n desired state due to side effects from executing\n other resources.\n \"\"\"\n TYPE_UNSPECIFIED = 0\n VALIDATION = 1\n DESIRED_STATE_CHECK = 2\n DESIRED_STATE_ENFORCEMENT = 3\n DESIRED_STATE_CHECK_POST_ENFORCEMENT = 4\n\n type_: \"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.OSPolicyResourceConfigStep.Type\" = proto.Field(\n proto.ENUM,\n number=1,\n enum=\"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.OSPolicyResourceConfigStep.Type\",\n )\n error_message: str = proto.Field(\n proto.STRING,\n number=2,\n )\n\n class ExecResourceOutput(proto.Message):\n r\"\"\"ExecResource specific output.\n\n Attributes:\n enforcement_output (bytes):\n Output from enforcement phase output file (if\n run). 
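Given the schema above, auditing a report reduces to walking the nested compliance messages and comparing enum values. A hedged sketch (the `report` object is assumed to come back from the service; enum and field names are exactly those defined in this module):

```python
from google.cloud.osconfig_v1 import types

State = types.OSPolicyAssignmentReport.OSPolicyCompliance.ComplianceState

def non_compliant_policies(report):
    """Yield (policy_id, reason) for every policy that is not COMPLIANT."""
    for pc in report.os_policy_compliances:
        if pc.compliance_state != State.COMPLIANT:
            # compliance_state_reason is only populated for UNKNOWN states,
            # so fall back to the enum name otherwise.
            yield pc.os_policy_id, pc.compliance_state_reason or pc.compliance_state.name
```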
Output size is limited to 100K bytes.\n \"\"\"\n\n enforcement_output: bytes = proto.Field(\n proto.BYTES,\n number=2,\n )\n\n os_policy_resource_id: str = proto.Field(\n proto.STRING,\n number=1,\n )\n config_steps: MutableSequence[\n \"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.OSPolicyResourceConfigStep\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=2,\n message=\"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.OSPolicyResourceConfigStep\",\n )\n compliance_state: \"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.ComplianceState\" = proto.Field(\n proto.ENUM,\n number=3,\n enum=\"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.ComplianceState\",\n )\n compliance_state_reason: str = proto.Field(\n proto.STRING,\n number=4,\n )\n exec_resource_output: \"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.ExecResourceOutput\" = proto.Field(\n proto.MESSAGE,\n number=5,\n oneof=\"output\",\n message=\"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance.ExecResourceOutput\",\n )\n\n os_policy_id: str = proto.Field(\n proto.STRING,\n number=1,\n )\n compliance_state: \"OSPolicyAssignmentReport.OSPolicyCompliance.ComplianceState\" = proto.Field(\n proto.ENUM,\n number=2,\n enum=\"OSPolicyAssignmentReport.OSPolicyCompliance.ComplianceState\",\n )\n compliance_state_reason: str = proto.Field(\n proto.STRING,\n number=3,\n )\n os_policy_resource_compliances: MutableSequence[\n \"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance\"\n ] = proto.RepeatedField(\n proto.MESSAGE,\n number=4,\n message=\"OSPolicyAssignmentReport.OSPolicyCompliance.OSPolicyResourceCompliance\",\n )\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n instance: str = proto.Field(\n proto.STRING,\n number=2,\n )\n os_policy_assignment: str = proto.Field(\n proto.STRING,\n number=3,\n )\n os_policy_compliances: MutableSequence[OSPolicyCompliance] = proto.RepeatedField(\n proto.MESSAGE,\n number=4,\n message=OSPolicyCompliance,\n )\n update_time: timestamp_pb2.Timestamp = proto.Field(\n proto.MESSAGE,\n number=5,\n message=timestamp_pb2.Timestamp,\n )\n last_run_id: str = proto.Field(\n proto.STRING,\n number=6,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n","sub_path":"google/cloud/osconfig_v1/types/os_policy_assignment_reports.py","file_name":"os_policy_assignment_reports.py","file_ext":"py","file_size_in_byte":18754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"173510071","text":"#!/usr/bin/python3\n\"\"\"\nThe TestConsole Module defines a TestConsole class that\ninherits from unittest.TestCase for testing Hosh class, an interactive\nshell based on cmd.Cmd\n\"\"\"\nimport sys\nimport unittest\nfrom unittest import mock\nfrom unittest.mock import patch\nfrom io import StringIO\nfrom test import support\nfrom test.support import captured_stdout, captured_stderr\nfrom console import Hosh\n\n\"\"\"\nsubclassing from TestCase which is a base class of \"unittest\"\n\"\"\"\nclass TestConsole(unittest.TestCase): \n \"\"\"\n Create automated tests for interactive shell based on cmd module\n \"\"\"\n def setUp(self):\n \"\"\"setup method for Console Test Class\"\"\"\n self.mock_stdin = unittest.mock.create_autospec(sys.stdin)\n self.mock_stdout =unittest.mock.create_autospec(sys.stdout)\n\n def create(self, server=None):\n \"\"\"create method is a helper function testing Hosh class\"\"\"\n 
return Hosh(stdin=self.mock_stdin, stdout=self.mock_stdout)\n\n def _last_write(self, nr=None):\n \"\"\":return: last 'n' output lines, incomplete\"\"\"\n if nr is None:\n return self.mock_stdout.write.call_args[0][0]\n return \"\".join(map(lambda c: c[0][0], self.mock_stdout.write.call_args_list[-nr:]))\n\n \n\n def test_exit(self):\n \"\"\"exit command\"\"\"\n cli = self.create()\n self.assertTrue(cli.onecmd(\"quit\"))\n\n def test_EOF(self):\n \"\"\"exit command\"\"\"\n cli = self.create()\n self.assertTrue(cli.onecmd(\"EOF\"))\n\n def test_create_object(self):\n \"\"\"test method for create_method\"\"\"\n \"\"\"patch replaces std.out with StringIO(\"file as object\")\"\"\"\n cli = self.create()\n my_input = 'Review'\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n self.assertFalse(cli.onecmd(my_input))\n self.assertEqual('Review', 'Review')\n\n def test_show_object(self):\n \"\"\"test method for do_show\"\"\"\n cli = self.create()\n my_input = 'Review'\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n self.assertFalse(cli.onecmd(my_input))\n self.assertEqual('*** Unknown syntax: review', '*** Unknown syntax: review')\n \n\n def test_destroy_object(self):\n \"\"\"test method for do_destroy\"\"\"\n cli = self.create()\n bad_input = 'destroy BaseModel This-is-testing-98'\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n self.assertFalse(cli.onecmd(bad_input))\n self.assertEqual('** no instance found **', '** no instance found **')\n\n def test_update(self):\n \"\"\"test do_update\"\"\"\n cli = self.create()\n bad_input = 'update BaseModel This-is-testing-98'\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n self.assertFalse(cli.onecmd(bad_input))\n self.assertEqual('** no instance found **', '** no instance found **')\n\n\n\n def test_help(self):\n \"\"\"test method for help output\"\"\"\n cli = self.create()\n expected = \"EOF all create destroy help quit show update\\n\\n\"\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n self.assertFalse(cli.onecmd(\"help\"))\n self.assertEqual(expected, expected)\n \n\n def test_all(self):\n \"\"\"\n test do_all\n store output in fakeOutput\n \"\"\"\n cli = self.create()\n with patch('sys.stdout', new=StringIO()) as fakeOutput:\n self.assertFalse(cli.onecmd('classname.all()'))\n self.assertEqual('*** Unknown syntax: classname.all()',\n '*** Unknown syntax: classname.all()')\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(TestConsole)\nunittest.TextTestRunner(verbosity = 2).run(suite)\n","sub_path":"tests/test_console.py","file_name":"test_console.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"120093724","text":"from flask import Flask, render_template, request, redirect, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.secret_key = 'naosei'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://ncsbulqz:2QrPNdtts-s3z6mtlJdXL6dA_DNy93on@kesavan.db.elephantsql.com/ncsbulqz'\ndb = SQLAlchemy(app)\n\nclass Catalogo(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n nome = db.Column(db.String(100), nullable=False)\n imagem = db.Column(db.String(300), nullable=False)\n descricao = db.Column(db.String(500), nullable=False)\n link = db.Column(db.String(200), nullable=False)\n \n\n def __init__(self,nome,imagem,descricao,link):\n self.nome = nome\n self.imagem = imagem\n self.descricao = descricao\n self.link = link\n\n\n@app.route('/')\ndef 
index():\n return render_template('index.html')\n\n@app.route('/participantes')\ndef participantes():\n return render_template('participantes.html')\n\n@app.route('/sinopse/')\ndef sinopse(id):\n catalogo = Catalogo.query.get(id)\n return render_template('sinopse.html', catalogo=catalogo)\n\n@app.route('/sobre')\ndef sobre():\n return render_template('sobre.html')\n\n@app.route('/layout')\ndef layout():\n catalogo = Catalogo.query.all()\n return render_template('layout.html', catalogo=catalogo)\n\n@app.route('/trailer/')\ndef trailer(id):\n catalogo = Catalogo.query.get(id)\n return render_template('trailer.html', catalogo=catalogo)\n\n\n@app.route('/new', methods=['GET', 'POST'])\ndef new():\n if request.method == 'POST':\n link_video = request.form['link']\n catalogo = Catalogo(\n request.form['nome'],\n request.form['imagem'],\n request.form['descricao'],\n link_video[-11:],\n )\n db.session.add(catalogo)\n db.session.commit()\n flash('Catalogo adicionado com sucesso')\n return render_template('adicionar.html')\n return render_template('adicionar.html')\n\n@app.route('/editar/', methods=['POST', 'GET'])\ndef editar(id):\n catalogo = Catalogo.query.get(id)\n if request.method == 'POST':\n link_video = request.form['link']\n catalogo.nome = request.form['nome']\n catalogo.descricao = request.form['descricao']\n catalogo.imagem = request.form['imagem']\n catalogo.link = link_video[-11:]\n db.session.commit()\n return redirect(f'/sinopse/{id}')\n return render_template('editar.html', catalogo=catalogo, filmeDelete='')\n\n\n@app.route('/apagar/')\ndef apagar(id):\n filmeDelete = Catalogo.query.get(id)\n catalogo = Catalogo.query.all()\n return render_template('editar.html', filmeDelete=filmeDelete, catalogo=catalogo)\n\n@app.route('/delete/')\ndef delete(id):\n catalogo = Catalogo.query.get(id)\n db.session.delete(catalogo)\n db.session.commit()\n return redirect('/layout')\n\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(debug=True)","sub_path":"Catalogo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"50207416","text":"# Задача 5 Создать прямоугольную матрицу A, имеющую N строк и M столбцов со\r\n# случайными элементами. Определить средние значения по всем строкам и\r\n# столбцам матрицы. Результат оформить в виде матрицы из N + 1 строк и M\r\n# + 1 столбцов.\r\n\r\nimport numpy as np\r\nimport random\r\n\r\nN = random.randint(2, 5)\r\nM = random.randint(2, 5)\r\nprint(\"N =\", str(N), \" - \", \"M =\", str(M))\r\nA = np.random.randint(-50, 50, (N, M))\r\nprint(str(A) + \"\\n\")\r\n\r\nM_sr = np.mean(A, axis=0)\r\nN_sr = np.mean(A, axis=1)\r\n\r\nN_sr = np.append(N_sr, None)\r\n\r\nA = np.vstack((A, M_sr))\r\nA = np.hstack((A, N_sr.reshape(-1, 1)))\r\nprint(A)","sub_path":"Курсовая работа часть 2.1. Работа с матрицами/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"366136454","text":"def game():\n text = \"\"\"\n ██████╗ ██████╗ ████████╗ █████╗ \n ██╔══██╗██╔═══██╗╚══██╔══╝██╔══██╗\n ██║ ██║██║ ██║ ██║ ███████║\n ██║ ██║██║ ██║ ██║ ██╔══██║\n ██████╔╝╚██████╔╝ ██║ ██║ ██║ v0.5.1\n ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ by Mekel Ilyasa\n \"\"\"\n \n heroSelection = \"\"\"\n Pilih hero :\n 1. Juggernaut\n 2. Bloodseeker\n 3. Venomancer\n 4. Necrophos\n 5. 
Wraith King\n \"\"\"\n print(text)\n print(heroSelection)\n\ndef dota():\n class Hero():\n\n def __init__(self, name, health, mana, attack, armor, magic, magicResistance):\n self.__name = name\n self.__health = health\n self.__mana = mana\n self.__attack = attack\n self.__armor = armor\n self.__magic = magic\n self.__magicResistance = magicResistance\n\n def attacking(self, lawan):\n print(self.__name + ' menyerang ' + lawan.__name)\n lawan.blocking(self, self.__attack)\n \n def blocking(self, lawan, attackLawan):\n damage = attackLawan/self.__armor\n print('Damage diterima : ' + str(damage))\n self.__health = (self.__health - damage)\n print('Health Power ' + self.__name + ' tersisa ' + str(self.__health) + '\\n')\n\n def magicAttack(self, lawan):\n print(self.__name + ' menyerang ' + lawan.__name + ' dengan magic skill')\n lawan.magicResistance(self, self.__magic)\n\n def magicResistance(self, lawan, magicLawan):\n magicDamage = magicLawan/self.__magicResistance\n print('Magic damage diterima : ' + str(magicDamage))\n self.__health = (self.__health - magicDamage)\n print('Health Power ' + self.__name + ' tersisa ' + str(self.__health) + '\\n')\n\n def getName(self):\n return self.__name\n \n def getHealth(self):\n return self.__health\n\n def getMana():\n return self.__mana\n\n def getAttack():\n return self.__attack\n\n def getArmor():\n return self.__armor\n\n\n Juggernaut = Hero('Juggernaut',100,70,28,6,20,5)\n Bloodseeker = Hero('Bloodseeker',100,62,32,7,19,6)\n Venomancer = Hero('Venomancer',100,59,35,7,24,7)\n Necrophos = Hero('Necrophos',100,89,32,8,22,8)\n\n Juggernaut.attacking(Bloodseeker)\n Venomancer.attacking(Necrophos)\n Juggernaut.attacking(Necrophos)\n Necrophos.magicAttack(Juggernaut)\n\ndef main():\n game()\n dota()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Dota.py","file_name":"Dota.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"211753458","text":"import json\nfrom pathlib import Path\n\nimport gym\nfrom gym import spaces\n\nfrom .devices import Devices\nfrom .env_config import EnvConfig\nfrom gym_d2d.action import Action\nfrom gym_d2d.device import BaseStation, UserEquipment\nfrom gym_d2d.id import Id\nfrom gym_d2d.link_type import LinkType\nfrom gym_d2d.position import Position, get_random_position, get_random_position_nearby\nfrom gym_d2d.simulator import D2DSimulator\n\n\nBASE_STATION_ID = 'mbs'\nEPISODE_LENGTH = 10\n\n\nclass D2DEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self, env_config=None) -> None:\n super().__init__()\n self.config = EnvConfig(**env_config or {})\n self.devices = self._create_devices()\n traffic_model = self.config.traffic_model(self.devices.bs, list(self.devices.cues.values()),\n self.config.num_rbs)\n path_loss = self.config.path_loss_model(self.config.carrier_freq_GHz)\n self.simulator = D2DSimulator(self.devices.to_dict(), traffic_model, path_loss)\n\n self.obs_fn = self.config.obs_fn(self.simulator, self.devices)\n self.observation_space = self.obs_fn.get_obs_space(self.config.__dict__)\n # +1 because include max value, i.e. 
from [0, ..., max]\n num_tx_pwr_actions = self.config.due_max_tx_power_dBm - self.config.due_min_tx_power_dBm + 1\n self.action_space = spaces.Discrete(self.config.num_rbs * num_tx_pwr_actions)\n self.reward_fn = self.config.reward_fn(self.simulator, self.devices)\n self.num_steps = 0\n\n def _create_devices(self) -> Devices:\n \"\"\"Initialise small base stations, cellular UE & D2D UE pairs in the simulator as per the env config.\n\n :returns: A tuple containing a list of base station, CUE & a dict of DUE pair IDs created.\n \"\"\"\n\n base_cfg = {\n 'num_subcarriers': self.config.num_subcarriers,\n 'subcarrier_spacing_kHz': self.config.subcarrier_spacing_kHz,\n }\n\n # create macro base station\n config = self.config.devices[BASE_STATION_ID]['config'] if BASE_STATION_ID in self.config.devices else base_cfg\n bs = BaseStation(Id(BASE_STATION_ID), config)\n\n # create cellular UEs\n cues = {}\n default_cue_cfg = {**base_cfg, **{'max_tx_power_dBm': self.config.cue_max_tx_power_dBm}}\n for i in range(self.config.num_cellular_users):\n cue_id = Id(f'cue{i:02d}')\n config = self.config.devices[cue_id]['config'] if cue_id in self.config.devices else default_cue_cfg\n cues[cue_id] = UserEquipment(cue_id, config)\n\n # create D2D UEs\n dues = {}\n due_cfg = {**base_cfg, **{'max_tx_power_dBm': self.config.due_max_tx_power_dBm}}\n for i in range(0, (self.config.num_d2d_pairs * 2), 2):\n due_tx_id, due_rx_id = Id(f'due{i:02d}'), Id(f'due{i + 1:02d}')\n\n due_tx_config = self.config.devices[due_tx_id]['config'] if due_tx_id in self.config.devices else due_cfg\n due_tx = UserEquipment(due_tx_id, due_tx_config)\n\n due_rx_config = self.config.devices[due_rx_id]['config'] if due_rx_id in self.config.devices else due_cfg\n due_rx = UserEquipment(due_rx_id, due_rx_config)\n\n dues[(due_tx.id, due_rx.id)] = due_tx, due_rx\n\n return Devices(bs, cues, dues)\n\n def reset(self):\n self.num_steps = 0\n for device in self.simulator.devices.values():\n if device.id == BASE_STATION_ID:\n pos = Position(0, 0) # assume MBS fixed at (0,0) and everything else builds around it\n elif device.id in self.config.devices:\n pos = Position(*self.config.devices[device.id]['position'])\n elif any(device.id in d for d in [self.devices.cues, self.devices.due_pairs]):\n pos = get_random_position(self.config.cell_radius_m)\n elif device.id in self.devices.due_pairs_inv:\n due_tx_id = self.devices.due_pairs_inv[device.id]\n due_tx = self.simulator.devices[due_tx_id]\n pos = get_random_position_nearby(self.config.cell_radius_m, due_tx.position, self.config.d2d_radius_m)\n else:\n raise ValueError(f'Invalid configuration for device \"{device.id}\".')\n device.set_position(pos)\n\n self.simulator.reset()\n # take a step with random D2D actions to generate initial SINRs\n random_actions = {due_id: self._extract_action(due_id, self.action_space.sample())\n for due_id in self.devices.due_pairs.keys()}\n results = self.simulator.step(random_actions)\n obs = self.obs_fn.get_state(results)\n return obs\n\n def step(self, actions):\n due_actions = {due_id: self._extract_action(due_id, int(action_idx)) for due_id, action_idx in actions.items()}\n results = self.simulator.step(due_actions)\n self.num_steps += 1\n obs = self.obs_fn.get_state(results)\n rewards = self.reward_fn(results)\n game_over = {'__all__': self.num_steps >= EPISODE_LENGTH}\n\n info = {}\n sum_cue_sinr, sum_system_sinr = 0.0, 0.0\n sum_cue_rate_bps, sum_due_rate_bps, sum_system_rate_bps = 0.0, 0.0, 0.0\n sum_cue_capacity, sum_due_capacity, sum_system_capacity = 0.0, 0.0, 
0.0\n for ((tx_id, rx_id), sinr_db), capacity in zip(results['sinrs_db'].items(), results['capacity_mbps'].values()):\n sum_system_sinr += sinr_db\n sum_system_rate_bps += results['rate_bps'][(tx_id, rx_id)]\n sum_system_capacity += capacity\n if tx_id in self.devices.due_pairs:\n info[tx_id] = {\n 'rb': due_actions[tx_id].rb,\n 'tx_pwr_dbm': due_actions[tx_id].tx_pwr_dBm,\n 'due_sinr_db': sinr_db,\n 'due_rate_bps': results['rate_bps'][(tx_id, rx_id)],\n 'due_capacity_mbps': capacity,\n }\n sum_due_rate_bps += results['rate_bps'][(tx_id, rx_id)]\n sum_due_capacity += capacity\n else:\n sum_cue_sinr += sinr_db\n sum_cue_rate_bps += results['rate_bps'][(tx_id, rx_id)]\n sum_cue_capacity += capacity\n\n aggregate_info = {\n 'env_mean_cue_sinr_db': sum_cue_sinr / len(self.devices.cues),\n 'env_mean_system_sinr_db': sum_system_sinr / (len(self.devices.cues) + len(self.devices.due_pairs)),\n 'env_sum_cue_rate_bps': sum_cue_rate_bps,\n 'env_sum_due_rate_bps': sum_due_rate_bps,\n 'env_sum_system_rate_bps': sum_system_rate_bps,\n 'env_sum_cue_capacity_mbps': sum_cue_capacity,\n 'env_sum_due_capacity_mbps': sum_due_capacity,\n 'env_sum_system_capacity_mbps': sum_system_capacity,\n }\n if self.config.compressed_info:\n for tx_id, tx_info in info.items():\n tx_info.update(aggregate_info)\n else:\n info['__env__'] = aggregate_info\n\n return obs, rewards, game_over, info\n\n def _extract_action(self, due_tx_id: Id, action_idx: int) -> Action:\n rb = action_idx % self.config.num_rbs\n tx_pwr_dBm = (action_idx // self.config.num_rbs) + self.config.due_min_tx_power_dBm\n return Action(due_tx_id, self.devices.due_pairs[due_tx_id], LinkType.SIDELINK, rb, tx_pwr_dBm)\n\n def render(self, mode='human'):\n obs = self.obs_fn.get_state({}) # @todo need to find a way to handle SINRs here\n print(obs)\n\n def save_device_config(self, config_file: Path) -> None:\n \"\"\"Save the environment's device configuration in a JSON file.\n\n :param config_file: The filepath to save to.\n \"\"\"\n config = {device.id: {\n 'position': device.position.as_tuple(),\n 'config': device.config,\n } for device in self.simulator.devices.values()}\n with config_file.open(mode='w') as fid:\n json.dump(config, fid)\n","sub_path":"src/gym_d2d/envs/d2d_env.py","file_name":"d2d_env.py","file_ext":"py","file_size_in_byte":8007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"563025673","text":"from katka.fields import username_on_model\nfrom rest_framework import mixins, status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\n\nclass ReadOnlyAuditViewMixin(mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n GenericViewSet):\n model = None\n\n def get_queryset(self):\n return self.model.objects.exclude(deleted=True)\n\n\nclass UpdateAuditMixin(mixins.UpdateModelMixin, GenericViewSet):\n\n def update(self, request, *args, **kwargs):\n with username_on_model(self.model, request.user.username):\n return super().update(request, *args, **kwargs)\n\n\nclass AuditViewSet(mixins.CreateModelMixin,\n UpdateAuditMixin,\n mixins.DestroyModelMixin,\n ReadOnlyAuditViewMixin):\n\n def create(self, request, *args, **kwargs):\n with username_on_model(self.model, request.user.username):\n return super().create(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.deleted = True\n\n with username_on_model(self.model, request.user.username):\n instance.save()\n\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass FilterViewMixin:\n parameter_lookup_map = None\n\n \"\"\"\n Uses the Serializer fields to construct GET Parameter filtering\n \"\"\"\n def get_queryset(self):\n queryset = super().get_queryset()\n\n # Fetch distinct for all model fields, to prevent duplications due to SQL Joins\n queryset = queryset.distinct()\n\n # Allow filtering on any field in serializer\n all_fields = self.serializer_class.Meta.fields\n filter_fields_lookup = {field: field for field in all_fields}\n filter_fields_keys = list(all_fields)\n\n # Also support a mapping from query parameter to django query field\n if self.parameter_lookup_map:\n filter_fields_keys += self.parameter_lookup_map.keys()\n filter_fields_lookup.update(self.parameter_lookup_map)\n\n # Loop through the keys to maintain proper order\n filters = {}\n for query_param in filter_fields_keys:\n django_lookup_field = filter_fields_lookup[query_param]\n value = self.request.query_params.get(query_param, None)\n if value is not None:\n filters[django_lookup_field] = value\n\n if filters:\n queryset = queryset.filter(**filters)\n\n return queryset\n","sub_path":"katka/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"430888227","text":"import os\r\nimport tensorflow as tf\r\nfrom object_detection.utils import config_util\r\n\r\nfrom object_detection.utils import label_map_util\r\nfrom object_detection.utils import visualization_utils as viz_utils\r\nfrom object_detection.builders import model_builder\r\nfrom datetime import datetime\r\n\r\n\r\ndef save_frame(image,classes,score,boxes):\r\n for i in range(10):\r\n if(classes[i]==1 and score[i]>0.9):\r\n \r\n now = datetime.now()\r\n dt_string = now.strftime(\"%d_%m_%Y_%H_%M_%S\")\r\n\r\n file.write('%s\\n' %dt_string)\r\n \r\n file_name=os.path.join(dt_string+'.jpg')\r\n cv2.imwrite(file_name,image)\r\n\r\nfile=open('No Mask List.txt', 'w')\r\n\r\nWORKSPACE_PATH = 'Tensorflow/workspace'\r\nSCRIPTS_PATH = 'Tensorflow/scripts'\r\nAPIMODEL_PATH = 'Tensorflow/models'\r\nANNOTATION_PATH = WORKSPACE_PATH+'/annotations'\r\nIMAGE_PATH = WORKSPACE_PATH+'/images'\r\nMODEL_PATH = WORKSPACE_PATH+'/models'\r\nPRETRAINED_MODEL_PATH = WORKSPACE_PATH+'/pre-trained-models'\r\nCONFIG_PATH = MODEL_PATH+'/my_ssd_mobnet/pipeline.config'\r\nCHECKPOINT_PATH = MODEL_PATH+'/my_ssd_mobnet/'\r\n\r\nCUSTOM_MODEL_NAME = 'my_ssd_mobnet'\r\n\r\n\r\nconfigs = config_util.get_configs_from_pipeline_file(CONFIG_PATH)\r\ndetection_model = model_builder.build(model_config=configs['model'], is_training=False)\r\n\r\n# Restore checkpoint\r\nckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\r\nckpt.restore(os.path.join(CHECKPOINT_PATH, 'ckpt-6')).expect_partial()\r\n\r\n@tf.function\r\ndef detect_fn(image):\r\n image, shapes = detection_model.preprocess(image)\r\n prediction_dict = detection_model.predict(image, shapes)\r\n detections = detection_model.postprocess(prediction_dict, shapes)\r\n return detections\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\ncategory_index = label_map_util.create_category_index_from_labelmap(ANNOTATION_PATH+'/label_map.pbtxt')\r\n\r\ncap = cv2.VideoCapture(0)\r\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\r\nwhile True: \r\n ret, frame = cap.read()\r\n image_np = np.array(frame)\r\n \r\n input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), 
dtype=tf.float32)\r\n detections = detect_fn(input_tensor)\r\n \r\n num_detections = int(detections.pop('num_detections'))\r\n detections = {key: value[0, :num_detections].numpy()\r\n for key, value in detections.items()}\r\n detections['num_detections'] = num_detections\r\n\r\n # detection_classes should be ints.\r\n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\r\n\r\n label_id_offset = 1\r\n image_np_with_detections = image_np.copy()\r\n\r\n viz_utils.visualize_boxes_and_labels_on_image_array(\r\n image_np_with_detections,\r\n detections['detection_boxes'],\r\n detections['detection_classes']+label_id_offset,\r\n detections['detection_scores'],\r\n category_index,\r\n use_normalized_coordinates=True,\r\n max_boxes_to_draw=1,\r\n min_score_thresh=.7,\r\n agnostic_mode=False,\r\n )\r\n\r\n cv2.imshow('Mask Detector', cv2.resize(image_np_with_detections, (800, 600)))\r\n save_frame(image_np,detections['detection_classes'],detections['detection_scores'],detections['detection_boxes'])\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n#face_rec.recognize()\r\n#SendMail.sendmail()\r\n","sub_path":"Mask Detector.py","file_name":"Mask Detector.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"69404723","text":"from django.core.management import BaseCommand\r\nfrom django.db.models import Max\r\nfrom cats.models import Hunting, Cat\r\nimport csv\r\n\r\nclass Command(BaseCommand):\r\n def add_arguments(self, parser): \r\n parser.add_argument(\"-k\", \"--kot\", \r\n help=\"Wybor kota\", \r\n default=1, \r\n type=str)\r\n\r\n def handle(self, *args, **options):\r\n cat = (\r\n Cat.objects.filter(cat_name=options['kot'])\r\n .aggregate(num=Max(\"id\"))\r\n .get(\"num\")\r\n )\r\n hunting = (\r\n Hunting.objects.filter(cat_id=cat).count()\r\n )\r\n cat1 = (\r\n Cat.objects.filter(cat_name=options['kot'])\r\n .values_list('cat_name')\r\n )\r\n f = open('plik.csv', 'w')\r\n csv_writer = csv.writer(f)\r\n csv_writer.writerow(['Imię kota', 'Liczba polowań'])\r\n for name in cat1:\r\n csv_writer.writerow([name, hunting])\r\n \r\n ","sub_path":"cats/management/commands/eksportowanie.py","file_name":"eksportowanie.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"642183002","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDubalu Framework\n~~~~~~~~~~~~~~~~\n\n:author: Dubalu Framework Team. See AUTHORS.\n:copyright: Copyright (c) 2013-2014, deipi.com LLC. 
All Rights Reserved.\n:license: See LICENSE for license details.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom django import template\n\nfrom templatetag_sugar.parser import Optional, Variable, Assignment\n\n\nregister = template.Library()\n\n\n@register.advanced_tag(takes_context=True,\n syntax=[Optional([Variable('value', is_content=True)]), 'as', Assignment()],\n blocks=Optional())\ndef capture(context, body, value=None):\n \"\"\"\n {% capture myvar as var_name %}\n\n {% capture \"string\" as var_name %}\n\n {% capture as var_name %}\n ....\n {% endcapture %}\n \"\"\"\n if body:\n value = body()\n return value\n","sub_path":"dubalusim/dubalu/dfw/core/templatetags/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"539394109","text":"from config import *\r\nimport view\r\nimport model\r\nimport gui\r\n\r\nclass GameApp:\r\n def __init__(self, shaders, framebuffer):\r\n\r\n self.shader3DTextured = shaders[0]\r\n self.shader3DColored = shaders[1]\r\n self.shader2DTextured = shaders[2]\r\n self.shader3DBillboard = shaders[6]\r\n self.shader3DCubemap = shaders[7]\r\n self.shader3DLightMap = shaders[8]\r\n\r\n self.multisampleFBO = framebuffer[0]\r\n self.regularCBMultisampled = framebuffer[1]\r\n self.brightCBMultisampled = framebuffer[2]\r\n self.MultisampleDepthStencilBuffer = framebuffer[3]\r\n self.singlesampleFBO = framebuffer[4]\r\n self.regularCB = framebuffer[5]\r\n self.brightCB = framebuffer[6]\r\n\r\n self.shadowMapResolution = 2048\r\n self.make_shadow_map()\r\n\r\n pg.mouse.set_visible(False)\r\n self.lastTime = 0\r\n self.currentTime = 0\r\n self.numFrames = 0\r\n self.frameTime = 0\r\n self.lightCount = 0\r\n self.resetLights()\r\n self.create_objects()\r\n\r\n def make_shadow_map(self):\r\n self.depthMapFBO = glGenFramebuffers(1)\r\n glBindFramebuffer(GL_FRAMEBUFFER, self.depthMapFBO)\r\n self.depthMap = glGenTextures(1)\r\n glBindTexture(GL_TEXTURE_2D, self.depthMap)\r\n glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.shadowMapResolution,\r\n self.shadowMapResolution, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\r\n glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, np.array([1.0,1.0,1.0,1.0], dtype=np.float32))\r\n glDrawBuffer(GL_NONE)\r\n glReadBuffer(GL_NONE)\r\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, self.depthMap, 0)\r\n\r\n def create_objects(self):\r\n self.wood_texture = view.Material(\"gfx/crate\")\r\n monkey_model = view.ObjModel(\"models\", \"monkey.obj\", self.shader3DTextured, self.wood_texture)\r\n self.monkey = model.Monkey(np.array([0,0,0],dtype=np.float32), monkey_model)\r\n self.cube = model.Cube(self.shader3DTextured, self.wood_texture,[1,1,0.5])\r\n self.player = model.Player([0,0,1.2])\r\n self.light = model.Light([self.shader3DColored, self.shader3DTextured,self.shader3DBillboard], [0.2, 0.7, 0.8], [1,1.7,1.5], 2, self.lightCount)\r\n self.lightCount += 1\r\n self.light2 = model.Light([self.shader3DColored, self.shader3DTextured, self.shader3DBillboard], [0.9, 0.4, 0.0], [0,1.7,0.5], 2, self.lightCount)\r\n self.lightCount += 1\r\n self.screen = view.TexturedQuad(0, 0, 2, 2, 
(self.regularCB, self.brightCB), self.shader2DTextured)\r\n self.hud_texture = view.SimpleMaterial(\"gfx/hud\")\r\n self.hud = view.TexturedQuad(0, 0, 2, 2, (self.hud_texture.texture,), self.shader2DTextured)\r\n self.smokeTexture = view.SimpleMaterial(\"gfx/smoke\")\r\n self.smoke = view.BillBoard(1,1,self.smokeTexture, self.shader3DBillboard)\r\n ground_model = view.ObjModel(\"models\", \"ground.obj\", self.shader3DTextured, self.wood_texture)\r\n self.ground = model.Ground(np.array([0,0,0],dtype=np.float32), ground_model)\r\n self.skyBoxTexture = view.CubeMapMaterial(\"gfx/sky\")\r\n skyBoxModel = view.CubeMapModel(self.shader3DCubemap, 100,100,100,1,1,1, self.skyBoxTexture)\r\n self.skyBox = model.skyBox(skyBoxModel)\r\n\r\n def resetLights(self):\r\n glUseProgram(self.shader3DBillboard)\r\n for i in range(8):\r\n glUniform1i(glGetUniformLocation(self.shader3DBillboard,f\"lights[{i}].enabled\"),0)\r\n glUseProgram(self.shader3DTextured)\r\n for i in range(8):\r\n glUniform1i(glGetUniformLocation(self.shader3DTextured,f\"lights[{i}].enabled\"),0)\r\n\r\n def mainLoop(self):\r\n result = CONTINUE\r\n #check events\r\n for event in pg.event.get():\r\n if (event.type == pg.KEYDOWN and event.key==pg.K_ESCAPE):\r\n result = EXIT\r\n if (event.type == pg.KEYDOWN and event.key==pg.K_m):\r\n result = OPEN_MENU\r\n self.handleMouse()\r\n self.handleKeys()\r\n #update objects\r\n self.light.update()\r\n self.light2.update()\r\n self.player.update([self.shader3DColored, self.shader3DTextured, self.shader3DBillboard, self.shader3DCubemap])\r\n\r\n #first pass: capture shadow map\r\n lightProjection = pyrr.matrix44.create_orthogonal_projection(-10,10,-10,10,1.0,20.0, dtype = np.float32)\r\n lightPosition = 10 * np.array([1, -0.5, 1], dtype=np.float32)\r\n lookTarget = np.array([0,0,0],dtype=np.float32)\r\n globalUp = np.array([0,0,1],dtype=np.float32)\r\n lightView = pyrr.matrix44.create_look_at(lightPosition, lookTarget, globalUp, dtype=np.float32)\r\n lightSpaceTransform = pyrr.matrix44.multiply(lightView,lightProjection)\r\n glUseProgram(self.shader3DLightMap)\r\n glUniformMatrix4fv(glGetUniformLocation(self.shader3DLightMap,\"lightSpaceTransform\"),\r\n 1,GL_FALSE, lightSpaceTransform)\r\n glViewport(0,0,self.shadowMapResolution, self.shadowMapResolution)\r\n glBindFramebuffer(GL_FRAMEBUFFER, self.depthMapFBO)\r\n glClear(GL_DEPTH_BUFFER_BIT)\r\n self.renderScene(self.shader3DLightMap)\r\n\r\n #second pass: render (3D)\r\n glViewport(0,0,SCREEN_WIDTH, SCREEN_HEIGHT)\r\n glBindFramebuffer(GL_FRAMEBUFFER, self.multisampleFBO)\r\n glDrawBuffers(2, (GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1))\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n glUseProgram(self.shader3DTextured)\r\n glActiveTexture(GL_TEXTURE2)\r\n glBindTexture(GL_TEXTURE_2D, self.depthMap)\r\n glUniformMatrix4fv(glGetUniformLocation(self.shader3DTextured,\"lightSpaceTransform\"),\r\n 1, GL_FALSE, lightSpaceTransform)\r\n self.renderScene()\r\n\r\n #bounce multisampled frame down to single sampled\r\n glBindFramebuffer(GL_READ_FRAMEBUFFER, self.multisampleFBO)\r\n glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.singlesampleFBO)\r\n glReadBuffer(GL_COLOR_ATTACHMENT0)\r\n glDrawBuffer(GL_COLOR_ATTACHMENT0)\r\n glBlitFramebuffer(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, GL_COLOR_BUFFER_BIT, GL_NEAREST)\r\n glReadBuffer(GL_COLOR_ATTACHMENT1)\r\n glDrawBuffer(GL_COLOR_ATTACHMENT1)\r\n glBlitFramebuffer(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, GL_COLOR_BUFFER_BIT, 
GL_NEAREST)\r\n\r\n #third pass: 2D rendering and post processing\r\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\r\n glClear(GL_COLOR_BUFFER_BIT)\r\n glDisable(GL_DEPTH_TEST)\r\n self.screen.draw()\r\n self.hud.draw()\r\n pg.display.flip()\r\n #timing\r\n self.showFrameRate()\r\n return result\r\n \r\n def renderScene(self,shader=None):\r\n glEnable(GL_DEPTH_TEST)\r\n glDisable(GL_CULL_FACE)\r\n self.skyBox.draw(self.player.position)\r\n glEnable(GL_CULL_FACE)\r\n self.cube.draw(shader)\r\n self.light.draw(shader)\r\n self.light2.draw(shader)\r\n self.monkey.draw(shader)\r\n self.ground.draw(shader)\r\n if shader is None:\r\n self.smoke.draw(np.array([-1,0.5,0.5],dtype=np.float32), self.player.position)\r\n\r\n def handleKeys(self):\r\n keys = pg.key.get_pressed()\r\n if keys[pg.K_w]:\r\n self.player.move(0, 0.0025*self.frameTime)\r\n return\r\n if keys[pg.K_a]:\r\n self.player.move(90, 0.0025*self.frameTime)\r\n return\r\n if keys[pg.K_s]:\r\n self.player.move(180, 0.0025*self.frameTime)\r\n return\r\n if keys[pg.K_d]:\r\n self.player.move(-90, 0.0025*self.frameTime)\r\n return\r\n\r\n def handleMouse(self):\r\n (x,y) = pg.mouse.get_pos()\r\n theta_increment = self.frameTime * 0.05 * (SCREEN_WIDTH / 2 - x)\r\n phi_increment = self.frameTime * 0.05 * (SCREEN_HEIGHT / 2 - y)\r\n self.player.increment_direction(theta_increment, phi_increment)\r\n pg.mouse.set_pos((SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2))\r\n\r\n def showFrameRate(self):\r\n self.currentTime = pg.time.get_ticks()\r\n delta = self.currentTime - self.lastTime\r\n if (delta >= 1000):\r\n framerate = int(1000.0 * self.numFrames/delta)\r\n pg.display.set_caption(f\"Running at {framerate} fps.\")\r\n self.lastTime = self.currentTime\r\n self.numFrames = -1\r\n self.frameTime = float(1000.0 / max(60,framerate))\r\n self.numFrames += 1\r\n\r\n def quit(self):\r\n self.wood_texture.destroy()\r\n self.monkey.destroy()\r\n self.cube.destroy()\r\n self.light.destroy()\r\n self.light2.destroy()\r\n self.cube.destroy()\r\n self.screen.destroy()\r\n self.smoke.destroy()\r\n self.ground.destroy()\r\n self.skyBox.destroy()\r\n glDeleteTextures(1, [self.depthMap,])\r\n glDeleteFramebuffers(1, [self.depthMapFBO,])\r\n\r\nclass MenuApp:\r\n def __init__(self, shaders):\r\n self.shader2DColored = shaders[3]\r\n self.shaderText = shaders[4]\r\n self.particleShader = shaders[5]\r\n\r\n pg.mouse.set_visible(True)\r\n self.lastTime = 0\r\n self.currentTime = 0\r\n self.numFrames = 0\r\n self.frameTime = 0\r\n self.create_objects()\r\n\r\n def create_objects(self):\r\n\r\n self.font = view.SimpleMaterial(\"gfx/font\")\r\n\r\n self.textLines = []\r\n\r\n newgame_label = gui.TextLine(self.font, \"New Game\", self.shaderText, [0.04, 0.04], [-0.15, 0.3], [0,0,0])\r\n self.textLines.append(newgame_label)\r\n exit_label = gui.TextLine(self.font, \"Exit\", self.shaderText, [0.04, 0.04], [-0.15, -0.3], [0,0,0])\r\n self.textLines.append(exit_label)\r\n title = gui.TextLine(self.font, \"Monke Madness\", self.shaderText, [0.08, 0.08], [-0.5, 0.7], [1,0,0])\r\n self.textLines.append(title)\r\n\r\n self.buttons = []\r\n\r\n newgame_button = gui.Button((0,0.3), (0.4, 0.1), (1, 1, 0), self.shader2DColored)\r\n newgame_button.clickAction = gui.new_game_click\r\n newgame_button.label = newgame_label\r\n self.buttons.append(newgame_button)\r\n\r\n exit_button = gui.Button((0,-0.3), (0.4, 0.1), (1, 1, 0), self.shader2DColored)\r\n exit_button.clickAction = gui.exit_click\r\n exit_button.label = exit_label\r\n self.buttons.append(exit_button)\r\n\r\n createInfo = 
model.ParticleEmitter2DCreateInfo()\r\n createInfo.color = (255,255,0)\r\n createInfo.layer = 1\r\n createInfo.lifetime = 600\r\n createInfo.pos = (0,0)\r\n createInfo.rate = 0.1\r\n createInfo.shader = self.particleShader\r\n createInfo.size = 10\r\n createInfo.velocity_field = velocity_field1\r\n createInfo.offsetFunction = offset_function1\r\n self.layer1emitter = model.ParticleEmitter2D(createInfo)\r\n createInfo.color = (0,0,255)\r\n createInfo.layer = 2\r\n createInfo.rate = 0.2\r\n createInfo.velocity_field = velocity_field2\r\n createInfo.offsetFunction = offset_function2\r\n self.layer2emitter = model.ParticleEmitter2D(createInfo)\r\n\r\n def mainLoop(self):\r\n result = CONTINUE\r\n #check events\r\n for event in pg.event.get():\r\n if (event.type == pg.MOUSEBUTTONDOWN and event.button==1):\r\n result = self.handleMouseClick()\r\n if (event.type == pg.KEYDOWN and event.key==pg.K_ESCAPE):\r\n result = EXIT\r\n self.handleMouseMove()\r\n #update\r\n self.layer1emitter.update()\r\n self.layer2emitter.update()\r\n #render\r\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\r\n glClear(GL_COLOR_BUFFER_BIT)\r\n glDisable(GL_DEPTH_TEST)\r\n glDisable(GL_CULL_FACE)\r\n self.layer2emitter.draw()\r\n self.layer1emitter.draw()\r\n for button in self.buttons:\r\n button.draw()\r\n for line in self.textLines:\r\n line.draw()\r\n pg.display.flip()\r\n\r\n #timing\r\n self.showFrameRate()\r\n return result\r\n\r\n def handleMouseMove(self):\r\n (x,y) = pg.mouse.get_pos()\r\n x -= SCREEN_WIDTH / 2\r\n x /= SCREEN_WIDTH / 2\r\n y -= SCREEN_HEIGHT / 2\r\n y /= -SCREEN_HEIGHT / 2\r\n\r\n for button in self.buttons:\r\n button.handle_mouse_movement((x,y))\r\n \r\n def handleMouseClick(self):\r\n (x,y) = pg.mouse.get_pos()\r\n x -= SCREEN_WIDTH / 2\r\n x /= SCREEN_WIDTH / 2\r\n y -= SCREEN_HEIGHT / 2\r\n y /= -SCREEN_HEIGHT / 2\r\n\r\n for button in self.buttons:\r\n result = button.handle_mouse_click((x,y))\r\n if result != CONTINUE:\r\n return result\r\n return CONTINUE\r\n\r\n def showFrameRate(self):\r\n self.currentTime = pg.time.get_ticks()\r\n delta = self.currentTime - self.lastTime\r\n if (delta >= 1000):\r\n framerate = int(1000.0 * self.numFrames/delta)\r\n pg.display.set_caption(f\"Running at {framerate} fps.\")\r\n self.lastTime = self.currentTime\r\n self.numFrames = -1\r\n self.frameTime = float(1000.0 / max(60,framerate))\r\n self.numFrames += 1\r\n\r\n def quit(self):\r\n self.layer2emitter.destroy()\r\n self.layer1emitter.destroy()\r\n for button in self.buttons:\r\n button.destroy()\r\n for line in self.textLines:\r\n line.destroy()\r\n self.font.destroy()","sub_path":"pyopengl/19 - Shadows/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":13773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"226842539","text":"from tparser import parse\nimport os\nimport pickle\nimport re\nimport nltk.classify\n\ntrain_path = os.path.join(\"wedt\",\"training\")\nclassifier_path = os.path.join(\"wedt\",\"classifiers\")\n\ndef gather_topics(addresslist, site):\n\t\"\"\"Downloads, parses and pickles all the topics from a list of addresses.\n\t\n\taddresslist\t- a list of web addresses\n\tsite\t\t- site name (for bundling topics)\n\t\"\"\"\n\tfor address in addresslist:\n\t\tparsed = parse(address, True)\n\t\twith open(os.path.join(train_path,site,re.findall('[^/]*$',address)[0]), 'w') as file:\n\t\t\tpickle.dump(parsed, file)\n\ndef train_classifier(classifier, directory, feature, name=None, scorethreshold=None):\n\t\"\"\"Creates 
and trains a NLTK classifier from nltk.classify package.\n\t\n\tclassifier\t- a classifier class that supports training\n\tdirectory\t- directory containing the training set (inside wedt/training)\n\tfeature\t\t- feature set function (features.py)\n\t\"\"\"\n\tif classifier==\"MaxEnt\":\n\t\tfrom nltk.classify.maxent import MaxentClassifier as Classifier\n\telif classifier==\"PositiveNaiveBayes\":\n\t\tfrom nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier as Classifier\n\telse:\n\t\tfrom nltk.classify.naivebayes import NaiveBayesClassifier as Classifier\n\tfeaturesets = get_featuresets(directory, feature, scorethreshold)\n\tc = Classifier.train(featuresets)\n\tif name:\n\t\twith open(os.path.join(classifier_path, name), 'w') as file:\n\t\t\tpickle.dump((c,feature), file)\n\treturn c\n\ndef get_featuresets(directory, feature, scorethreshold):\n\tfeaturesets = []\n\tfor filename in os.listdir(os.path.join(train_path,directory)):\n\t\twith open(os.path.join(train_path, directory, filename), 'r') as file:\n\t\t\ttopic, scores, classes = pickle.load(file)\n\t\t\tif scorethreshold:\n\t\t\t\tscores = map(float,scores)\n\t\t\t\tclasses = [\"acc\" if s/max(scores) > scorethreshold else c for s,c in zip(scores,classes)]\n\t\t\tfeaturesets.extend( nltk.classify.util.apply_features(feature, zip(((topic, t) for t in topic), classes)) )\n\treturn featuresets","sub_path":"WEDT/wedt/wedt/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"636023671","text":"# coding:utf-8\r\nimport os\r\nimport numpy as np\r\nfrom torchvision.datasets import ImageFolder\r\nimport torchvision.transforms as transforms\r\nimport pickle\r\n\r\n\"\"\"\r\n    Run this function before training to obtain the mean and standard deviation of the data.\r\n\"\"\"\r\n\r\n\r\nclass Dataloader():\r\n    def __init__(self, isize, dataroot):\r\n        # folder names of the training, validation and test datasets\r\n        self.isize = isize\r\n        self.dataroot = dataroot\r\n        self.dirs = ['train', 'test', 'val', 'center_v']\r\n\r\n        self.means = [0, 0, 0]\r\n        self.stdevs = [0, 0, 0]\r\n\r\n        self.transform = transforms.Compose([transforms.Resize(self.isize),\r\n                                             transforms.CenterCrop(self.isize),\r\n                                             transforms.ToTensor(),  # rescales values from [0,255] to [0,1], i.e. divides by 255\r\n                                             # transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))\r\n                                             ])\r\n\r\n        # ImageFolder groups the data by folder, one folder per class, so labels are assigned automatically\r\n        self.dataset = {x: ImageFolder(os.path.join(self.dataroot, x), self.transform) for x in self.dirs}\r\n\r\n    def get_mean_std(self, type, mean_std_path):\r\n        \"\"\"\r\n        Compute the mean and standard deviation of a dataset.\r\n        :param type: which dataset split to use, one of 'train', 'test', 'testing'\r\n        :param mean_std_path: file in which the computed mean and standard deviation are stored\r\n        :return:\r\n        \"\"\"\r\n        num_imgs = len(self.dataset[type])\r\n        for data in self.dataset[type]:\r\n            img = data[0]\r\n            for i in range(3):\r\n                # mean and standard deviation of one channel\r\n                self.means[i] += img[i, :, :].mean()\r\n                self.stdevs[i] += img[i, :, :].std()\r\n\r\n        self.means = np.asarray(self.means) / num_imgs\r\n        self.stdevs = np.asarray(self.stdevs) / num_imgs\r\n\r\n        print(\"{} : normMean = {}\".format(type, self.means))\r\n        print(\"{} : normstdevs = {}\".format(type, self.stdevs))\r\n\r\n        # write the mean and standard deviation to a file so they can be read back later\r\n        with open(mean_std_path, 'wb') as f:\r\n            pickle.dump(self.means, f)\r\n            pickle.dump(self.stdevs, f)\r\n        print('pickle done')\r\n\r\n\r\nif __name__ == '__main__':\r\n    isize = 32\r\n    dataroot = './data-local/images/ruxian/'\r\n    dataloader = Dataloader(isize, dataroot)\r\n    for x in dataloader.dirs:\r\n        mean_std_path = 'mean_std_value_' + x + '.pkl'\r\n        
dataloader.get_mean_std(x, mean_std_path)","sub_path":"MT-CNV/get_mean_std.py","file_name":"get_mean_std.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"159444460","text":"\n\nfrom xai.brain.wordbase.nouns._mother import _MOTHER\n\n# class header\nclass _MOTHERS(_MOTHER, ):\n\tdef __init__(self,): \n\t\t_MOTHER.__init__(self)\n\t\tself.name = \"MOTHERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mother\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_mothers.py","file_name":"_mothers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"320740857","text":"# 2644.py\n# 2018.06.23\n\nimport sys\nimport collections\n\nr = sys.stdin.readline\n\ndef bfs(graph, start, end):\n\tqueue = collections.deque([start])\n\tchecked[start] = 0\n\twhile queue:\n\t\tnode = queue.popleft()\n\t\tif node == end:\n\t\t\tbreak\n\t\tfor v in graph[node]:\n\t\t\tif checked[v] == -1:\n\t\t\t\tchecked[v] = checked[node] + 1\n\t\t\t\tqueue.append(v)\n\n\nn = int(r())\nstart, end = map(int, r().split())\ngraph = collections.defaultdict(list)\nchecked = [-1] * (n+1)\nfor _ in range(int(r())):\n\tv, u = map(int, r().split())\n\tgraph[v].append(u)\n\tgraph[u].append(v)\nbfs(graph, start, end)\nprint(checked[end])\n\n# BFS from the start node, recording each node's degree of kinship in checked as it explores.\n","sub_path":"2000/2644.py","file_name":"2644.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"575739037","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('app', '0006_auto_20160426_0334'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='hospital',\n            name='lat',\n            field=models.DecimalField(verbose_name='Lat', decimal_places=6, null=True, max_digits=15),\n        ),\n        migrations.AddField(\n            model_name='hospital',\n            name='lng',\n            field=models.DecimalField(verbose_name='Lng', decimal_places=6, null=True, max_digits=15),\n        ),\n    ]\n","sub_path":"app/migrations/0007_auto_20160426_0334.py","file_name":"0007_auto_20160426_0334.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"600013655","text":"# coding=utf-8\r\n# Koubei table-QR scan-to-order - multiple dishes from different categories - add-dish flow\r\n\r\nfrom selenium import webdriver\r\nimport unittest, time, kbshop_login, kbshop_info, re, chromeconf, mylog, logging\r\n\r\n# exchange_test_address, environment, atuo_order, loop_num = kbshop_info.exchange_url()\r\nadd_goods, addgoods_loop_num = kbshop_info.add_goods_info()\r\nchrome_address = chromeconf.chrome_info()\r\nchoice_p, choice_p_confirm = kbshop_info.person_xpath_info()\r\nxc_class, xc_name, lc_class, lc_name, yc_class, yc_name, xhl = kbshop_info.cp_xpath_info()\r\nlog = mylog.Logger('SweepH5.log', logging.INFO, logging.INFO)\r\n\r\n\r\nclass SweepH5(unittest.TestCase):\r\n    def setUp(self):\r\n        chromedriver = chrome_address\r\n        self.driver = webdriver.Chrome(chromedriver)\r\n        server_address = kbshop_info.base_url_info()\r\n        self.base_url = server_address\r\n        self.driver.implicitly_wait(10)\r\n        self.verificationErrors = []\r\n        self.driver.set_window_size(480, 700)  # set the browser window size in pixels\r\n        self.accept_next_alert = True\r\n\r\n    def sweep_order(self):\r\n        driver = self.driver\r\n        
driver.find_element_by_xpath(xc_class).click()  # click the Xiang cuisine category\r\n        time.sleep(1)\r\n        driver.find_element_by_xpath(xc_name).click()  # Xiang cuisine category: click '+' on the first dish to add it\r\n        # time.sleep(1)\r\n        driver.find_element_by_xpath(lc_class).click()  # click the Lu cuisine category\r\n        time.sleep(1)\r\n        driver.find_element_by_xpath(lc_name).click()  # Lu cuisine category: click '+' on the first dish to add it\r\n        # time.sleep(1)\r\n        driver.find_element_by_xpath(yc_class).click()  # click the Cantonese cuisine category\r\n        time.sleep(1)\r\n        driver.find_element_by_xpath(yc_name).click()  # Cantonese cuisine category: click '+' on the first dish to add it\r\n        time.sleep(1)\r\n        driver.find_element_by_xpath(xhl).click()  # click 'Done choosing'\r\n        driver.find_element_by_xpath(\"//*[@id='button_order']\").click()  # click 'Place order'\r\n        time.sleep(1)\r\n\r\n    def pay_order(self):\r\n        driver = self.driver\r\n        driver.find_element_by_xpath(\"//*[@id='jiezhang']\").click()  # click 'Go to checkout'\r\n\r\n        # used when an Alipay up-front discount step is present\r\n        now_url = driver.current_url  # get the current page url\r\n        new_order_num = re.search(\"orderNum=(.+?)&open\", now_url).group(1)  # extract order_num from the current address\r\n        member_pay = 'http://b2b.blibao.com/sweep/simple/payByVip.htm?orderNum=201801171126210018665032573&uid=201507292104130005'  # scan-code member payment API\r\n        strinfo = re.compile('201801171126210018665032573')  # the fixed order_num in member_pay\r\n        new_orderno_mempay_url = strinfo.sub(new_order_num, member_pay)  # replace the original order number with the newly extracted one, then pay\r\n        driver.get(new_orderno_mempay_url)\r\n        log.info(\"Member API payment succeeded\")\r\n        driver.get(now_url)\r\n        time.sleep(1)\r\n        log.info(\"Order number: \" + new_order_num + \", order completed!\")\r\n\r\n\r\n    def test_kb_smdc_xchf_addgoods(self):\r\n        \"\"\"Scan-to-order - eat-first, pay-later mode\"\"\"\r\n        driver = self.driver\r\n        kbshop_login.alipay_login(self)  # perform the login\r\n        driver.find_element_by_xpath(\"//*[@id='redirect_btn']\").click()  # choose number of diners\r\n        driver.find_element_by_xpath(choice_p).click()  # select 1 diner\r\n        driver.find_element_by_xpath(choice_p_confirm).click()  # click 'Order now'\r\n        time.sleep(1)\r\n        SweepH5.sweep_order(self)\r\n        time.sleep(1)\r\n        log.info(\"Order 1 placed and paid successfully\")\r\n        if add_goods == '0':\r\n            log.info(\"Looped add-dish mode is not enabled!!!\")\r\n        else:\r\n            for i in range(addgoods_loop_num):\r\n                i = i + 1  # i starts at 0\r\n                if i <= addgoods_loop_num:\r\n                    time.sleep(1)\r\n                    driver.find_element_by_xpath(\"//*[@id='cartFixedDiv']/div/div/span\").click()  # click 'Keep ordering'\r\n                    time.sleep(2)\r\n                    SweepH5.sweep_order(self)\r\n                    driver.find_element_by_xpath(\"//*[@id='confirm']\").click()  # click the 'Confirm added dishes' button\r\n                    time.sleep(2)\r\n                    log.info(\"Add-dish loop \" + str(i) + \" succeeded! Order \" + str(i + 1) + \" placed!\")\r\n                else:\r\n                    SweepH5.pay_order(self)\r\n\r\n    def tearDown(self):\r\n        self.driver.close()\r\n        self.driver.quit()\r\n        self.assertEqual([], self.verificationErrors)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    suite = unittest.TestSuite()\r\n    suite.addTest(SweepH5(\"test_kb_smdc_xchf_addgoods\"))\r\n    results = unittest.TextTestRunner().run(suite)\r\n","sub_path":"blibao-project/sweep_test_case/kb_sweep_h5_more_addgoods.py","file_name":"kb_sweep_h5_more_addgoods.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"412328876","text":"#!/usr/bin/env python\n\nimport sys\ntry:\n    import readline\nexcept ImportError:\n    import pyreadline as readline\nimport os\n\nimport code\nimport rlcompleter\n\nlib_path = os.path.abspath(os.path.join('..', 'src'))\nsys.path.append(lib_path)\n\nlib_path = os.path.abspath(os.path.join('..', '..', 'ARSDKBuildUtils', 'Utils', 'Python'))\nsys.path.append(lib_path)\n\nfrom Bybop_Discovery import *\nimport Bybop_Device\n\nprint('Searching for devices')\n\nfrom zeroconf import ZeroconfServiceTypes\nprint('\\n'.join(ZeroconfServiceTypes.find()))\nprint('done.')\n\ndiscovery = 
Discovery([DeviceID.BEBOP_DRONE, DeviceID.JUMPING_SUMO, DeviceID.AIRBORNE_NIGHT, DeviceID.JUMPING_NIGHT])\n\ndiscovery.wait_for_change()\n\ndevices = discovery.get_devices()\n\n#discovery.stop()\n\nif not devices:\n    print('Oops ...')\n    sys.exit(1)\n\ndevice = devices.itervalues().next()\n\nprint('Will connect to ' + get_name(device))\n\nd2c_port = 43210\ncontroller_type = \"PC\"\ncontroller_name = \"bybop shell\"\n\ndrone = Bybop_Device.create_and_connect(device, d2c_port, controller_type, controller_name)\n\nif drone is None:\n    print('Unable to connect to a product')\n    sys.exit(1)\n\ndrone.dump_state()\n\nvars = globals().copy()\nvars.update(locals())\nreadline.set_completer(rlcompleter.Completer(vars).complete)\nreadline.parse_and_bind(\"tab: complete\")\nshell = code.InteractiveConsole(vars)\n\n# drone.jump(0) # jump forward\n# drone.jump(1) # jump up\n# drone.move_forward(20) # move forwards\n# drone.move_forward(-20) # move backwards\n# drone.move(0,50) # turn right?\n# drone.move(0,-50) # turn left?\n# drone.spin() # spin around\n# drone.simpleAnimation(0)\n# drone.simpleAnimation(9)\n# Currently known values:\n# - 0 : stop\n# - 1 : spin\n# - 2 : tap\n# - 3 : slowshake\n# - 4 : metronome\n# - 5 : ondulation\n# - 6 : spinjump\n# - 7 : spintoposture\n# - 8 : spiral\n# - 9 : slalom\n# \"\"\"\n\nshell.interact()\ndrone.stop()\n","sub_path":"src/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"275719343","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport sys\nfrom os.path import normpath\nfrom os.path import join\nfrom convert_pdf_to_txt import convert_pdf_to_txt\n\ndias_da_semana = [\n    \"segunda\", \"terça\", \"quarta\", \"quinta\", \"sexta\"\n]\n\n\ndef busca_dados_aluno(pdf):\n    linhas = pdf.splitlines()\n    aluno = {\n        'universidade': linhas[0].strip(),\n        'semestre_matricula': linhas[2].split('-')[1].strip(),\n        'nome': linhas[6].strip(),\n        'cartao': linhas[8].split(':')[1].strip(),\n        'habilitacao': linhas[10].split(':')[1].strip(),\n        'curriculo': linhas[11].strip(),\n        'atividades_matriculadas': busca_atividades(linhas),\n    }\n\n    return aluno\n\n\ndef busca_atividades(linhas):\n    atividades = []\n\n    index_marcador = -1\n    marcador = 'Turma Atividade de Ensino'\n\n    # find where the relevant information starts in the PDF\n    for index, linha in enumerate(linhas, start=0):\n        if marcador.upper() in linha.upper():\n            index_marcador = index\n            break\n\n    # find lines that contain only the class-section letter\n    index_primeira_atividade = -1\n    turmas = []\n    if index_marcador > 0:\n        for i in range(index_marcador + 1, len(linhas)):\n            conteudoLinha = linhas[i].strip()\n            if conteudoLinha != '':\n                if len(conteudoLinha) < 7:\n                    turmas.append(conteudoLinha)\n                else:\n                    index_primeira_atividade = i\n                    break\n\n    contador_turmas = 0\n\n    # if turmas was populated, there should be an equal number of courses to find \n    if (index_primeira_atividade > 0):\n        horarios_disciplina = []\n        contador_turmas = 0\n        linha_codigo = -1\n        codigo_atual = ''\n        ultimo_codigo = re.search(r'\\b[A-Z]{3}[0-9]{5}\\b', linhas[index_primeira_atividade]).group()\n        ultimo_nome = linhas[index_primeira_atividade].split('-')[0].strip()\n\n        for i in range(index_primeira_atividade, len(linhas)):\n            # if there is a course code (AAA99999), this should be its name\n            match = re.search(r'\\b[A-Z]{3}[0-9]{5}\\b', linhas[i])\n            if match is not None:\n                # print('found a code on the line:')\n                # print(linhas[i])\n                codigo_atual = match.group()\n                nome_atual = linhas[i].split('-')[0]\n                linha_codigo = i\n\n            # if there is a weekday, this should be the time slot\n            words_re = re.compile(\"|\".join(dias_da_semana), re.IGNORECASE)\n            match = words_re.search(linhas[i])\n            if match is not None:\n                dia = match.group()\n                # extract the time\n                match = re.search(r'\\b[0-9]{2}:[0-9]{2}-[0-9]{2}:[0-9]{2}\\b', linhas[i])\n                if match is not None:\n                    horario = match.group()\n                    horarios_disciplina.append({\n                        'dia': dia,\n                        'horario': horario\n                    })\n\n            if codigo_atual != ultimo_codigo:\n                #print('found: ', str(codigo_atual), nome_atual)\n                atividades.append({\n                    'codigo': ultimo_codigo,\n                    'nome': ultimo_nome,\n                    'turma': turmas[contador_turmas],\n                    'horarios': horarios_disciplina\n                })\n                #print('--------------- added ', ultimo_nome, ultimo_codigo, ' with schedule: ', str(horarios_disciplina), ' and section: ', turmas[contador_turmas])\n                ultimo_codigo = codigo_atual\n                contador_turmas += 1\n                horarios_disciplina = []\n                ultimo_nome = nome_atual\n\n            # at the end, add the last one\n            if i == len(linhas) - 1:\n                atividades.append({\n                    'codigo': ultimo_codigo,\n                    'nome': ultimo_nome,\n                    'turma': turmas[contador_turmas],\n                    'horarios': horarios_disciplina\n                })\n                #print('--------------- added ', ultimo_nome, ultimo_codigo, ' with schedule: ', str(horarios_disciplina), ' and section: ', turmas[contador_turmas])\n\n\n    return atividades\n\n\n","sub_path":"get_data_ufrgs.py","file_name":"get_data_ufrgs.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"155006879","text":"from generic.configs.common.settings import *\n\nDEBUG = False\nTEMPLATE_DEBUG = True\n\nMEDIA_URL = 'http://generic.com/site_media/'\nADMIN_MEDIA_PREFIX = 'http://generic.com/site_media/grappelli/'\n\n# Database\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        'NAME': 'generic',\n        'USER': 'localuser',\n        'PASSWORD': 'localuser'\n    }\n}\n\n# Project settings and active names\nPROJECT_SITE_DOMAIN = 'generic.com'\nPROJECT_INFO_EMAIL_ADDRESS = 'info@generic.com'\n\n\n# Caching\n#CACHE_BACKEND = 'johnny.backends.memcached://127.0.0.1:11211'\n\n# If you want to use Django Debug Toolbar, you need to list your IP address here\nINTERNAL_IPS = ('127.0.0.1',)\n\n# Email addresses\nMANAGERS = (\n    ('Mauricio Mercado', 'mau@maumercado.com'),\n)\n\nDJANGO_STATIC_SAVE_PREFIX = os.path.join(MEDIA_ROOT, 'cache')\nDJANGO_STATIC_NAME_PREFIX = \"cache/\"\nDJANGO_STATIC_MEDIA_URL = MEDIA_URL\nDJANGO_STATIC_CLOSURE_COMPILER = os.path.join(MEDIA_ROOT, 'jar/compiler.jar')\nDJANGO_STATIC_YUI_COMPRESSOR = os.path.join(MEDIA_ROOT, 'jar/yuicompressor-2.4.2.jar')\n\n# We need to re-set these\nFILEBROWSER_URL_FILEBROWSER_MEDIA = MEDIA_URL + \"filebrowser/\"\nFILEBROWSER_PATH_FILEBROWSER_MEDIA = os.path.join(MEDIA_ROOT, 'filebrowser/')\nTINYMCE_JS_URL = ADMIN_MEDIA_PREFIX + \"tinymce/jscripts/tiny_mce/tiny_mce.js\"\nTINYMCE_JS_ROOT = os.path.join(MEDIA_ROOT, 'grappelli/tinymce/jscripts/tiny_mce/')\n\n# logging\nimport logging.config\nLOG_FILENAME = os.path.join(os.path.dirname(__file__), 'logging.conf')\nlogging.config.fileConfig(LOG_FILENAME)","sub_path":"generic/configs/production/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"652584584","text":"from numpy import array , sqrt\r\n\r\ndef split_sequence(sequence:list, n_steps:int):\r\n    X, y = list(), 
list()\r\n    for i in range(len(sequence)):\r\n        end_ix = i + n_steps\r\n        if end_ix > len(sequence)-1:\r\n            break\r\n        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]\r\n        X.append(seq_x)\r\n        y.append(seq_y)\r\n    \r\n    return array(X), array(y)\r\n\r\ndateset = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\r\nn_steps = 3\r\n\r\nx, y = split_sequence(dateset, n_steps)\r\n\r\n# x = x.reshape(x.shape[0], x.shape[1], 1)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=31, shuffle=True)\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler , StandardScaler \r\n\r\nscaler = StandardScaler()\r\nscaler.fit(x_train)\r\nx_train = scaler.transform(x_train)\r\nx_test = scaler.transform(x_test)\r\n\r\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)\r\nx_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)\r\n\r\n## LSTM model\r\n\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense , Input , LSTM , Dropout , BatchNormalization\r\n\r\n_input = Input(shape=(3,1))\r\nf = LSTM(64, activation='relu')(_input)\r\nf = Dense(32)(f)\r\nf = BatchNormalization()(f)\r\nf = Dropout(0.2)(f)\r\nf = Dense(16)(f)\r\nf = Dense(8)(f)\r\nf = Dense(4)(f)\r\nf = Dense(2)(f)\r\noutput = Dense(1)(f)\r\nmodel = Model(inputs=_input, outputs=output)\r\n\r\nmodel.summary()\r\n\r\nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\r\n\r\nfrom keras.callbacks import EarlyStopping\r\nearly_stopping = EarlyStopping(monitor='loss', patience=2000, mode='auto')\r\n\r\nmodel.fit(x_train, y_train, batch_size=1, validation_freq=0.2, callbacks=[early_stopping], epochs=20000)\r\n\r\nloss, mse = model.evaluate(x_test, y_test, batch_size=1)\r\nprint('loss: ', loss)\r\nprint('mse: ', mse)\r\n\r\nx_prd = array([[90,100,110]])\r\nx_prd = scaler.transform(x_prd)\r\nx_prd = x_prd.reshape(x_prd.shape[0], x_prd.shape[1], 1)\r\np = model.predict(x_prd, batch_size=1)\r\nprint(p)\r\n\r\n# compute RMSE\r\nfrom sklearn.metrics import mean_squared_error\r\ny_predict = model.predict(x_test, batch_size=1)\r\ndef RMSE(y_test, y_predict):\r\n    return sqrt(mean_squared_error(y_test, y_predict))\r\nprint('RMSE :', RMSE(y_test, y_predict))\r\n\r\n# compute R2\r\nfrom sklearn.metrics import r2_score\r\nr2_y_predict = r2_score(y_test, y_predict)\r\nprint(f'R2: {r2_y_predict}')\r\n","sub_path":"keras22_univariate3.py","file_name":"keras22_univariate3.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"230222725","text":"# -*- coding: utf-8 -*-\nimport pymssql\nimport flask\nimport json\nimport requests\nimport time\nimport os\nimport datetime\n\n\napp = flask.Flask(__name__)\napplication = app\n\nconn = pymssql.connect(host='172.168.10.130',user= 'bcmpadmin',password ='WelcomeBcmp', database = 'ecology')\ncursor = conn.cursor()\n# sql = \"select * from zbcmp_pub_model_upload \"\n# # cursor.execute(sql)\n# # res = cursor.fetchall()\n# #\n# # print(res)\n\n\n\n\n\n\n# receive the file from the frontend and save it to the server\n@app.route(\"/bcmp/services/v1/fg/object/upload/<projectId>/<fileName>/\",methods=[\"POST\"])\ndef photo(projectId,fileName):\n\n    file = flask.request.files.get(\"file\")\n    now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n    name = file.filename\n    name = name.split(\".\")\n    name_1 = str(projectId)+ str(now_time)\n    name = name_1+'.'+name[1]\n    path = \"d:/bimfile/%s\"%projectId\n    file_path = path + name\n    file.save(file_path)\n    location = file_path\n    sql = \"insert into 
zbcmp_pub_model_upload (file_path,file_name) values(%s,%s)\"%(\"'\"+str(location)+\"'\",\"'\"+fileName+\"'\")\n cursor.execute(sql)\n conn.commit()\n return json.dumps({\"StatusCode\":\"200\"})\n\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", debug=True, port=5000)\n","sub_path":"untitled/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"582597104","text":"#/usr/bin/python\n#encoding=utf-8\n\nfrom collections import Counter\nimport re\n\nlabel_file = './timit/train_label.txt'\nchar_file = './timit/char_list.txt'\n\nchar_list = []\nf = open(label_file, 'r')\nfor label in f.readlines():\n label = label.strip()\n utt, label = label.split('\\t', 1)\n label = label.lower()\n char_list += [ label[i] for i in range(len(label))]\nf.close()\nchar_list = list(set(char_list))\nf = open(char_file, 'w')\ncount = 1\nfor x in char_list:\n if re.search('[a-z\\']', x) != None:\n f.write(x+' '+str(count)+'\\n')\n count += 1\nf.write(\"SPACE \"+str(count)+\"\\n\")\nf.close()\n","sub_path":"timit/data_prepare/create_characters_list.py","file_name":"create_characters_list.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"480415729","text":"# System constants used across BBE\n\n# General\nNUM_OF_SIMS = 1\nNUM_OF_COMPETITORS = 2\nNUM_OF_EXCHANGES = 1\nPRE_RACE_BETTING_PERIOD_LENGTH = 0\nIN_PLAY_CUT_OFF_PERIOD = 0\nSESSION_SPEED_MULTIPLIER = 1\n\n# Data Store Attributes\nRACE_DATA_FILENAME = 'data/race_event_core.csv'\n\n# Message Protocol Numbers\nEXCHANGE_UPDATE_MSG_NUM = 1\nRACE_UPDATE_MSG_NUM = 2\n\n# Exchange Attributes\nMIN_ODDS = 1.1\nMAX_ODDS = 20.00\n\n# Print-Outs\nTBBE_VERBOSE = False\nSIM_VERBOSE = False\nEXCHANGE_VERBOSE = False\n\n# Event Attributes\n# average horse races are between 5 and 12 (1005 - 2414) furlongs or could go min - max (400 - 4000)\nRACE_LENGTH = 500\nMIN_RACE_LENGTH = 400\nMAX_RACE_LENGTH = 4000\n\nMIN_RACE_UNDULATION = 0\nMAX_RACE_UNDULATION = 100\n\nMIN_RACE_TEMPERATURE = 0\nMAX_RACE_TEMPERATUE = 50\n\n# Betting Agent Attributes\nNUM_EX_ANTE_SIMS = 5\nNUM_IN_PLAY_SIMS = 5\n\n\n\n#OD models\n\nMODEL_NAME = 'BC'\nOPINION_COMPETITOR = 1 # Bettors will be expressing opinions about this competitor. Opinions are in the range of [0,1].\n\nMAX_OP = 1\nMIN_OP = 0\n\n# intensity of interactions\nmu = 0.2 # used for all models eg. 0.2\ndelta = 0.25 # used for Bounded Confidence Model eg. 0.1\nlmda = 0.5 # used for Relative Disagreement Model eg. 
0.1\n\n\n","sub_path":"Application/system_constants.py","file_name":"system_constants.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"40573412","text":"import vampytest\n\nfrom ...guild_widget_user import GuildWidgetUser\n\nfrom ..fields import put_users_into\n\n\ndef test__put_users_into():\n \"\"\"\n Tests whether ``put_users_into`` works as intended.\n \"\"\"\n user_id_0 = 10\n user_name_0 = 'Far'\n \n user_id_1 = 11\n user_name_1 = 'East'\n \n user_0 = GuildWidgetUser(\n user_id = user_id_0,\n name = user_name_0,\n )\n \n user_1 = GuildWidgetUser(\n user_id = user_id_1,\n name = user_name_1,\n )\n \n for input_value, defaults, expected_output in (\n (None, False, {'members': []}),\n (None, True, {'members': []}),\n (\n (user_0, user_1),\n False,\n {\n 'members': [\n user_0.to_data(defaults = False),\n user_1.to_data(defaults = False),\n ],\n },\n ),\n (\n (user_0, user_1),\n True,\n {\n 'members': [\n user_0.to_data(defaults = True),\n user_1.to_data(defaults = True),\n ],\n },\n ),\n ):\n output = put_users_into(input_value, {}, defaults)\n vampytest.assert_eq(output, expected_output)\n","sub_path":"hata/discord/guild/guild_widget/tests/test__put_users_into.py","file_name":"test__put_users_into.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"250234299","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\" BSO object -- used for (de)serialization\n\"\"\"\n\nimport re\n\nFIELDS = set(('id', 'collection', 'sortindex', 'version', 'timestamp',\n 'payload', 'payload_size', 'ttl'))\n\nFIELD_DEFAULTS = {\n \"payload\": \"\",\n \"sortindex\": None,\n \"ttl\": None,\n}\n\nMAX_TTL = 31536000\nMAX_ID_SIZE = 64\nMAX_PAYLOAD_SIZE = 256 * 1024\nMAX_SORTINDEX_VALUE = 999999999\nMIN_SORTINDEX_VALUE = 0\nVALID_ID_REGEX = re.compile(\"^[a-zA-Z0-9_-]+$\")\n\n\nclass BSO(dict):\n \"\"\"Holds BSO info\"\"\"\n\n def __init__(self, data=None, converters=None):\n if data is None:\n data = {}\n if converters is None:\n converters = {}\n\n try:\n data_items = data.items()\n except AttributeError:\n msg = \"BSO data must be dict-like, not %s\"\n raise ValueError(msg % (type(data),))\n\n for name, value in data_items:\n if value is not None:\n if not isinstance(value, (int, long, float, basestring)):\n msg = \"BSO fields must be scalar values, not %s\"\n raise ValueError(msg % (type(value),))\n if name in converters:\n value = converters[name](value)\n if value is None:\n continue\n\n self[name] = value\n\n def validate(self):\n \"\"\"Validates the values the BSO has.\"\"\"\n # Check that there are no extraneous fields.\n for name in self:\n if name not in FIELDS:\n return False, 'unknown field %r' % (name,)\n\n # Check that id field is well-formed.\n if 'id' in self:\n value = self['id']\n # Check that it's base64url-compliant.\n # Doing the regex match first has the nice side-effect of\n # erroring out if the value is not a string or unicode object.\n # This avoids accidentally coercing other types to a string.\n try:\n if not VALID_ID_REGEX.match(value):\n return False, 'invalid id'\n except TypeError:\n return False, 'invalid id'\n # Make sure it's stored as a bytestring, not a unicode object.\n # This won't fail because we've checked for valid 
chars above.\n value = str(self['id'])\n if len(value) > MAX_ID_SIZE:\n return False, 'invalid id'\n self['id'] = value\n\n # Check that the ttl is a positive int, and less than one year.\n if 'ttl' in self:\n try:\n ttl = int(self['ttl'])\n except ValueError:\n return False, 'invalid ttl'\n if ttl < 0 or ttl > MAX_TTL:\n return False, 'invalid ttl'\n self['ttl'] = ttl\n\n # Check that the sortindex is a valid positive integer.\n # Convert from other types as necessary.\n if 'sortindex' in self:\n try:\n self['sortindex'] = int(self['sortindex'])\n except ValueError:\n return False, 'invalid sortindex'\n if self['sortindex'] > MAX_SORTINDEX_VALUE:\n return False, 'invalid sortindex'\n if self['sortindex'] < MIN_SORTINDEX_VALUE:\n return False, 'invalid sortindex'\n\n # Check that the payload is a string, and is not too big.\n payload = self.get('payload')\n if payload is not None:\n if not isinstance(payload, basestring):\n return False, 'payload not a string'\n if len(payload.encode(\"utf8\")) > MAX_PAYLOAD_SIZE:\n return False, 'payload too large'\n\n return True, None\n","sub_path":"syncstorage/bso.py","file_name":"bso.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"598520756","text":"\"\"\" Write a Program DeckOfCards.java, to initialize deck of cards having suit\n(\"Clubs\", \"Diamonds\", \"Hearts\", \"Spades\") & Rank (\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\",\n\"Jack\", \"Queen\", \"King\", \"Ace\").\n\nShuffle the cards using Random method and then distribute 9 Cards to 4 Players and\nPrint the Cards the received by the 4 Players using 2D Array…\n\"\"\"\n\n\nimport random\nimport itertools\n\n# rank = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'king', 'queen', 'jack', 'ace']\n\n# suit = ['clubs', 'diamonds', 'hearts', 'spades']\n\nclass Cards:\n def dack_of_cards(self):\n dack_of_card= list(\n itertools.product(['2','3','4','5','6','7','8','9','king','queen','jack','ace'],\n ['clubs','diamond','hearts','spades']))\n print(dack_of_card)\n for number_of_player in range(1, 5): # This loop shuffle cards to 4 players\n random.shuffle(dack_of_card) # \"random.shuffle\" is used to shuffle the card\n print(\"\\n\")\n print(\"player:\",number_of_player)\n # This loop is to distribute 9 cards to the players\n for number_of_cards in range(1, 10):\n print(number_of_cards, dack_of_card[number_of_cards][0],\n \"of\", dack_of_card[number_of_cards][1])\ncards_obj = Cards()\nif __name__ == '__main__':\n cards_obj.dack_of_cards()\n\n\n","sub_path":"ObjectOrientedProgramming/DackOfCardsShuffel_9.py","file_name":"DackOfCardsShuffel_9.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"219094943","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nimport numpy as np\nimport sys\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Conv2D, Flatten, MaxPooling2D\nimport pickle\nfrom keras.optimizers import Adam\nimport os\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nimport time\nimport tensorflow as tf\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nx = np.ones((1, 2, 3))\na = np.transpose(x, (1, 0, 2))\nimport heapq\n\n# filename = 
'/home/sxz/data/geolife_Data/Origin_data_Cross.pickle'\n# with open(filename, 'rb') as f:\n# Train_X1, Train_Y1,Test_X1, Test_Y1, Test_Y_ori1 = pickle.load(f)\nfilename = '/home/sxz/data/geolife_Data/My_data_for_DL_kfold_dataset_RL.pickle'\nwith open(filename, 'rb') as f:\n kfold_dataset1, unlabel = pickle.load(f)\n\nprint(np.shape(unlabel))\nrandom_sample = np.random.choice(len(unlabel), size=int(0.1*len(unlabel)), replace=True, p=None)\nunlabel = unlabel[random_sample]\n# sys.exit(0)\n\nfilename = '/home/sxz/data/geolife_Data/My_data_for_DL_kfold_dataset_RL.pickle'\nwith open(filename, 'rb') as f:\n kfold_dataset, unlabel = pickle.load(f)\n\nfilename = '/home/sxz/data/geolife_Data/pseudo_data4.pickle'\nwith open(filename, 'rb') as f:\n Train_Xp, Train_Yp = pickle.load(f)\n# print(kfold_dataset[0][1])\n# print(len(kfold_dataset[0][1][kfold_dataset[0][1]==0]))\n# print(len(kfold_dataset[0][1][kfold_dataset[0][1]==1]))\n# print(len(kfold_dataset[0][1][kfold_dataset[0][1]==2]))\n# print(len(kfold_dataset[0][1][kfold_dataset[0][1]==3]))\n# print(len(kfold_dataset[0][1][kfold_dataset[0][1]==4]))\n# sys.exit(0)\ntimes = 1\nacc_all = 0\nacc_w_all = 0\nfor T in range(times):\n\n for i in range(len(kfold_dataset)):\n tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))\n # sess = print(tf.Session(config=tf.ConfigProto(log_device_placement=True)))\n start_time = time.clock()\n np.random.seed(7)\n random.seed(7)\n\n\n # Training and test set for GPS segments\n prop = 12000/14000\n random.seed(7)\n np.random.seed(7)\n tf.set_random_seed(7)\n # index = np.arange(len(Train_X))\n # np.random.shuffle(index)\n # Train_X = Train_X[index[:round(prop*len(Train_X))]]\n # Train_Y = Train_Y[index[:round(prop*len(Train_Y))]]\n # #Train_X_Comb = np.vstack((Train_X, Train_X_Unlabel))\n # random.shuffle(Train_X_Comb)\n\n ensemble_num = 1\n NoClass = 5\n Threshold = 31\n\n\n\n model_all = []\n for i1 in range(ensemble_num):\n # Model and Compile\n model = Sequential()\n activ = 'relu'\n model.add(Conv2D(32, (1, 3), strides=(1, 1), padding='same', activation=activ, input_shape=(1, 248, 4)))\n A = model.output_shape\n # print(A)\n model.add(Conv2D(32, (1, 3), strides=(1, 1), padding='same', activation=activ))\n A = model.output_shape\n # print(A)\n model.add(MaxPooling2D(pool_size=(1, 2)))\n A = model.output_shape\n # print(A)\n model.add(Conv2D(64, (1, 3), strides=(1, 1), padding='same', activation=activ))\n A = model.output_shape\n # print(A)\n model.add(Conv2D(64, (1, 3), strides=(1, 1), padding='same', activation=activ))\n A = model.output_shape\n # print(A)\n model.add(MaxPooling2D(pool_size=(1, 2)))\n A = model.output_shape\n # print(A)\n model.add(Conv2D(128, (1, 3), strides=(1, 1), padding='same', activation=activ))\n A = model.output_shape\n # print(A)\n model.add(Conv2D(128, (1, 3), strides=(1, 1), padding='same', activation=activ))\n A = model.output_shape\n # print(A)\n model.add(MaxPooling2D(pool_size=(1, 2)))\n A = model.output_shape\n # print(A)\n model.add(Dropout(.5))\n A = model.output_shape\n # print(A)\n model.add(Flatten())\n A = model.output_shape\n model.add(Dense(int(A[1] * 1/4.), activation=activ))\n A = model.output_shape\n model.add(Dropout(.5))\n A = model.output_shape\n model.add(Dense(NoClass, activation='softmax'))\n A = model.output_shape\n model_all.append(model)\n print(model_all)\n acc = 0\n acc_w =0\n optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n Train_X = Train_Xp\n\n random_sample = 
np.random.choice(len(Train_X), size=int(prop*len(Train_X)), replace=True, p=None)\n Train_X = Train_X[random_sample]\n ori = Train_Yp\n\n\n\n Train_Y = np.zeros([len(Train_Yp) , 5])\n \n for k in range(len(Train_Yp)):\n Train_Y[k][ori[k]] = 1\n Train_Y = Train_Y[random_sample]\n ori = ori[random_sample]\n\n # # 以下是只抽5个样本出来训练的结果\n # index = np.zeros((5,),dtype = int)\n # for i in range(5):\n # print(i)\n # print(np.where( ori == i )[0])\n # index[i] = np.where( ori == i)[0][0]\n # print(index)\n # Train_X = Train_X[index]\n # Train_Y = Train_Y[index]\n \n # Train_X_tmp = Train_X\n # Train_Y_tmp = ori[index]\n\n # print(Train_Y)\n # print(np.where(Train_Y==[1,0,0,0,0] ))\n # print(Train_Y[k][2])\n # print(Train_Y[k][3])\n # print(Train_Y[k][4])\n # sys.exit(0)\n Test_X = kfold_dataset1[i][2]\n Test_Y = kfold_dataset1[i][3]\n Test_Y_ori = kfold_dataset1[i][4]\n\n print(np.shape(Train_Y))\n print(np.shape(Train_X))\n # sys.exit(0)\n \n \n y_pred_all = np.zeros((ensemble_num,len(Test_X)))\n\n for i2 in range(ensemble_num):\n model_all[i2].compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n offline_history = model_all[i2].fit(Train_X, Train_Y, epochs=50, batch_size=512, shuffle=False,\n validation_data=(Test_X, Test_Y))\n hist = offline_history\n print('Val_accuracy', hist.history['val_acc'])\n print('optimal Epoch: ', np.argmax(hist.history['val_acc']))\n # Saving the test and training score for varying number of epochs.\n with open('Revised_accuracy_history_largeEpoch_NoSmoothing.pickle', 'wb') as f:\n pickle.dump([hist.epoch, hist.history['acc'], hist.history['val_acc']], f)\n\n A = np.argmax(hist.history['val_acc'])\n print('the optimal epoch size: {}, the value of high accuracy {}'.format(hist.epoch[A], np.max(hist.history['val_acc'])))\n\n r = model_all[i2].predict(unlabel,batch_size=1000)\n print(r)\n print(np.argmax(r,axis=1))\n mark = np.argmax(r,axis=1)\n\n _0index = np.where(mark == 0)[0]\n print(np.shape(mark))\n print(np.shape(_0index))\n print(_0index)\n print(r[_0index][:,0])\n _0array = r[_0index][:,0].tolist()\n max_num_index_0 = map(_0array.index, heapq.nlargest(20,_0array))\n temp = list(max_num_index_0)\n print(np.shape(_0index))\n print(temp)\n print(_0index[temp])\n #_0index[temp]���是置信度最高的指定个数的unlabel的点\n\n u0data = unlabel[_0index[temp]]\n\n print(np.shape(u0data))\n\n _1index = np.where(mark == 1)[0]\n _1array = r[_1index][:,1].tolist()\n max_num_index_1 = map(_1array.index, heapq.nlargest(20,_1array))\n temp = list(max_num_index_1)\n u1data = unlabel[_1index[temp]]\n print(np.shape(u1data))\n\n\n _2index = np.where(mark == 2)[0]\n _2array = r[_2index][:,2].tolist()\n max_num_index_2 = map(_2array.index, heapq.nlargest(20,_2array))\n temp = list(max_num_index_2)\n u2data = unlabel[_2index[temp]]\n print(np.shape(u2data))\n\n _3index = np.where(mark == 3)[0]\n _3array = r[_3index][:,3].tolist()\n max_num_index_3 = map(_3array.index, heapq.nlargest(20,_3array))\n temp = list(max_num_index_3)\n u3data = unlabel[_3index[temp]]\n print(np.shape(u3data))\n\n\n _4index = np.where(mark == 4)[0]\n _4array = r[_4index][:,4].tolist()\n max_num_index_4 = map(_4array.index, heapq.nlargest(20,_4array))\n temp = list(max_num_index_4)\n u4data = unlabel[_4index[temp]]\n print(np.shape(u4data))\n\n unlabel_t = []\n unlabel_t = np.vstack((u0data,u1data))\n unlabel_t = np.vstack((unlabel_t,u2data))\n unlabel_t = np.vstack((unlabel_t,u3data))\n unlabel_t = np.vstack((unlabel_t,u4data))\n unlabel_Y = np.zeros((100,),dtype = int)\n unlabel_Y[:20] = 0\n 
unlabel_Y[20:40] = 1\n            unlabel_Y[40:60] = 2\n            unlabel_Y[60:80] = 3\n            unlabel_Y[80:100] = 4\n            # unlabel_t = np.vstack((unlabel_t, Train_X_tmp))\n            # print(unlabel_Y)\n            # unlabel_Y = np.hstack((unlabel_Y,Train_Y_tmp))\n            # print(unlabel_Y)\n            # print(np.shape(unlabel_t))\n            # print(np.shape(unlabel_Y))\n            # print(unlabel_Y[4999])\n            # sys.exit(0)\n            # _1index = np.where(mark == 1)\n            # _2index = np.where(mark == 2)\n            # _3index = np.where(mark == 3)\n            # _4index = np.where(mark == 4)\n            # max_1000_0 = map(_0index.index,heapq.nlargest(1000,))\n            # print(len(np.argmax(r,axis=1)[np.argmax(r,axis=1) == 0]))\n            # print(len(np.argmax(r,axis=1)[np.argmax(r,axis=1) == 1]))\n            # print(len(np.argmax(r,axis=1)[np.argmax(r,axis=1) == 2]))\n            # print(len(np.argmax(r,axis=1)[np.argmax(r,axis=1) == 3]))\n            # print(len(np.argmax(r,axis=1)[np.argmax(r,axis=1) == 4]))\n            # with open('/home/sxz/data/geolife_Data/pseudo_data4.pickle', 'wb') as f:\n            #     pickle.dump([unlabel_t, unlabel_Y], f)\n            # pseudo_data3 is the real, purely pseudo-labeled dataset\n            # for each class, select the single point with the highest confidence\n            # sys.exit(0)\n\n            # Calculating the test accuracy, precision, recall\n            y_pred_all[i2] = np.argmax(model_all[i2].predict(Test_X, batch_size=100), axis=1)\n            print('Test Accuracy %: ', accuracy_score(Test_Y_ori, y_pred_all[i2]))\n            print('\\n')\n            print('Confusion matrix: ', confusion_matrix(Test_Y_ori, y_pred_all[i2]))\n            print('\\n')\n            sys.exit(0)\n\n\n\n        # print(y_pred_all)\n        y_pred_ens = np.zeros((len(Test_Y_ori),5))\n        for jj in range(ensemble_num):\n            for jji in range(len(Test_X)):\n                y_pred_ens[jji][int(y_pred_all[jj][jji])] += 1\n        # print(y_pred_ens)\n        y_pred_final = np.argmax(y_pred_ens, axis = 1)\n        print(Test_Y_ori)\n\n        print('Test Accuracy %: ', accuracy_score(Test_Y_ori, y_pred_final))\n        print('\\n')\n        print('Confusion matrix: ', confusion_matrix(Test_Y_ori, y_pred_final))\n        print('\\n')\n        sys.exit(0)\n        print(classification_report(Test_Y_ori, y_pred_final, digits=3))\n        report = classification_report(Test_Y_ori, y_pred_final, digits=3)\n        report = report.splitlines()\n        res = []\n        res.append(['']+report[0].split())\n        for row in report[2:-3]:\n            res.append(row.split())\n        lr = report[-1].split()\n        res.append([' '.join(lr[:3])]+lr[3:])\n        acc_w += float(res[7][0].split(' ')[2])\n        acc_w_all += acc_w\n        for ii in range(5):\n            acc += float(res[ii+1][1])\n        print(acc)\n        acc_all += acc/5\nfin = acc_all/(times*5)\nfin_w = acc_w_all/(times*5)\nprint(fin)\nprint(fin_w)","sub_path":"kfold_final.py","file_name":"kfold_final.py","file_ext":"py","file_size_in_byte":12102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"335066270","text":"\"\"\"Functions that help create the mastermaps and other folium maps\"\"\"\nimport numpy as np\n\n# Import graphing related helpers\nimport matplotlib.pyplot as plt\nimport folium\nfrom folium.plugins import MarkerCluster, FastMarkerCluster, HeatMap, HeatMapWithTime\nimport branca.colormap as cm\n\n# Import other submodules\nfrom .. import utilities\nfrom .BindColorMap import BindColormap\n\n\n# To do\n# 1. 
Add legend functionality for categorical choropleth, somehow.\n\n\n# Marker Cluster processing function\ndef make_marker_cluster(gdf, make_centroids = True, points_column = 'geometry', fast = False, name = None, show = False,\n                        basemap = None, **kwargs):\n    \"\"\"\n    Makes a marker cluster from a gdf and potentially adds it to a map/feature group.\n\n    :param GeoDataFrame gdf: A geodataframe.\n    :param bool make_centroids: If true and the geodataframe has polygon geometry, it will make the centroids and use those\n        to make the marker cluster.\n    :param str or int points_column: If make_centroids is False, will assume the points are in this column.\n        Defaults to 'geometry'.\n    :param bool fast: If True, use a FastMarkerCluster as opposed to a regular MarkerCluster.\n    :param str name: Defaults to None. If not None, will generate a FeatureGroup with this name and return that instead of\n        the MarkerCluster object.\n    :param bool show: Defaults to False. The show parameter for the FeatureGroup that the marker cluster will be added\n        to.\n    :param folium.Map basemap: Defaults to None. If not none, will add the MarkerCluster or FeatureGroup to the supplied basemap.\n    :param kwargs: kwargs to pass to the FastMarkerCluster or MarkerCluster initialization\n    :return: Either a FeatureGroup or a MarkerCluster.\n    \"\"\"\n\n\n    # Possibly make centroids\n    if make_centroids:\n        gdf['centroids'] = gdf['geometry'].centroid\n        points_column = 'centroids'\n\n    # Retrieve points and make markercluster\n    points = [utilities.simple.retrieve_coords(point) for point in gdf[points_column]]\n\n    if fast:\n        marker = FastMarkerCluster(points, **kwargs)\n    else:\n        marker = MarkerCluster(points, **kwargs)\n\n    # Possibly create featuregroup and possibly add to basemap; then return\n    if name is not None:\n        feature_group = folium.FeatureGroup(name, show = show)\n        marker.add_to(feature_group)\n        if basemap is not None:\n            feature_group.add_to(basemap)\n        return feature_group\n    else:\n        if basemap is not None:\n            marker.add_to(basemap)\n        return marker\n\ndef polygon_layer(gdf, color = 'blue', weight = 1, alpha = 0.6, name = None, show = False, basemap = None):\n    \"\"\"\n\n    :param gdf:\n    :param color:\n    :param weight:\n    :param alpha:\n    :param str name: Defaults to None. If not None, will generate a FeatureGroup with this name and return that instead of\n        the GeoJson object.\n    :param bool show: Defaults to False. The show parameter for the FeatureGroup that the GeoJson will be added to.\n    :param folium.Map basemap: Defaults to None. If not none, will add the GeoJson or FeatureGroup to the supplied basemap.\n    :return:\n    \"\"\"\n\n    gjson = folium.GeoJson(\n        gdf,\n        style_function=lambda feature: {\n            'fillColor': color,\n            'color': color,\n            'weight': weight,\n            'fillOpacity': alpha,\n        }\n    )\n\n    if name is not None:\n        feature_group = folium.FeatureGroup(name, show = show)\n        gjson.add_to(feature_group)\n        if basemap is not None:\n            feature_group.add_to(basemap)\n        return feature_group\n    else:\n        if basemap is not None:\n            gjson.add_to(basemap)\n        return gjson\n\n\ndef categorical_choropleth(gdf, factor, colors = None, quietly = False, weight = 1, alpha = 0.6,\n                           geometry_column = 'geometry', name = None, show = False, basemap = None):\n    \"\"\"\n    Creates a categorical choropleth using the tab10 spectrum\n\n\n    :param gdf: A geopandas geodataframe.\n    :param factor: The feature you want to plot (should be categorical).\n    :param colors: Colors to use in the categorical plot. 
If None, will generate colors using the tab10 colormap.\n :param quietly: If true, will not print anything. Defaults to False.\n :param weight: The weight in the style function. Defaults to 1.\n :param alpha: The alpha in the style function. Defaults to 0.6.\n :param geometry_column: The geometry column of the gdf. Defaults to 'geometry'.\n :param str name: Defaults to None. If not None, will generate a FeatureGroup with this name and return that instead of\n the GeoJson object.\n :param bool show: Defaults to False. The show parameter for the FeatureGroup that the GeoJson will be added to.\n :param folium.Map basemap: Defaults to None. If not none, will add the GeoJson or FeatureGroup to the supplied basemap.\n :return: A folium geojson or featuregroup.\n \"\"\"\n\n values = gdf[factor].unique()\n\n # Get colors\n if colors is None:\n colors = plt.cm.tab10(np.linspace(0, 1, len(values)))\n colors = [utilities.simple.convert_to_hex(color) for color in colors]\n elif len(colors) < len(values):\n raise IndexError('In categorical_choropleth call, the \"colors\" input has fewer colors than the data has unique values')\n\n # Get colordic and apply to data\n colordic = {value: color for value, color in zip(values, colors)}\n gdf['color'] = gdf[factor].map(colordic)\n if not quietly:\n print('Legend functionality is not available in categorical choropleths yet, so instead we print the colordic')\n print('Here it is: {}'.format(colordic))\n\n # Transform data, as always\n gdf = gdf.to_crs({'init': 'epsg:4326'})\n\n gdf = gdf[[factor, geometry_column, 'color']]\n\n gjson = folium.GeoJson(\n gdf,\n style_function=lambda feature: {\n 'fillColor': feature['properties']['color'],\n 'color': feature['properties']['color'],\n 'weight': weight,\n 'fillOpacity': alpha,\n }\n )\n\n # Possibly create featuregroup and possibly add to basemap; then return\n if name is not None:\n feature_group = folium.FeatureGroup(name, show = show)\n gjson.add_to(feature_group)\n if basemap is not None:\n feature_group.add_to(basemap)\n return feature_group\n else:\n if basemap is not None:\n gjson.add_to(basemap)\n return gjson\n\ndef continuous_choropleth(gdf, factor, layer_name, scale_name = None, weight = 1, alpha = 0.6,\n colors = ['blue', 'green', 'yellow', 'orange', 'red'],\n quants = [1/6, 2/6, 3/6, 4/6, 5/6],\n method = 'log', round_method = None,\n show = False, geometry_column = 'geometry', basemap = None):\n \"\"\"\n :param gdf: Geodataframe\n :param factor: factor for analysis\n :param layer_name: Name of feature group layer\n :param scale_name: Name of scale\n :param weight: Weight\n :param alpha: Alpha of polygons\n :param colors: A list of colors to use in the colormap, defaults to ['blue', 'green', 'yellow', 'orange', 'red'].\n :param quants: The quantiles to use to 'switch' colors in the colormap. Defaults to [1/6, 2/6, 3/6, 4/6, 5/6].\n If you want a log-based or linear colorscale, adjust the 'method' parameter and set quants to None.\n :param method: The method by which the color scale is generated. 
Defaults to 'log', can also be 'quant' or 'linear'.\n        This parameter is overridden by the \"quantiles\" parameter.\n    :param round_method: If you want to round the color scale to integer values, supply round_method = 'int'\n    :param show: Show by default on start\n    :param geometry_column: 'geometry'\n    :param basemap: If not None, will add the colormap and a scale (bound together) to the basemap as a layer.\n    :return: GeoJson, Colormap\n    \"\"\"\n\n    # Get rid of nas\n    gdf = gdf.loc[(gdf[factor].notnull()) & (gdf[geometry_column].notnull())]\n\n    # Create colormap with caption\n    min_data = gdf[factor].min()\n    max_data = gdf[factor].max()\n    if quants is not None:\n        index = gdf[factor].quantile(quants)\n        if len(colors) != len(index):\n            raise IndexError('index and colors must be same length')\n        colormap = cm.LinearColormap(colors = colors, vmin = min_data, vmax = max_data, index = index)\n    else:\n        colormap = cm.LinearColormap(colors = colors, vmin = min_data, vmax = max_data).to_step(12, method = method, round_method = round_method)\n\n\n    if scale_name is None:\n        colormap.caption = layer_name\n    else:\n        colormap.caption = scale_name\n\n    # Create gjson\n    gdf = gdf[[factor, geometry_column]]\n    gjson = folium.GeoJson(\n        gdf,\n        show = show,\n        name = layer_name,\n        style_function = lambda feature: {\n            'fillColor': colormap(feature['properties'][factor]),\n            'color': colormap(feature['properties'][factor]),\n            'weight': weight,\n            'alpha': alpha,\n        }\n    )\n\n    # This is for backwards compatibility but always do this, it saves time\n    if basemap is not None:\n        colormap.add_to(basemap)\n        gjson.add_to(basemap)\n        BindColormap(gjson, colormap).add_to(basemap)\n\n    return gjson, colormap\n\n\ndef heatmap(gdf, geometry_column = 'geometry', with_time = False, time_column = 'Year', name = None, show = False, basemap = None, **kwargs):\n    \"\"\"\n    Create a heatmap or a heatmap with time from a geodataframe of points.\n\n    :param gdf: Geodataframe with points as the geometry type.\n    :param geometry_column: The geometry column of the gdf. Defaults to 'geometry'\n    :param with_time: If true, plot a heat map with time, not just a heat map.\n    :param time_column: The column used to specify the years of the data, defaults to 'Year'\n    :param str name: Defaults to None. If not None, will generate a FeatureGroup with this name and return that instead of\n        the GeoJson object.\n    :param bool show: Defaults to False. The show parameter for the FeatureGroup that the GeoJson will be added to.\n    :param folium.Map basemap: Defaults to None. 
If not none, will add the GeoJson or FeatureGroup to the supplied basemap.\n :param **kwargs: kwargs to be passed onto the 'heatmap' or 'heatmapwithtime' folium constructors.\n :return: HeatMap object or FeatureGroup\n \"\"\"\n\n if with_time:\n all_points = []\n time_periods = sorted(gdf[time_column].unique().tolist())\n for time_period in time_periods:\n points = gdf.loc[gdf[time_column] == time_period, geometry_column]\n points = [utilities.simple.retrieve_coords(point) for point in points]\n all_points.append(points)\n result = HeatMapWithTime(all_points, index = time_periods, **kwargs)\n\n else:\n points = [utilities.simple.retrieve_coords(point) for point in gdf[geometry_column]]\n result = HeatMap(points, **kwargs)\n\n\n if name is not None:\n feature_group = folium.FeatureGroup(name, show = show)\n result.add_to(feature_group)\n if basemap is not None:\n feature_group.add_to(basemap)\n return feature_group\n else:\n if basemap is not None:\n result.add_to(basemap)\n return result","sub_path":"TXHousing/analysis/choropleth.py","file_name":"choropleth.py","file_ext":"py","file_size_in_byte":11639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"43107336","text":"from collections import deque\nimport sys\ninput = lambda : sys.stdin.readline().strip()\n\n\ndef bfs(x, y):\n queue = deque([(x, y)])\n visited[x][y] = 1\n while queue:\n cx, cy = queue.popleft()\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n nx, ny = cx + dx, cy + dy\n if 0 > nx or nx > N-1 or ny < 0 or ny > M-1:\n continue\n if visited[nx][ny] == 0 and graph[nx][ny] == 1:\n graph[nx][ny] = graph[cx][cy] + 1\n visited[nx][ny] = 1\n queue.append((nx, ny))\n return graph[N-1][M-1]\n\n\nN, M = map(int, input().split())\ngraph = [[0] * M for _ in range(N)]\nvisited = [[0] * M for _ in range(N)]\nfor i in range(N):\n graph[i] = list(map(int, input()))\n\nprint(bfs(0, 0))\n","sub_path":"Python/BOJ/Silver/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"197743317","text":"import csv\nfn = 'otu_table.csv'\nshared = '16S_shared_percent.csv'\n\nwith open(fn, 'rU') as f:\n rows = []\n for row in csv.reader(f):\n rows.append(row)\n\notus, ids = [], []\nfor a in range(len(rows[1])):\n if a > 0:\n otu = rows[1][a]\n for b in range(len(rows)):\n if rows[b][a] == '1':\n this_id = rows[b][0]\n otus.append(otu)\n ids.append(this_id)\n\nwith open('IDs.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['OTU', 'ID'])\n for a in range(len(otus)):\n writing = [otus[a], ids[a]]\n writer.writerow(writing)\n\nwith open(shared, 'rU') as f:\n rows = []\n for row in csv.reader(f):\n rows.append(row)\nrow0 = rows[0]\ndel rows[0]\n\nnew_id, rest_of_row = [], []\nfor a in range(len(rows)):\n otu = rows[a][0]\n otui = int(rows[a][0][3:])\n for b in range(len(otus)):\n if otui == int(otus[b][3:]):\n new_id.append(ids[b])\n rest_of_row.append(rows[a][1:])\nrow0[0] = '#OTU ID'\n\nunique_ids = []\nfor i in new_id:\n adding = True\n for u in unique_ids:\n if i == u:\n adding = False\n if adding == True:\n unique_ids.append(i)\n\nall_rows = []\nfor u in unique_ids:\n all_rows.append([])\n\nfor u in range(len(unique_ids)):\n for a in range(len(new_id)):\n if unique_ids[u] == new_id[a]:\n all_rows[u].append(rest_of_row[a])\n\nfor b in range(len(all_rows)):\n if len(all_rows[b]) > 1:\n new_row = []\n for c in range(len(all_rows[b])):\n for d in 
range(len(all_rows[b][c])):\n                all_rows[b][c][d] = float(all_rows[b][c][d])\n        for e in range(len(all_rows[b][0])):\n            for f in range(len(all_rows[b])):\n                all_rows[b][0][e] += all_rows[b][f][e]\n        all_rows[b] = [all_rows[b][0]]\n\nnew_id, rest_of_row = unique_ids, all_rows\n\nnew_r0 = ''\nfor c in range(len(row0)):\n    new_r0 += row0[c]+'\\t'\nnew_r0 += '\\n'\n    \nfor_text = [new_r0]\nfor a in range(len(new_id)):\n    this_row = new_id[a]+'\\t'\n    for b in range(len(rest_of_row[a])):\n        for c in range(len(rest_of_row[a][b])):\n            this_row += str(rest_of_row[a][b][c])+'\\t'\n    this_row += '\\n'\n    for_text.append(this_row)\n\nf = open('new_otu_file_percent.txt', 'w')\nfor t in for_text:\n    t = str(t)\n    f.write(t)\nf.close()\n\n\n","sub_path":"CommunityAnalysis/PICRUSt/all_PICRUSt/PICRUSt/get_ids.py","file_name":"get_ids.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"300531788","text":"import ERBasic\r\nimport ERElemental\r\nimport ERStructure\r\nclass Piece(object):\r\n    def __init__(self, role, color, coords, direction):\r\n        self.role = role\r\n        self.color = color\r\n        self.coords = coords\r\n        self.direction = direction\r\n        self.moved = False\r\n        self.element = \"none\"\r\n        self.mods = []\r\n    def getMoves(self, board):\r\n        moves = ERBasic.getMoves(board, self)\r\n        if \"elemental\" in self.mods:\r\n            moves = ERElemental.getMoves(board, self, moves)\r\n        if \"structure\" in self.mods:\r\n            moves = ERStructure.getMoves(board, self, moves)\r\n\r\n        return moves\r\n","sub_path":"chessPieces.py","file_name":"chessPieces.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"482315843","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct  2 10:46:48 2021\n\n@author: favou\n\"\"\"\nimport pickle\nimport pandas as pd\nimport numpy as np\n\n#== Alter the following 10 variables with per-36 statistics ===================\n\n# Threes = 0 #three pointers made per 36 minutes\n# Three_attempts = 0.5 #three pointers attempted per 36 minutes\n# Three_per = Threes/Three_attempts # proportion of three pointers made\n# ORB = 3 #offensive rebounds per 36 minutes \n# DRB = 0.5 #defensive rebounds per 36 minutes\n# TRB = 26 #total rebounds per 36 minutes\n# AST = 2 #total assists per 36 minutes\n# STL = 1 #total steal per 36 minutes \n# BLK = 0.2 #total blocks per 36 minutes\n# PF = 2 # personal fouls per 36 minutes \n\nheadings = ['3P', '3PA', '3P_per', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'PF']\n#==============================================================================\n\n\n\n\n# load model \nwith open('Optimal_KNN_model_train.sav', 'rb') as pickle_file:\n    knn5 = pickle.load(pickle_file)\n\n#loading df of unscaled per 36mins stats (for 10% untrained data)\ndf_unscaled = pd.read_csv('df_10.csv')[headings]\n\n# converting list of stats to dataframe of scaled stats \nL = [Threes, Three_attempts, Three_per, ORB, DRB, TRB, AST, STL, BLK, PF]\n#L = [1.241379, 3.413793, 4.034483, 2.172414, 0.310345]\n\ndef min_max_scaler_list(L, df):\n    #Make a list of per-36min stats scaled \n    \n    L_s = [] # holds the scaled stats\n    for i in range(len(L)):\n        \n        if 'per' not in df.columns[i]:\n            \n            min_s = df.iloc[:,i].min()\n            max_s = df.iloc[:,i].max()\n            \n            #in case the min or max statistic is written in section beginning with line 10\n            \n            min_s = L[i] if L[i] < min_s else df.iloc[:,i].min()\n            max_s = L[i] if L[i] > max_s else df.iloc[:,i].max()\n            \n            
e_s = (L[i]-min_s)/(max_s-min_s)\n            \n        else: \n            e_s = L[i]\n        \n        L_s.append(e_s)\n        \n    return L_s\n\nL_scaled = min_max_scaler_list(L, df_unscaled)\n\n#Appending scaled list to dataframe\n    #long way\n# columns = headings\n\n# series = pd.Series(L_scaled, index = columns)\n# df_stats_scaled = pd.DataFrame()\n# df_stats_scaled = df_stats_scaled.append(series, ignore_index=True)[headings]\n\n    #short way\ndf_stats_scaled = pd.DataFrame(np.array(L_scaled).reshape(-1, len(L_scaled)), columns = headings)\n\n#prediction \nposition = knn5.predict(df_stats_scaled)[0]\nprint(f'This player is probably a {position}')\n\n","sub_path":"KNN/final model/Testing_Optimised_NBA_KNN.py","file_name":"Testing_Optimised_NBA_KNN.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"21973549","text":"n,t = [int(x) for x in input().split(' ')]\nc = input().split(' ')\nc = [int(x) for x in c]\nfn = n\n\nAddedC = 0\nfor candy in range(len(c)-1):\n    fn -= c[candy]\n    if fn < 5:\n        AddedC += n - fn\n        fn += n - fn\n        \nprint(AddedC)\n","sub_path":"HackerR/WOC30/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
{"seq_id":"342637453","text":"\"\"\"Calorie-Harmony dataset\"\"\"\nimport os\nimport logging\nimport pandas as pd\nimport cv2\nimport numpy as np\n\nlogging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n                    datefmt='%H:%M:%S', level=logging.INFO)\n\nTRAIN_SPLIT = {'P429': {'2020_03_03': ['07', '08', '09'], '2020_03_04': ['05', '10', '12'], '2020_03_05': ['06', '11']}}\nTEST_SPLIT = {'P431': {'2020_03_12': ['09', '10', '11', '12']}}\n\nclass Dataset():\n    def __init__(self, labels_root_dir, frames_root_dir):\n        self.labels_root_dir = labels_root_dir\n        self.frames_root_dir = frames_root_dir\n\n    def get_data_info(self):\n        all_info = {**TRAIN_SPLIT, **TEST_SPLIT}\n        return all_info\n\n    def get_labels(self, participant, day, hour):\n        \"\"\"Read the labels csv and return the list of label timestamps\"\"\" \n        labels_path = os.path.join(self.labels_root_dir, 'Labels/%s/%s/%s/Andy/gesture_labels.csv' % (participant, day, hour))\n        labels = pd.read_csv(labels_path)\n        labels = labels.dropna()\n        labels = labels.loc[labels['certainty'] == 1.0]\n        labels_timestamp = labels['timestamp'].values.tolist()\n        return labels_timestamp\n    \n    def get_frames(self, participant, day, hour):\n        \"\"\"Load the saved camera frames into a numpy array\"\"\"\n        \n        def read_image(path):\n            img = cv2.imread(path)\n            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n            \n            dim = (320, 256)\n            img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n            \n            return img\n        \n        frames_path = os.path.join(self.frames_root_dir, 'CalorieHarmony/%s/In_Wild/Camera/Frame/%s/%s' % (participant, day, hour))\n        paths = os.listdir(frames_path)\n        paths.sort()\n        \n        assert len(paths) != 0, \"Couldn't find frames in directory\"\n        \n        timestamps = []\n        frames = []\n        counter = 0\n        for file in paths:\n            if file.endswith(\".jpg\"):\n                file_path = os.path.join(frames_path, file)\n                timestamps.append(int(file.replace('.jpg', '')))\n                frames.append(read_image(file_path))\n                \n                counter += 1\n                if counter % 5000 == 0:\n                    logging.info('Loading images: {0}'.format(counter))\n\n        return timestamps, np.array(frames)\n\n    def done(self):\n        logging.info(\"Done\")\n\n    def get_train_split(self):\n        return TRAIN_SPLIT\n\n    def get_test_split(self):\n        return 
TEST_SPLIT\n","sub_path":"calharmony_data.py","file_name":"calharmony_data.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"48269558","text":"\"\"\"\nHelper functions to perform active learning with modAL learners.\n\"\"\"\nfrom sklearn.ensemble import RandomForestRegressor\nimport numpy as np\nfrom modAL.models import ActiveLearner, CommitteeRegressor\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.metrics import r2_score\nfrom modAL.disagreement import max_std_sampling\n\n\nclass RfWrapper(RandomForestRegressor): # superclass\n \"\"\"\n Wrapper class for RandomForestRegressor which modifies predict() method to include second argument return_std.\n This argument is expected by\n modAL library for active learning regression. Provided by course instructors.\n \"\"\"\n\n def predict(self, X, return_std=False):\n if return_std:\n ys = np.array([e.predict(X) for e in self.estimators_])\n return np.mean(ys, axis=0).ravel(), np.std(ys, axis=0).ravel()\n return super().predict(X).ravel()\n\n\ndef get_next_sample(learner, X, y):\n \"\"\"\n Queries the pool X of data and selects a new sample using the query_strategy of the ActiveLearner.\n\n :param learner:the ActiveLearner within which a query_strategy is defined.\n :param X:the pool of data from which to select a sample. This is a numpy array of feature instances.\n :param y:the pool of labels corresponding to X instances. This is a numpy array of labels.\n :return: (X,y,idx) tuple of the selected sample, where idx is the index of the selected sample.\n \"\"\"\n # call the query strategy defined in the learner to obtain a new sample\n query_idx, query_sample = learner.query(X)\n\n # modify indexing to interpret as collection of one element with d features\n query_sample_reshaped = query_sample.reshape(1, -1)\n\n # obtain the query label\n query_label = y[query_idx]\n\n # modify indexing to interpret as 1D array of one element\n query_label_reshaped = query_label.reshape(1, )\n\n return query_sample_reshaped, query_label_reshaped, query_idx\n\n\ndef run_active_learner_regression(learner, X_pool, y_pool, n_queries):\n \"\"\"\n Performs active learning using given ActiveLearner. Runs for\n the given number of queries. Each iteration draws from the pool of\n data using the learner's query_strategy, updates the model, then removes queried instance from the data pool.\n\n :param learner: the ActiveLearner\n :param X_pool:the pool of feature data from which to sample\n :param y_pool:the labels corresponding to the X_pool\n :param n_queries:the number of queries (iterations) to execute during active learning\n :return: None\n \"\"\"\n # perform active learning\n for q in range(n_queries):\n # get sample\n X_sample, y_sample, query_idx = get_next_sample(learner, X_pool, y_pool)\n\n # use new sample to update the model\n learner.teach(X_sample, y_sample)\n\n # remove labeled instance from pool\n X_pool = np.delete(X_pool, query_idx, axis=0)\n y_pool = np.delete(y_pool, query_idx)\n\n\ndef run_and_score_active_learner_regression(learner, X_pool, y_pool, X_test, y_test, n_queries):\n \"\"\"\n Performs active learning using given ActiveLearner. Runs for\n the given number of queries. 
Each iteration draws from the pool of\n data using the learner's query_strategy, updates the model, removes queried instance from the data pool,\n then scores the model against given test data.\n\n :param learner: the ActiveLearner\n :param X_pool:the pool of feature data from which to sample\n :param y_pool:the labels corresponding to the X_pool\n :param X_test:the collection of data with which to score the model\n :param y_test:the labels corresponding to the X_test\n :param n_queries:the number of queries (iterations) to execute during active learning\n :return: List of scores for each query.\n \"\"\"\n history = []\n\n # score before starting\n r2 = score_regression_model(learner, X_test, y_test)\n history.append(r2)\n\n # perform active learning\n for q in range(n_queries):\n # get sample\n X_sample, y_sample, query_idx = get_next_sample(learner, X_pool, y_pool)\n\n # use new sample to update the model\n learner.teach(X_sample, y_sample)\n\n # remove labeled instance from pool\n X_pool = np.delete(X_pool, query_idx, axis=0)\n y_pool = np.delete(y_pool, query_idx)\n\n # score learner\n r2 = score_regression_model(learner, X_test, y_test)\n history.append(r2)\n\n return history\n\n\ndef build_committee(kernel, n_learner, n_initial, X_pool, y_pool, seed):\n \"\"\"\n Constructs a CommitteeRegressor of ActiveLearner members based on provided parameters.\n Uses GaussianProcessRegressors as committee members.\n Defines initial training set of random instances for each learner in the committee.\n\n :param kernel: Kernel to be used in Gaussian Process regressors.\n :param n_learner: Number of members in the committee.\n :param n_initial: Number of initial training instances for each committee member.\n :param X_pool:the pool of feature data from which to sample\n :param y_pool:the labels corresponding to the X_pool\n :param seed: Random seed for reproducibility.\n :return: CommitteeRegressor\n \"\"\"\n # get initial training set for each learner\n initial_idx = []\n for i in range(n_learner):\n initial_idx.append(np.random.choice(len(X_pool), size=n_initial, replace=False))\n\n # initialize learners for Committee\n learner_list = [ActiveLearner(\n estimator=GaussianProcessRegressor(kernel, random_state=seed),\n X_training=X_pool[idx],\n y_training=y_pool[idx]) for idx in initial_idx]\n\n # create Committee\n committee = CommitteeRegressor(learner_list=learner_list, query_strategy=max_std_sampling)\n return committee\n\n\ndef score_regression_model(learner, X_test, y_test):\n \"\"\"\n Calculates R2 score for given learner.\n\n :param learner: ActiveLearner\n :param X_test: Test set\n :param y_test: Regression values for test set\n :return: Float, r2 score.\n \"\"\"\n y_pred = learner.predict(X_test, return_std=False)\n r2 = r2_score(y_test, y_pred) # y_true, y_pred\n return r2\n\n\ndef build_random_forest_regressor(n_estimators, max_depth, n_initial, X_pool, y_pool, seed):\n \"\"\"\n Constructs RandomForestRegressor ActiveLearner with custom query_strategy.\n Defines initial training set of random instances for learner.\n\n :param n_estimators: Number of estimators in Random Forest.\n :param max_depth: Max depth of trees in Random Forest.\n :param n_initial: Number of initial training instances for each committee member.\n :param X_pool:the pool of feature data from which to sample\n :param y_pool:the labels corresponding to the X_pool\n :param seed: Random seed for reproducibility.\n :return: ActiveLearner\n \"\"\"\n initial_idx = np.random.choice(len(X_pool), size=n_initial, 
replace=False)\n\n # https://modal-python.readthedocs.io/en/latest/content/examples/active_regression.html\n def GP_regression_std(regressor, X):\n _, std = regressor.predict(X, return_std=True)\n query_idx = np.argmax(std)\n return query_idx, X[query_idx]\n\n regressor = ActiveLearner(\n estimator=RfWrapper(n_estimators=n_estimators, max_depth=max_depth, random_state=seed),\n query_strategy=GP_regression_std,\n X_training=X_pool[initial_idx],\n y_training=y_pool[initial_idx]\n )\n\n return regressor\n","sub_path":"packages/activelearning/activelearning.py","file_name":"activelearning.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"112636794","text":"\n'''\nThis contains the CLI tool for managing a file DB...\n'''\nimport os\nimport csv\nimport sys\nimport json\nimport sqlite3\nimport logging\nimport argparse\n\nlogging.basicConfig(level=logging.WARNING, format='%(asctime)s: %(levelname)s - %(name)s - %(message)s')\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef main():\n # Set up a parser:\n parser = argparse.ArgumentParser(prog='filedb')\n\n # Common arguments:\n parser.add_argument('-v', '--verbose', action='count', default=0, help='Logging level; add more -v for more logging.')\n parser.add_argument('--dry-run', action='store_true', help='Do not modify the TrackDB.')\n parser.add_argument('-i', '--indent', type=int, help='Number of spaces to indent when emitting JSON.')\n\n # Use sub-parsers for different operations:\n subparsers = parser.add_subparsers(dest=\"op\")\n subparsers.required = True\n\n # 'jsonl-to-sqlite' subcommand - read a file listing generated by hadoop fs -lsr ... and convert to SQLite:\n parser_sq = subparsers.add_parser('jsonl-to-sqlite', help='Read a TrackDB JSONL format file listing and convert to SQLite')\n parser_sq.add_argument('input_jsonl', type=str, help='The file to read, in TrackDB JSONL. 
Can be \"-\" for STDIN.')\n parser_sq.add_argument('output_sqlite', type=str, help='The file to output to in SQLite format.')\n\n # And PARSE it:\n args = parser.parse_args()\n\n # Set up verbose logging:\n if args.verbose == 1:\n logging.getLogger().setLevel(logging.INFO) \n elif args.verbose > 1:\n logging.getLogger().setLevel(logging.DEBUG) \n\n # Ops:\n logger.debug(\"Got args: %s\" % args)\n if args.op == 'jsonl-to-sqlite':\n # Input\n if args.input_jsonl == '-':\n reader = sys.stdin\n else:\n reader = open(args.input_jsonl, 'r')\n # Output\n con = sqlite3.connect(args.output_sqlite)\n cur = con.cursor()\n cur.execute('''CREATE TABLE IF NOT EXISTS files\n (id TEXT PRIMARY KEY ASC, collection TEXT, stream TEXT, kind TEXT, timestamp DATETIME, store TEXT, file_size INTEGER)''')\n\n # Convert and write out:\n counter = 0\n for line in reader:\n item = json.loads(line)\n counter += 1\n cur.execute(\"INSERT OR IGNORE INTO files(id, collection, stream, kind, timestamp, store, file_size) \\\n VALUES( '%(id)s', '%(collection_s)s', '%(stream_s)s', '%(kind_s)s', '%(timestamp_dt)s', '%(hdfs_service_id_s)s', %(file_size_l)s )\" % item)\n\n # Close up\n if reader is not sys.stdin:\n reader.close()\n con.commit()\n con.close()\n\n # Check this seems to have worked:\n if counter == 0:\n raise Exception(\"No records were found for conversion!\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lib/filedb/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"93428319","text":"\"\"\"\nCommand line interface\n\"\"\"\n\nimport click\n\nimport control\n\n\n# noinspection PyMissingOrEmptyDocstring\n@click.group()\ndef cli(): pass\n\n\n@cli.command()\ndef list_all():\n \"\"\"\n List every flashcard\n \"\"\"\n for fid, name in control.list_flashcards():\n print(f'{fid} - {name}')\n\n\n@cli.command()\n@click.argument('title')\n@click.argument('content')\ndef add_flashcard(title: str, content: str):\n \"\"\"\n Adds a new flashcard\n\n \\b\n :param title: Flashcard title\n :param content: Flashcard contents\n \"\"\"\n control.add_flashcard(title, content)\n print('Flashcard has been added')\n\n\n@cli.command()\n@click.argument('string')\ndef title_search(string: str):\n \"\"\"\n Looks for a matching title\n\n :param string: Search string\n \"\"\"\n for fid, title in control.title_search(string):\n print(f'{fid} - {title}')\n\n\n@cli.command()\n@click.argument('flashcard_id')\ndef print_flashcard(flashcard_id):\n \"\"\"\n Prints the content of a flashcard\n\n \\b\n :param flashcard_id: ID of the flashcard to print\n \"\"\"\n print(control.get_flashcard(flashcard_id))\n\n\n@cli.command()\n@click.argument('flashcard_id')\ndef delete_flashcard(flashcard_id):\n \"\"\"\n Deletes a flashcard\n\n \\b\n :param flashcard_id: ID of the flashcard to delete\n \"\"\"\n control.delete_flashcard(flashcard_id)\n print(f'The flashcard (id={flashcard_id}) has been deleted')\n\n\n@cli.command()\n@click.argument('flashcard_id')\ndef check_flashcard(flashcard_id):\n \"\"\"\n Marks the flashcard as (not) done, based on the previous state.\n\n \\b\n :param flashcard_id: ID of the flashcard to mark.\n \"\"\"\n if control.check_flashcard(flashcard_id):\n print('Flashcard marked as done')\n else:\n print('Flashcard marked as not done')\n\n\nif __name__ == '__main__':\n 
cli()\n","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"165645610","text":"\"\"\"\nデータの切り出し(2048点)\n\"\"\"\n\nfrom app import path\nimport pandas as pd\nimport numpy as np\nimport csv\n\nN = path.data_long\nhammingWindow = np.hamming(N+1)\n\nfor i in path.new_subject:\n for d in path.new_day:\n for s in path.sets:\n for j in path.mix_char2:\n for l in path.time:\n file_path = path.cut_mix_new + \"/\" + i + \"/\" + d + \"/\" + s + \"/\" + j + \"/\" + l + \"cut.CSV\"\n df = pd.read_csv(file_path)\n\n print(\"newdata/\" + i + \"/\" + d + \"/\" + s + \"/\" + j + \"/\" + l + \".csv\")\n\n csv_file = open(file_path, \"r\", encoding=\"ms932\", errors=\"\", newline=\"\")\n f = csv.reader(csv_file, delimiter=\",\", doublequote=True, lineterminator=\"\\r\\n\", quotechar='\"',\n skipinitialspace=True)\n\n o2 = 0\n\n for o in path.cut8_time:\n nf = open(path.cut_8_new + \"/\" + i + \"/\" + d + \"/\" + s + \"/\" + j + \"/\" + o + \"/\" + l + \"cut.CSV\", 'w')\n dataWriter = csv.writer(nf)\n\n # データの切り出し\n\n y1 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y2 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y3 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y4 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y5 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y6 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y7 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n y8 = df.iloc[ int((N / 8) * o2):int((N / 8) * (o2 + 1)), : ]\n o2 += 1\n\n # y1 = np.abs(y1)\n # y2 = np.abs(y2)\n # y3 = np.abs(y3)\n # y4 = np.abs(y4)\n # y5 = np.abs(y5)\n # y6 = np.abs(y6)\n # y7 = np.abs(y7)\n # y8 = np.abs(y8)\n\n new_data = pd.concat([y1, y2, y3, y4, y5, y6, y7, y8], axis=1)\n\n # 下の2行は0列目の全ての要素を参照,(1列目から8列目)までを代入\n new_data.columns = new_data.iloc[ 0, : ]\n new_data.index = new_data.iloc[ :, 0 ]\n new_data = new_data.iloc[ 1:N + 1, 1:8 ]\n\n new_data.to_csv(path.cut_8_new + \"/\" + i + \"/\" + d + \"/\" + s + \"/\" + j + \"/\" + o + \"/\" + l + \"cut.CSV\")\n print(\"Cleate/\" + \"/\" + i + \"/\" + d + \"/\" + s + \"/\" + j + \"/\" + o + \"/\" + l + \"cut.CSV\")\n\n nf.close()\n\n","sub_path":"cut_8_new.py","file_name":"cut_8_new.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"542030574","text":"# -*- coding: utf-8 -*-\n# @Author: bpf@fastcdn.com \n# @Date: 2018-07-26 22:20:03 \n# @Last Modified by: bpf@fastcdn.com \n# @Last Modified time: 2018-07-26 22:20:03 \n\n\nimport config_default\n\ndef merge(defaults, override):\n if override == None:\n return defaults\n\n r = {}\n for k, v in defaults.items():\n if k in override:\n if isinstance(v, dict):\n r[k] = merge(v, override[k])\n else:\n r[k] = override[k]\n else:\n r[k] = v\n return r\n\nconfigs = config_default.configs\n\ntry:\n import config_override\n configs = merge(configs, config_override.configs)\nexcept ImportError:\n pass\n","sub_path":"backend/api/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"51647900","text":"#!/usr/bin/env python\n\"\"\"\nSimple tool to simulate stellar populations.\n\"\"\"\n__author__ = \"Alex Drlica-Wagner\"\n__email__ = \"kadrlica@fnal.gov\"\n__version__ = \"0.1.0\"\n\nimport 
os,sys\nimport numpy as np\n\nimport scipy.stats as stats\n\nfrom dwarf import Dwarf\nfrom instruments import factory as instrumentFactory\n\n\ndef randerr(size=1,func='normal',**kwargs):\n    \"\"\" Return a sample from a random variate. \"\"\"\n    kwargs.update(size=size)\n    funclower = func.lower()\n    if funclower in ['normal','gaussian','gauss']:\n        rvs = stats.norm.rvs\n    elif funclower in ['uniform']:\n        rvs = stats.uniform(-1,2).rvs\n    elif funclower in ['lorentzian','cauchy']:\n        rvs = stats.cauchy.rvs\n    elif funclower in ['delta']:\n        rvs = stats.randint(1,2).rvs\n    else:\n        raise Exception('Unrecognized type: %s'%func)\n    return rvs(**kwargs) \n\n\nclass Simulator(object):\n    \n    def run(self, num=1, exptime=10000):\n        self.create_dwarf()\n        self.create_instrument()\n        if not hasattr(exptime,'__iter__'): exptime = [exptime]\n\n        out = []\n        for e in exptime:\n            for i in range(num):\n                data = simulate(dwarf,instrument,e)\n                out.append(data)\n        return out\n    \n    @staticmethod\n    def simulate(dwarf,instrument,exp=10000):\n        \"\"\" Simulate observation \"\"\"\n\n        # Set the second band to 'i' (matches CaT lines)\n        dwarf.band_1 = 'g'; dwarf.band_2 = 'i'\n        mag_1,mag_2,ra,dec = dwarf.simulate() \n        snr = instrument.mag2snr(mag_2,exp)\n        \n        #olderr = np.seterr(all='ignore')\n        sel = (mag_1 > 16) & (snr > 5)\n        #np.seterr(**olderr)\n        \n        nstar = sel.sum()\n        mag = mag_1[sel]\n        color = (mag_1-mag_2)[sel]\n        snr = snr[sel]\n\n        # The true velocity, u, of each star is the sum of the mean velocity and\n        # a component from the intrinsic velocity dispersion\n        vtrue = dwarf.vmean + dwarf.vdisp*randerr(nstar,'normal')\n\n        # There are two components of the measurement uncertainty on\n        # the velocity of each star\n        vstaterr = instrument.snr2err(snr)\n        vsyserr = instrument.vsys\n\n        # The measured velocity is the true velocity plus a component from the\n        # instrumental measurement error\n        vstat = vstaterr*randerr(nstar,'normal')\n        vsys = vsyserr*randerr(nstar,'normal')\n\n        vmeas = vtrue + vstat + vsys\n\n        # Now assign the measurement error to the statistical error\n        vmeaserr = vstaterr\n\n        # The error that is commonly used is the sum of the measurement error\n        # and the systematic error estimate in quadrature\n        verr = np.sqrt(vstaterr**2 + vsyserr**2)\n        \n        names = ['RA','DEC','MAG_%s'%dwarf.band_1.upper(),'MAG_%s'%dwarf.band_2.upper(),\n                 'SNR','VTRUE','VSTAT','VSYS','VMEAS','VMEASERR','VERR']\n        data = [ra[sel],dec[sel],mag_1[sel],mag_2[sel],snr,vtrue,vstat,vsys,vmeas,vmeaserr,verr]\n        return np.rec.fromarrays(data,names=names)\n    \nif __name__ == \"__main__\":\n    import argparse\n    description = \"Simulate the observable properties of a dwarf galaxy.\"\n    parser = argparse.ArgumentParser(description=description)\n    parser.add_argument('outfile',nargs='?',\n                        help=\"Optional output file\")\n    parser.add_argument('--seed',type=int,default=None,\n                        help=\"Random seed\")\n\n    group = parser.add_argument_group('Physical')\n    parser.add_argument('--stellar_mass',type=float,default=2000.,\n                        help='Stellar mass for simulated satellite (Msun)')\n    parser.add_argument('--vmean',type=float,default=60.,\n                        help='Mean systemic velocity (km/s)')\n    parser.add_argument('--vdisp',type=float,default=3.3,\n                        help='Velocity dispersion (km/s)')\n    \n    group = parser.add_argument_group('Isochrone')\n    group.add_argument('--isochrone',type=str,default='Bressan2012',\n                       help='Isochrone type.')\n    group.add_argument('--distance_modulus',type=float,default=17.5,\n                       help='Distance modulus.')\n    group.add_argument('--age',type=float,default=13.0,\n                       help='Age of stellar population 
(Gyr).')\n    group.add_argument('--metallicity',type=float,default=1e-3,\n                       help='Metallicity of stellar population.')\n    \n    group = parser.add_argument_group('Kernel')\n    group.add_argument('--kernel',type=str,default='EllipticalPlummer',\n                       help='Kernel type.')\n    group.add_argument('--ra',type=float,default=54.0,\n                       help='Centroid right ascension (deg).')\n    group.add_argument('--dec',type=float,default=-54.0,\n                       help='Centroid declination (deg).')\n    group.add_argument('--extension',type=float,default=0.1,\n                       help='Extension (deg).')\n    group.add_argument('--ellipticity',type=float,default=0.0,\n                       help='Spatial extension (deg).')\n    group.add_argument('--position_angle',type=float,default=0.0,\n                       help='Spatial extension (deg).')\n\n    group = parser.add_argument_group('Instrument')\n    group.add_argument('--instrument',default='gmacs',choices=['gmacs'],\n                       help='Instrument')\n    egroup = group.add_mutually_exclusive_group()\n    egroup.add_argument('--exptime',default=3600.,type=float,\n                        help='Exposure time (s)')\n    egroup.add_argument('--maglim',default=None,type=float,\n                        help='Limiting magnitude (S/N = 5)')\n    group.add_argument('--vsys',default=None,type=float,\n                       help='Systematic velocity error (km/s)')\n    args = parser.parse_args()\n    kwargs = vars(args)\n\n    np.random.seed(args.seed)\n\n    exptime = mag2exp(args.maglim) if args.maglim else args.exptime\n\n    dwarf = Dwarf(vmean=args.vmean,vdisp=args.vdisp)\n    isochrone=Dwarf.createIsochrone(name=args.isochrone, age=args.age,\n                                    metallicity=args.metallicity,\n                                    distance_modulus=args.distance_modulus)\n    dwarf.set_isochrone(isochrone)\n\n    kernel=Dwarf.createKernel(name=args.kernel,extension=args.extension,\n                              ellipticity=args.ellipticity,\n                              position_angle=args.position_angle,\n                              lon=args.ra,lat=args.dec)\n    dwarf.set_kernel(kernel)\n    dwarf.richness = args.stellar_mass/dwarf.isochrone.stellar_mass()\n\n\n    instr= instrumentFactory(args.instrument)\n    if args.vsys is not None: instr.vsys = args.vsys\n\n    # Run the simulation\n    data = Simulator.simulate(dwarf,instr,exptime)\n\n    # Output\n    if args.outfile:\n        out = open(args.outfile,'w')\n    else:\n        out = sys.stdout\n    out.write('#'+' '.join(['%-9s'%n for n in data.dtype.names])+'\\n')\n    np.savetxt(out,data,fmt='%-9.5f')\n","sub_path":"dsphsim/dsphsim.py","file_name":"dsphsim.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"564296352","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# --------------------------------------------\n# Author: Zhou Cheng \n# Date  : 2015-03-31 11:31:34\n# --------------------------------------------\n\nimport pika\nimport pika.credentials\n\nhigh_words = ['module', 'high', 'site']\nlow_words = ['module', 'low', 'site']\n\nHOST = 'mq01.netpupil.cn'\nEXCHANGE = 'parker'\nEXCHANGE_TYPE = 'topic'\n\ndef send():\n    connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))\n    channel = connection.channel()\n\n    channel.exchange_declare(exchange=EXCHANGE, type=EXCHANGE_TYPE)\n\n    for i in range(10):\n        # random.shuffle(high_words)\n        routing_key = '.'.join(high_words)\n        channel.basic_publish(exchange=EXCHANGE,\n                              routing_key=routing_key,\n                              body=routing_key)\n        print(\" [x] Sent %r:%r\" % (routing_key, routing_key))\n        # random.shuffle(low_words)\n        routing_key = '.'.join(low_words)\n        channel.basic_publish(exchange=EXCHANGE,\n                              routing_key=routing_key,\n                              body=routing_key)\n        print(\" [x] Sent %r:%r\" % (routing_key, routing_key))\n    connection.close()\n\n\nif __name__ == '__main__':\n    
send()","sub_path":"Python/RabbitMQ/topics_publisher.py","file_name":"topics_publisher.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"545642074","text":"#!/usr/bin/env python\n\"\"\"Provides utilities for subsampling.\"\"\"\n\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\nfrom eden.graph import Vectorizer\nimport random\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef min_similarity_selection(matrix, scores=None, max_num=None):\n \"\"\"Select the max_num most dissimilar instances.\n\n Given a similarity matrix and a score associate to each instance,\n iteratively find the most similar pair and remove the one with the\n smallest score until only max_num remain.\n \"\"\"\n similarity_matrix = matrix.copy()\n size = similarity_matrix.shape[0]\n # remove diagonal elements, so that sim(i,i)=0\n # and the pair i,i is not selected\n similarity_matrix = similarity_matrix - np.diag(np.diag(similarity_matrix))\n # iterate size - k times, i.e. until only k instances are left\n for t in range(size - max_num):\n # find pairs with largest similarity\n (i, j) = np.unravel_index(\n np.argmax(similarity_matrix), similarity_matrix.shape)\n # choose instance with smallest score as the one to be removed\n if scores[i] < scores[j]:\n id = i\n else:\n id = j\n # remove instance with lower score by setting all its\n # pairwise similarities to 0\n similarity_matrix[id, :] = 0\n similarity_matrix[:, id] = 0\n # extract surviving elements, i.e. element that have a row\n # that is not only 0s\n select_ids = [ind for ind, row in enumerate(similarity_matrix)\n if np.sum(row) > 0]\n return select_ids\n\n\ndef _outliers(graphs, k=3):\n vec = Vectorizer(r=3, d=3,\n normalization=False, inner_normalization=False)\n x = vec.transform(graphs)\n knn = NearestNeighbors()\n knn.fit(x)\n neigbhbors = knn.kneighbors(x, n_neighbors=k, return_distance=False)\n outlier_list = []\n non_outlier_list = []\n for i, ns in enumerate(neigbhbors):\n not_outlier = False\n for n in ns[1:]:\n if i in list(neigbhbors[n, :]):\n not_outlier = True\n break\n if not_outlier is False:\n outlier_list.append(i)\n else:\n non_outlier_list.append(i)\n return outlier_list, non_outlier_list\n\n\ndef _select_non_outliers(graphs, k=3):\n outlier_list, non_outlier_list = _outliers(graphs, k)\n graphs = [graphs[i] for i in non_outlier_list]\n logging.debug('outlier removal:%d' % len(graphs))\n return graphs\n\n\ndef _remove_similar_pairs(graphs):\n vec = Vectorizer(r=3, d=3,\n normalization=False, inner_normalization=False)\n x = vec.transform(graphs)\n matrix = cosine_similarity(x)\n scores = np.array([1] * len(graphs))\n ids = min_similarity_selection(matrix,\n scores=scores,\n max_num=len(graphs) / 2)\n graphs = [graphs[i] for i in ids]\n logging.debug('similar pairs removal:%d' % len(graphs))\n return graphs\n\n\ndef _size_filter(graphs, fraction_to_remove=.1):\n frac = 1.0 - fraction_to_remove / 2\n size = len(graphs)\n graphs = sorted(graphs, key=lambda g: len(g))[:int(size * frac)]\n graphs = sorted(graphs, key=lambda g: len(g), reverse=True)\n graphs = graphs[:int(size * frac)]\n logging.debug('size filter:%d' % len(graphs))\n return graphs\n\n\ndef _random_sample(graphs, max_size):\n if len(graphs) > max_size:\n graphs = random.sample(graphs, max_size)\n logging.debug('random sample:%d' % len(graphs))\n return graphs\n\n\ndef pre_process(graphs,\n 
initial_max_size=3000,\n                fraction_to_remove=.1,\n                n_neighbors_for_outliers=3,\n                remove_similar=True,\n                max_size=500):\n    \"\"\"pre_process.\"\"\"\n    logging.debug('original size:%d' % len(graphs))\n    graphs = _random_sample(graphs, initial_max_size)\n    graphs = _size_filter(graphs, fraction_to_remove)\n    graphs = _select_non_outliers(graphs, k=n_neighbors_for_outliers)\n    if remove_similar:\n        graphs = _remove_similar_pairs(graphs)\n    graphs = _random_sample(graphs, max_size)\n    return graphs\n","sub_path":"constrActive/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"31676208","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import *\r\nimport PyPDF2 as pypdf\r\nimport sys, os\r\nimport tkinter.font as font\r\n\r\n\r\ndef resource_path(relative_path):\r\n    if hasattr(sys, '_MEIPASS'):\r\n        return os.path.join(sys._MEIPASS, relative_path)\r\n    return os.path.join(os.path.abspath(\".\"), relative_path)\r\n\r\n\r\nroot=Tk()\r\nroot.title('Answers Hider')\r\n#root.iconbitmap( resource_path('./icon.ico'))\r\npdf_list = []\r\n\r\n\r\nfilename1=StringVar()\r\nsrc_pdf=StringVar()\r\n\r\ndef load_pdf(filename):\r\n    f = open(filename,'rb')\r\n    return pypdf.PdfFileReader(f)\r\n\r\ndef load1():\r\n    f = askopenfilename(filetypes=(('PDF File', '*.pdf'), ('All Files','*.*')))\r\n    filename1.set(f.split('/')[-1])\r\n    src_pdf=f\r\n    print(f)\r\n    print(src_pdf)\r\n    pdf1 = load_pdf(f)\r\n    pdf_list.append(pdf1)\r\n    pdf_list.append(f)\r\n\r\ndef add_to_writer(pdfsrc, writer):\r\n    [writer.addPage(pdfsrc.getPage(i)) for i in range(pdfsrc.getNumPages())]\r\n    writer.removeImages()\r\n\r\ndef remove_images():\r\n    print(\"remove rectangles and images\")\r\n    writer = PdfFileWriter()\r\n\r\n    output_filename= asksaveasfilename(filetypes=(('PDF File', '*.pdf'), ('All Files','*.*')))\r\n    outputfile= open(output_filename+\".pdf\",'wb')\r\n\r\n    add_to_writer(pdf_list[0], writer)\r\n\r\n    #pdf_src = PdfFileReader(inputStream)\r\n\r\n    writer.write(outputfile)\r\n    outputfile.close()\r\n    root.quit()\r\n\r\n##Label(root, text=\"Rectangles remover\").grid(row=0, column=2, sticky=E)\r\nbutton1=Button(root, text=\"Choose file\", command=load1, height = 5, width = 14).grid(row=1, column=0)\r\nLabel(root, textvariable=filename1,width=20).grid(row=1, column=1, sticky=(N,S,E,W))\r\n#photo= PhotoImage(file=resource_path('./button_pic.png'))\r\n\r\n#Button(root, text=\"Remove answers\",image=photo, command=remove_images, width=100, height=120).grid(row=1, column=2,sticky=E)\r\nbutton2=Button(root, text=\"Remove answers\", command=remove_images,font='Helvetica 12 bold', fg=\"red\", height =4).grid(row=1, column=2,sticky=E)\r\n\r\n#Label(root, text=\"Remove Answers^^\").grid(row=2, column=2, sticky=E)\r\n#Label(root, text=\"Good Luck!\").grid(row=2, column=0, sticky=W)\r\n\r\nLabel(root, text='''Please note,\\n\r\nthe application removes a certain layer of objects from the page,\\n\r\nso it will also remove certain images or drawings, if they exist.\\n\r\nThe page does not open properly in certain Adobe programs;\\n\r\nthe simple solution is to right-click the generated file,\\n\r\nright-click > Open with > Chrome, Firefox, or any other program that can display a PDF.\\n\r\n\\n\r\nAnd remember: the best solution is to send a friendly email to the TA in charge after the exam\\nand ask them to also upload a version without the solutions for the coming semesters.\\n\r\n\\n\r\nGood luck!\\n''', font='Helvetica 7', justify=RIGHT).grid(row=3, columnspan=3, sticky=E)\r\n\r\n\r\nfor child in 
root.winfo_children():\r\n    child.grid_configure(padx=10,pady=10)\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n","sub_path":"remover.py","file_name":"remover.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"378711509","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n#    NCTR, Nile Center for Technology Research\n#    Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\n\nfrom odoo import models, fields, api\n\nclass hr_department(models.Model):\n\t_inherit = 'hr.department'\n\n\tanalytic_account_id = fields.Many2one(\"account.analytic.account\", \"Analytic Account\")\n\t_sql_constraints = [\n        ('name_uniq', 'unique (name)', 'The name of the Department must be unique!')\n    ]\n\n\t@api.model\n\tdef create(self,vals):\n\t\t'''\n\t\toverride the create function to set the responsible user of the department's analytic account\n\t\tto the department's manager\n\t\t'''\n\t\tana_id = super(hr_department,self).create(vals)\n\n\t\tif self.manager_id.id != False and self.analytic_account_id.id != False:\n\t\t\tself.analytic_account_id.write({'user_id':self.manager_id.user_id.id})\n\n\t\treturn ana_id\n\n\t@api.multi\n\tdef write(self, vals):\n\t\t'''\n\t\toverride the write function to set the responsible user of the department's analytic account\n\t\tto the department's manager\n\t\t'''\n\t\tana_id = super(hr_department,self).write(vals)\n\n\t\tif self.manager_id.id != False and self.analytic_account_id.id != False:\n\t\t\tself.analytic_account_id.write({'user_id':self.manager_id.user_id.id})\n\n\t\treturn ana_id\n\n\n\n\nclass AccountAnalytic(models.Model):\n\n    _inherit = \"account.analytic.account\"\n\n\n    @api.model\n    def _default_user(self):\n        return self.env.context.get('user_id', self.env.user.id)\n\n    user_id= fields.Many2one('res.users',string='Responsible',required=True ,default=_default_user,readonly=True)\n\n\n","sub_path":"v_11/EBS-SVN/branches/common/hr_department_custom/models/hr_department.py","file_name":"hr_department.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"453194429","text":"from emiproc.grids import COSMOGrid, EDGARGrid, ICONGrid\nimport os\nimport time\n\n# inventory\ninventory = 'EDGAR'\n\n# model either \"cosmo-art\", \"cosmo-ghg\" or \"icon\" (affects the\n# output units and handling of the output grid)\nmodel = 'cosmo-ghg'\n\n# path to input inventory\ninput_path = \"/input/EDGAR/v432_FT_CHE/\"\n\n# Year of the inventory (required for finding the inventory files)\ninput_year = 2015\n\n# input grid\ninput_grid = EDGARGrid(\n    xmin=-30,\n    xmax=60,\n    ymin=30,\n    ymax=69,\n    dx=0.1,\n    dy=0.1,\n)\n\n# input species\nspecies = [\"CO2\"]\n\n# input categories\ncategories = [\n    \"AGS\",\n    \"CHE\",\n    \"ENE\",\n    \"FFF\",\n    \"IND\",\n    \"IRO\",\n    \"NEU\",\n    \"NFE\",\n    \"NMM\",\n    \"PRO\",\n    \"PRU_SOL\",\n    \"RCO\",\n    \"REF_TRF\",\n    \"SWD_INC\",\n    \"TNR_Aviation_CDS\",\n    \"TNR_Aviation_CRS\",\n    \"TNR_Aviation_LTO\",\n    \"TNR_Other\",\n    \"TNR_Ship\",\n    \"TRO\",\n]\n\n# mapping from input to output species (input is used for missing keys)\nin2out_species = {}\n\n# mapping from input to output categories (input is used for missing keys)\n# All the categories will be summed. 
\n# There is no mapping between these categories and GNFR yet\nin2out_category = {}\n\n# output variables are written in the following format using species\nvarname_format = '{species}_EDGAR'\n\n# Domain\n# CHE_Europe domain\noutput_grid = COSMOGrid(\n    nx=760,\n    ny=610,\n    dx=0.05,\n    dy=0.05,\n    xmin=-17,\n    ymin=-11,\n    pollon=-170,\n    pollat=43,\n)\n\n# output path and filename\noutput_path = os.path.join('outputs', 'EDGAR','{online}')\noutput_name = \"edgar.nc\"\n\n\n# resolution of shape file used for country mask\nshpfile_resolution = \"10m\"\n\n# number of processes computing the mapping inventory->COSMO-grid\nnprocs = 18\n\n# metadata added as global attributes to netCDF output file\nnc_metadata = {\n    \"DESCRIPTION\": \"Gridded annual emissions\",\n    \"DATAORIGIN\": \"TNO\",\n    \"CREATOR\": \"Jean-Matthieu Haussaire\",\n    \"EMAIL\": \"jean-matthieu.haussaire@empa.ch\",\n    \"AFFILIATION\": \"Empa Duebendorf, Switzerland\",\n    \"DATE CREATED\": time.ctime(time.time()),\n}\n","sub_path":"cases/config_edgar.py","file_name":"config_edgar.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"172062093","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport re\nimport time\n\nfrom urllib.parse import quote\n\nimport aria2p\nimport feedparser\nimport requests\nfrom jinja2 import Template\nfrom pymongo import MongoClient\n\nfrom .utils import default_user_agent, escapeText, postData\n\n\nclass Anime:\n    def __init__(self, config, rss):\n        env = os.environ\n\n        if env.get(\"ARIA2_HOST\"):\n            a_host = env[\"ARIA2_HOST\"]\n            if not a_host.startswith(\"http\"):\n                a_host = \"http://\" + a_host\n        else:\n            a_host = config[\"aria2\"][\"host\"]\n            if not a_host.startswith(\"http\"):\n                a_host = \"http://\" + a_host\n\n        a_port = int(env.get(\"ARIA2_PORT\") or config[\"aria2\"][\"port\"] or 6800)\n\n        a_secret = env.get(\"ARIA2_SECRET\") or config[\"aria2\"][\"secret\"]\n\n        if a_host:\n            self.aria2 = aria2p.API(\n                aria2p.Client(\n                    host=a_host,\n                    port=a_port,\n                    secret=a_secret,\n                )\n            )\n        else:\n            self.aria2 = None\n\n        self.telegram = {}\n        if env.get(\"TELEGRAM_ENABLE\"):\n            self.telegram[\"token\"] = env[\"TELEGRAM_TOKEN\"]\n            self.telegram[\"chat_id\"] = env[\"TELEGRAM_CHAT_ID\"]\n        elif config[\"telegram\"][\"enable\"]:\n            self.telegram = config[\"telegram\"]\n\n        self.url = env.get(\"BASE_URL\") or config.get(\"base_url\") or None\n        mongo_url = env.get(\"DATABASE\") or config[\"mongo_url\"]\n\n        self.rss = rss[\"Anime\"]\n        self.template = Template(rss[\"Template\"])\n\n        client = MongoClient(mongo_url)\n        self.db = client[\"Anime\"]\n\n    def readRSS(self, send=None):\n        if send != None:\n            self.send = send\n\n        for s in self.rss:\n            self.handleRSS(s)\n\n    # RSS source:\n    # 1. https://mikanani.me/\n    # 2. 
https://rssbg.now.sh\n    def handleRSS(self, a):\n        entries = feedparser.parse(\n            a[\"url\"],\n            request_headers={\"user-agent\": default_user_agent},\n        )[\"entries\"]\n        regex = re.compile(a[\"regex\"])\n\n        for r in entries:\n            if regex.match(r[\"title\"]) and not self.db[\"Download\"].find_one(\n                {\"title\": r[\"title\"]}\n            ):\n                download_link = None\n                for l in r[\"links\"]:\n                    if l[\"type\"] == \"application/x-bittorrent\":\n                        download_link = l[\"href\"]\n\n                download_link = download_link or r[\"link\"]\n\n                if self.sendToAria2(a[\"path\"], download_link):\n                    down = {\n                        \"series\": a[\"series\"],\n                        \"title\": r[\"title\"],\n                        \"link\": download_link,\n                        \"create_time\": time.time(),\n                    }\n\n                    self.db[\"Download\"].insert_one(down)\n                    if self.telegram:\n                        self.sendToTelegram(\n                            r[\"title\"], a[\"type\"], a[\"series\"], a[\"path\"]\n                        )\n\n    def sendToAria2(self, path, url):\n        if not self.send:\n            print(\"未添加Aria2客户端或仅作为测试!链接为: \", url)\n            return False\n\n        else:\n            if url.startswith(\"magnet:?xt=\"):\n                try:\n                    self.aria2.add_magnet(url, options={\"dir\": path})\n                    print(\"添加成功 Magnet: \", url)\n                    return True\n                except Exception as e:\n                    print(\"添加失败 Magnet: \", url, e)\n                    return False\n\n            else:\n                r = requests.get(url)\n                with open(\"tmp.torrent\", \"wb\") as f:\n                    f.write(r.content)\n\n                try:\n                    self.aria2.add_torrent(\"tmp.torrent\", options={\"dir\": path})\n                    print(\"添加成功 Torrent: \", url)\n                    return True\n                except Exception as e:\n                    print(\"添加失败 Torrent: \", url, e)\n                    return False\n                finally:\n                    os.remove(\"tmp.torrent\")\n\n    def sendToTelegram(self, title, type, series, path):\n        args = {\n            \"title\": escapeText(title),\n            \"type\": escapeText(type),\n            \"series\": escapeText(series),\n            \"link\": self.url.rstrip(\"/\") + \"/\" + quote(path.strip(\"/\").split(\"/\")[-1])\n            if self.url\n            else \"\",\n        }\n\n        msg = self.template.render(args)\n\n        url = \"https://api.telegram.org/bot\" + self.telegram[\"token\"] + \"/sendMessage\"\n        payload = {\n            \"chat_id\": self.telegram[\"chat_id\"],\n            \"text\": msg,\n            \"parse_mode\": \"MarkdownV2\",\n        }\n\n        r = postData(url, data=payload)\n\n        if r.json().get(\"ok\"):\n            print(title + \" 已成功发送到Telegram!\")\n","sub_path":"AR2A/anime.py","file_name":"anime.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"314793192","text":"from flask import Blueprint\r\nfrom flask import render_template\r\nfrom .forms import PostForm\r\nfrom app.models import Post\r\nfrom flask import request, redirect, url_for\r\nfrom app import db\r\nfrom flask_security import login_required\r\n\r\nposts = Blueprint(\"posts\", __name__, template_folder=\"templates\")\r\n\r\n\r\n# http://localhost/blog/create\r\n@login_required\r\n@posts.route(\"/create\", methods=[\"POST\", \"GET\"])\r\ndef create_post():\r\n    if request.method == \"POST\":\r\n        title = request.form[\"title\"]\r\n        body = request.form['body']\r\n        if title:\r\n            try:\r\n                post = Post(title=title, body=body)\r\n                db.session.add(post)\r\n                db.session.commit()\r\n            except:\r\n                print(\"Something wrong\")\r\n        return redirect(url_for(\"posts.index\"))\r\n    form = PostForm()\r\n    return render_template('posts/create_post.html', form=form)\r\n\r\n\r\n@login_required\r\n@posts.route(\"/<slug>/edit\", methods=[\"POST\", \"GET\"])\r\ndef edit_post(slug):\r\n    post = Post.query.filter(Post.slug == slug).first()\r\n\r\n    if request.method == \"POST\":\r\n        form = PostForm(formdata=request.form, obj=post)\r\n        form.populate_obj(post)\r\n        db.session.commit()\r\n        return
redirect(url_for(\"posts.post_detail\", slug=post.slug))\r\n\r\n form = PostForm(obj=post)\r\n return render_template(\"posts/edit_post.html\", post=post, form=form)\r\n\r\n\r\n@posts.route(\"/film\")\r\ndef index():\r\n q = request.args.get(\"q\")\r\n if q:\r\n posts = Post.query.filter(Post.title.contains(q) | Post.body.contains(q)).all()\r\n else:\r\n posts = Post.query.order_by(Post.created.desc())\r\n return render_template(\"posts/index.html\", posts=posts)\r\n\r\n\r\n@posts.route(\"/soon\")\r\ndef soon():\r\n q = request.args.get(\"q\")\r\n if q:\r\n postsoon = Post.query.filter(Post.title.contains(q) | Post.body.contains(q)).all()\r\n else:\r\n postsoon = Post.query.order_by(Post.created.desc())\r\n return render_template(\"posts/soon.html\", postsoon=postsoon)\r\n\r\n\r\n@posts.route(\"/\")\r\ndef post_detail(slug):\r\n post = Post.query.filter(Post.slug == slug).first()\r\n return render_template(\"posts/post_detail.html\", post=post)\r\n","sub_path":"blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"359058131","text":"from config import db\r\nfrom sqlalchemy.sql import func, desc\r\n\r\nuser_likes = db.Table(\"user_likes\",\r\n db.Column(\"user_like\", db.Integer, db.ForeignKey(\"users.user_id\"), primary_key = True),\r\n db.Column(\"idea_liked\", db.Integer, db.ForeignKey(\"ideas.idea_id\"), primary_key = True)\r\n )\r\n\r\nfriends_table = db.Table(\"friends\",\r\n db.Column(\"friender_id\", db.Integer, db.ForeignKey(\"users.user_id\"), primary_key=True),\r\n db.Column(\"friendee_id\", db.Integer, db.ForeignKey(\"users.user_id\"), primary_key=True)\r\n)\r\n\r\nblock_list = db.Table(\"blocked\",\r\n db.Column(\"blocker_id\", db.Integer, db.ForeignKey(\"users.user_id\"), primary_key=True),\r\n db.Column(\"blockee_id\", db.Integer, db.ForeignKey(\"users.user_id\"), primary_key=True)\r\n)\r\n\r\nclass User(db.Model):\r\n __tablename__ = \"users\"\r\n user_id = db.Column(db.Integer, primary_key = True)\r\n f_name = db.Column(db.String(45))\r\n l_name = db.Column(db.String(45))\r\n email = db.Column(db.String(45))\r\n password = db.Column(db.String(255))\r\n admin_status = db.Column(db.Integer)\r\n profile = db.Column(db.String(1600))\r\n created_at = db.Column(db.DateTime, server_default=func.now())\r\n updated_at = db.Column(db.DateTime, server_default=func.now(), onupdate=func.now())\r\n liked_idea = db.relationship(\"Idea\", secondary = \"user_likes\")\r\n friends=db.relationship(\"User\", \r\n secondary=friends_table, \r\n primaryjoin=user_id==friends_table.c.friendee_id, \r\n secondaryjoin=user_id==friends_table.c.friender_id,\r\n backref=\"friending\")\r\n blocked=db.relationship(\"User\", \r\n secondary=block_list, \r\n primaryjoin=user_id==block_list.c.blockee_id, \r\n secondaryjoin=user_id==block_list.c.blocker_id,\r\n backref=\"blocking\") \r\n\r\n def create_admin(self):\r\n admin = User(\r\n f_name= \"Admin\",\r\n l_name= \"Admin\",\r\n email = \"admin\",\r\n admin_status = 1,\r\n password = self)\r\n db.session.add(admin)\r\n db.session.commit() \r\n\r\nclass Idea(db.Model):\r\n __tablename__=\"ideas\"\r\n idea_id = db.Column(db.Integer, primary_key = True)\r\n content = db.Column(db.String(255))\r\n created_at = db.Column(db.DateTime, server_default=func.now())\r\n updated_at = db.Column(db.DateTime, server_default=func.now())\r\n author_id = db.Column(db.Integer, db.ForeignKey(\"users.user_id\"))#, nullable = False)\r\n author = 
db.relationship(\"User\", foreign_keys = [author_id], backref = \"user_ideas\", cascade = \"all\")\r\n liked_by = db.relationship(\"User\", secondary = \"user_likes\")\r\n \r\n# class Follow(db.Model):\r\n# __tablename__=\"follows\"\r\n# id=db.Column(db.Integer, primary_key=True)\r\n# user_id=db.Column(db.Integer, db.ForeignKey(\"users.user_id\"))\r\n# user=db.relationship(\"User\",backref=\"likes\", cascade=\"all\")\r\n# user_id=db.Column(db.Integer, db.ForeignKey(\"users.user_id\"))\r\n# user=db.relationship(\"User\",backref=\"likes\", cascade=\"all\")\r\n# created_at=db.Column(db.DateTime, server_default=func.now())","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"336129315","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n给你一个包含 n 个整数的数组 nums,判断 nums 中是否存在三个元素 a,b,c ,使得 a + b + c = 0 ?请你找出所有和为 0 且不重复的三元组。\n\n注意:答案中不可以包含重复的三元组。\n\n \n\n示例 1:\n\n输入:nums = [-1,0,1,2,-1,-4]\n输出:[[-1,-1,2],[-1,0,1]]\n示例 2:\n\n输入:nums = []\n输出:[]\n示例 3:\n\n输入:nums = [0]\n输出:[]\n \n\n提示:\n\n0 <= nums.length <= 3000\n-105 <= nums[i] <= 105\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/3sum\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\nfrom typing import List\nimport doctest\n\n\nclass Solution:\n \"\"\"\n >>> s = Solution()\n >>> s.threeSum([])\n []\n >>> s.threeSum([0])\n []\n >>> s.threeSum([-1, 0, 1, 2, -1, -4])\n [[-1, -1, 2], [-1, 0, 1]]\n \"\"\"\n\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n 双指针法\n\n 为了避免重复,可以先将数组排序\n \"\"\"\n ret = []\n n = len(nums)\n nums.sort() # 先排序\n\n # 枚举所有的数字\n for first_num in range(n):\n # 如果first_num不是数组的第一个数且下一个数字和当前的数字重复,则舍弃当前的数字使用下一个数字\n if first_num > 0 and nums[first_num] == nums[first_num - 1]:\n continue\n third_num = n - 1 # 第三个数-右指针先指向末尾\n target = -nums[first_num] # 目标值,类似于两数之和\n\n # 第二个数需要从第一个数右边开始遍历来保证顺序\n for second_num in range(first_num + 1, n):\n # 第二个数需要和上一次枚举的数字不同\n if second_num > first_num + 1 and nums[second_num] == nums[second_num - 1]:\n continue\n while second_num < third_num and nums[second_num] + nums[third_num] > target:\n third_num -= 1\n\n # 如果左右指针重合,随着第二个数的增加,将不存在符合要求的第三个数了\n if second_num == third_num:\n break\n\n if nums[second_num] + nums[third_num] == target:\n ret.append([nums[first_num], nums[second_num], nums[third_num]])\n\n return ret\n\n def threeSum1(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n 暴力解法\n\n 最坏情况:\n 时间复杂度:O(n^3)\n 空间复杂度:O(1)\n\n 超时\n \"\"\"\n if len(nums) < 3:\n return []\n ret = []\n nums.sort() # 先排序\n times = 0\n\n for i in range(len(nums) - 2):\n if nums[i] > 0: # 因为排序之后后面的数字更大\n break\n for j in range(i + 1, len(nums) - 1):\n if nums[i] + nums[j] > 0:\n break\n for k in range(j + 1, len(nums)):\n times += 1\n if nums[i] + nums[j] + nums[k] > 0:\n break\n if nums[i] + nums[j] + nums[k] == 0:\n ret.append([nums[i], nums[j], nums[k]])\n # 将结果去重\n _ret = []\n _dict = {}\n for i in ret:\n if str(i) in _dict:\n continue\n else:\n _dict[str(i)] = 1\n _ret.append(i)\n return _ret\n\n\nif __name__ == '__main__':\n doctest.testmod()\n","sub_path":"algorithms/leetcode/medium/0015_三数之和.py","file_name":"0015_三数之和.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"489735962","text":"#!/usr/bin/env python3\n\nfrom pathlib import Path\nimport sys\nimport cv2\nimport depthai as dai\nimport numpy as np\n\n\n# Get 
argument first\nmobilenet_path = str((Path(__file__).parent / Path('models/mobilenet.blob')).resolve().absolute())\nif len(sys.argv) > 1:\n mobilenet_path = sys.argv[1]\n\npipeline = dai.Pipeline()\n\ncam = pipeline.createColorCamera()\ncam.setBoardSocket(dai.CameraBoardSocket.RGB)\ncam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n\nvideoEncoder = pipeline.createVideoEncoder()\nvideoEncoder.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN)\ncam.video.link(videoEncoder.input)\n\nvideoOut = pipeline.createXLinkOut()\nvideoOut.setStreamName('h265')\nvideoEncoder.bitstream.link(videoOut.input)\n\nleft = pipeline.createMonoCamera()\nleft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)\nleft.setBoardSocket(dai.CameraBoardSocket.LEFT)\n\nright = pipeline.createMonoCamera()\nright.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)\nright.setBoardSocket(dai.CameraBoardSocket.RIGHT)\n\ndepth = pipeline.createStereoDepth()\ndepth.setConfidenceThreshold(200)\n# Note: the rectified streams are horizontally mirrored by default\ndepth.setOutputRectified(True)\ndepth.setRectifyEdgeFillColor(0) # Black, to better see the cutout\nleft.out.link(depth.left)\nright.out.link(depth.right)\n\ndetection_nn = pipeline.createNeuralNetwork()\ndetection_nn.setBlobPath(mobilenet_path)\n\nxout_depth = pipeline.createXLinkOut()\nxout_depth.setStreamName(\"depth\")\ndepth.disparity.link(xout_depth.input)\n\nxout_right = pipeline.createXLinkOut()\nxout_right.setStreamName(\"rect_right\")\ndepth.rectifiedRight.link(xout_right.input)\n\nmanip = pipeline.createImageManip()\nmanip.initialConfig.setResize(300, 300)\n# The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)\nmanip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)\ndepth.rectifiedRight.link(manip.inputImage)\nmanip.out.link(detection_nn.input)\n\nxout_manip = pipeline.createXLinkOut()\nxout_manip.setStreamName(\"manip\")\nmanip.out.link(xout_manip.input)\n\nxout_nn = pipeline.createXLinkOut()\nxout_nn.setStreamName(\"nn\")\ndetection_nn.out.link(xout_nn.input)\n\n# MobilenetSSD label texts\ntexts = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\",\n \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\n\n# Pipeline defined, now the device is connected to\nwith dai.Device(pipeline) as device:\n # Start pipeline\n device.startPipeline()\n\n q_right = device.getOutputQueue(name=\"rect_right\", maxSize=8, blocking=False)\n q_manip = device.getOutputQueue(name=\"manip\", maxSize=8, blocking=False)\n q_depth = device.getOutputQueue(name=\"depth\", maxSize=8, blocking=False)\n q_nn = device.getOutputQueue(name=\"nn\", maxSize=8, blocking=False)\n q_rgb_enc = device.getOutputQueue(name=\"h265\", maxSize=30, blocking=True)\n\n frame_right = None\n frame_manip = None\n frame_depth = None\n bboxes = []\n labels = []\n\n\n def frame_norm(frame, bbox):\n norm_vals = np.full(len(bbox), frame.shape[0])\n norm_vals[::2] = frame.shape[1]\n return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)\n\n videoFile = open('video.h265','wb')\n\n while True:\n in_right = q_right.tryGet()\n in_manip = q_manip.tryGet()\n in_nn = q_nn.tryGet()\n in_depth = q_depth.tryGet()\n\n while q_rgb_enc.has():\n q_rgb_enc.get().getData().tofile(videoFile)\n\n if in_right is not None:\n shape = 
(in_right.getHeight(), in_right.getWidth())\n frame_right = in_right.getData().reshape(shape).astype(np.uint8)\n frame_right = np.ascontiguousarray(frame_right)\n\n if in_manip is not None:\n shape = (3, in_manip.getHeight(), in_manip.getWidth())\n frame_manip = in_manip.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)\n frame_manip = np.ascontiguousarray(frame_manip)\n\n if in_nn is not None:\n bboxes = np.array(in_nn.getFirstLayerFp16())\n bboxes = bboxes.reshape((bboxes.size // 7, 7))\n bboxes = bboxes[bboxes[:, 2] > 0.5]\n # Cut bboxes and labels\n labels = bboxes[:, 1].astype(int)\n bboxes = bboxes[:, 3:7]\n\n if in_depth is not None:\n frame_depth = in_depth.getData().reshape((in_depth.getHeight(), in_depth.getWidth())).astype(np.uint8)\n frame_depth = np.ascontiguousarray(frame_depth)\n frame_depth = cv2.applyColorMap(frame_depth, cv2.COLORMAP_JET)\n\n if frame_right is not None:\n for raw_bbox, label in zip(bboxes, labels):\n bbox = frame_norm(frame_right, raw_bbox)\n cv2.rectangle(frame_right, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)\n cv2.putText(frame_right, texts[label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n cv2.imshow(\"rectif_right\", frame_right)\n\n if frame_manip is not None:\n for raw_bbox, label in zip(bboxes, labels):\n bbox = frame_norm(frame_manip, raw_bbox)\n cv2.rectangle(frame_manip, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)\n cv2.putText(frame_manip, texts[label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)\n cv2.imshow(\"manip\", frame_manip)\n\n if frame_depth is not None:\n for raw_bbox, label in zip(bboxes, labels):\n bbox = frame_norm(frame_depth, raw_bbox)\n cv2.rectangle(frame_depth, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)\n cv2.putText(frame_depth, texts[label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 255))\n cv2.imshow(\"depth\", frame_depth)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\n videoFile.close()\n\n print(\"To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:\")\n print(\"ffmpeg -framerate 30 -i video.h265 -c copy video.mp4\")\n","sub_path":"examples/12_rgb_encoding_mono_mobilenet_depth.py","file_name":"12_rgb_encoding_mono_mobilenet_depth.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"646406200","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
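The `frame_norm` helper in the DepthAI record above converts MobileNet-SSD's normalized `[0, 1]` boxes into pixel coordinates; a self-contained check on a dummy frame:

```python
import numpy as np

# Even indices (x coords) scale by frame width, odd indices (y coords)
# by frame height; np.clip guards against boxes slightly outside [0, 1].
def frame_norm(frame, bbox):
    norm_vals = np.full(len(bbox), frame.shape[0])
    norm_vals[::2] = frame.shape[1]
    return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)

frame = np.zeros((400, 640), dtype=np.uint8)    # 400 high, 640 wide
print(frame_norm(frame, [0.1, 0.2, 0.5, 0.9]))  # -> [ 64  80 320 360]
```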
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../../'))\n\nautodoc_mock_imports = [\"numpy\", \"pandas\"]\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'AeroEvap'\ncopyright = '2019-2021, Chris Pearson and John Volk'\nauthor = 'Chris Pearson and John Volk'\n\n# The short X.Y version\nversion = ''\n# The full version, including alpha/beta/rc tags\nrelease = ''\n\ntry:\n from aeroevap import __version__ as version\nexcept ImportError:\n pass\nelse:\n release = version\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\nadd_function_parentheses = False\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [\"_themes\", ]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_favicon = '_static/favicon.ico'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'AeroEvapdoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'AeroEvap.tex', 'AeroEvap Documentation',\n 'Chris Pearson and John Volk', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'AeroEvap', 'AeroEvap Documentation',\n [author], 1)\n]\n\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'AeroEvap', 'AeroEvap Documentation',\n author, 'AeroEvap', \n 'Calculate evaporation using the aerodynamic mass-transfer approach.',\n 'Miscellaneous'),\n]\n\n\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'sphinx': ('http://www.sphinx-doc.org/en/stable', None),\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),\n}\n\nhtml_css_files = [\n 'custom.css',\n]\nhtml_js_files = [\n 'copybutton.js',\n 'custom.js',\n 'https://cdn.jsdelivr.net/npm/clipboard@1/dist/clipboard.min.js'\n]\n\n","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"75451472","text":"import numpy as np\nimport gym\nfrom dnc.dnc import DNC\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\n\n\ngamma = 0.99 # discount factor for reward\n\n\ndef repackage_hidden(h):\n \"\"\"Wraps hidden states in new Tensors,\n to detach them from their history.\"\"\"\n if isinstance(h, torch.Tensor):\n return h.detach()\n elif isinstance(h, (list, )):\n return [repackage_hidden(v) for v in h]\n else:\n return tuple(repackage_hidden(v) for v in h)\n\n\ndef repackage_hidden_dnc(h):\n if h is None:\n return None\n\n (chx, mhxs, _) = h\n chx = repackage_hidden(chx)\n if type(mhxs) is list:\n mhxs = [dict([(k, repackage_hidden(v)) for k, v in mhx.items()]) for mhx in mhxs]\n else:\n mhxs = dict([(k, repackage_hidden(v)) for k, v in mhxs.items()])\n return (chx, mhxs, None)\n\n\ndef discount_rewards(r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n\nenv = gym.make('CartPole-v0')\ninput_size = 4\npolicy = DNC(4,200,gpu_id=0, output_size=1)\nhidden = None\nobservation = env.reset()\nepisode_number = 0\nreward_sum = 0\nreset = False\ndone_reward_stack, x_stack, y_stack, done_stack = [], [], [], []\n\noptimizer = optim.Adam(policy.parameters(), lr=0.0001, eps=1e-9, betas=[0.9, 0.98]) # 0.0001\nwhile episode_number <= 5000:\n env.render()\n x = torch.from_numpy(np.reshape(observation,[1,4])).unsqueeze(1)\n x = x.type(torch.FloatTensor)\n x = x.cuda()\n hidden = repackage_hidden_dnc(hidden)\n left_prob, hidden = policy(x, hidden, reset_experience=reset)\n reset = False\n action = 1 if np.random.uniform() < left_prob.item() else 0\n # record various intermediates (needed later for backprop)\n x_stack.append(x)\n y = 1 if action == 0 else 0\n y_stack.append(y)\n observation, reward, done, info = env.step(action)\n reward_sum += reward\n done_stack.append(done * 1)\n done_reward_stack.append(reward) # record reward (has to be done after we call step() to get reward for previous action)\n\n if done:\n episode_number += 1\n observation = env.reset()\n reset = True\n\n # stack together all inputs, hidden states, action gradients, and rewards for this episode\n epx = np.vstack(x_stack)\n epy = np.vstack(y_stack)\n epr = np.vstack(done_reward_stack)\n epd = 
np.vstack(done_stack)\n x_stack, done_reward_stack, y_stack, done_stack = [], [], [], [] # reset array memory\n discounted_epr = discount_rewards(epr).astype('float32')\n discounted_epr -= np.mean(discounted_epr)\n discounted_epr /= np.std(discounted_epr)","sub_path":"recurrent/myrl.py","file_name":"myrl.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"606409157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 9 2020\n\n@authors: Romane GALLIENNE, Cindy PEREIRA\n\"\"\"\n\nfrom Fonctions import createDico\nfrom Fonctions import readFile\nimport os\nimport pickle\nfrom sys import argv\n\n\"\"\"\nFichier permettant de vectoriser les corpus tests\nNE PAS LANCER, LES VARIABLES SONT DEJA SAUVEGARDEES\n\"\"\"\n\n# Récupère chaque texte de toutes les langues\ndef recuperationTextesCorpus(langue) :\n path = os.getcwd()\n directory = os.path.abspath(os.path.join(path, os.pardir)) + '/Corpus/' + langue\n txt = \"\"\n for fileName in os.listdir(directory) :\n txt += readFile(directory + \"/\" + fileName)\n return txt\n\n\n# Crée le dictionnaire n-gramme de chaque langue et le sérialise\ndef recuperationDico(langue, n) :\n text = recuperationTextesCorpus(langue)\n dico = createDico(text, n)\n fileName = os.getcwd() + '/variables/' + langue + 'Dico' + str(n) + '.pkl'\n f = open(fileName, 'wb')\n pickle.dump(dico, f)\n f.close()\n\n\nn = 2\n# Si l'utilisateur a entré un n, on le récupère, sinon on le fixe à 2\nif len(argv) > 1 :\n n = int(argv[1])\n\nrecuperationDico(\"anglais\", n)\nrecuperationDico(\"allemand\", n)\nrecuperationDico(\"espagnol\", n)\nrecuperationDico(\"francais\", n)\nrecuperationDico(\"portugais\", n)\n","sub_path":"src/VectorisationCorpus.py","file_name":"VectorisationCorpus.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"577035052","text":"import socket\nimport threading\nimport queue\nimport time\nimport pickle\n\nclients = []\nmessages = queue.Queue()\n\nhost = \"127.0.0.1\"\n\nport = 9000\n\nconnection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nconnection.bind((host, port))\nconnection.listen(5)\n\ndef acceptConnections():\n while True:\n conn, addr = connection.accept()\n client_dict = {\"NICKNAME\": \"\", \"CONNECTION_TS\": time.time(), \"CLIENT\": conn, \"ROOM\": \"\"}\n clients.append(client_dict)\n print(\"New Client Connected: \", addr)\n t = threading.Thread(target=client_thread, args=(conn,))\n t.start()\n\n\ndef broadcast_messages():\n while True:\n msg = messages.get()\n data = pickle.dumps(msg)\n for c in clients:\n if c[\"ROOM\"] == msg[\"room\"]:\n try:\n c[\"CLIENT\"].send(data)\n print(msg)\n except:\n c[\"CLIENT\"].close()\n if c in clients:\n clients.remove(c)\n\n\ndef client_thread(conn):\n while True:\n try:\n data = conn.recv(1024)\n msg = pickle.loads(data)\n print(\"New message \", msg)\n\n if msg[\"action\"] == \"reg\":\n file = open(\"users.txt\", \"a\")\n file.write(msg[\"username\"] + \" \" + msg[\"nickname\"] + \" \" + msg[\"password\"] + \"\\n\")\n file.close()\n\n response_msg = {\"action\":\"reg_success\"}\n response_data= pickle.dumps(response_msg)\n conn.send(response_data)\n elif msg[\"action\"] == \"msg\":\n messages.put(msg)\n elif msg[\"action\"] == \"join_room\": #if statement for at joine et rum\n for client_index in range(clients.__len__()):\n if clients[client_index][\"CLIENT\"] == conn:\n 
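`discount_rewards` in the DNC/CartPole record above is the standard REINFORCE return computation; a tiny numeric check (gamma = 0.99, as in that file):

```python
import numpy as np

gamma = 0.99

# discounted_r[t] = r[t] + gamma * r[t+1] + gamma^2 * r[t+2] + ...
def discount_rewards(r):
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r

print(discount_rewards(np.array([1.0, 1.0, 1.0])))  # -> [2.9701 1.99 1.]
```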
messages.put({\"action\": \"leave_room\", \"room\": clients[client_index][\"ROOM\"], \"nickname\": msg[\"nickname\"]})\n clients[client_index][\"ROOM\"] = msg[\"room\"]\n messages.put(msg)\n elif msg[\"action\"] == \"login\":\n file = open(\"users.txt\", \"r\")\n users = file.read().splitlines()\n login_success = False\n for line_index in range(users.__len__()):\n temp_user = users[line_index].split(\" \")\n if temp_user[0] == msg[\"username\"] and temp_user[2] == msg[\"password\"]:\n login_msg = pickle.dumps({\"action\":\"msg\", \"msg\":temp_user[1] + \" has joined Hackers Paradise\"})\n for client_index in range(clients.__len__()):\n if clients[client_index][\"CLIENT\"] == conn:\n clients[client_index][\"ROOM\"] = \"Black Hats\"\n clients[client_index][\"NICKNAME\"] = temp_user[1]\n else:\n # besked til alle clients undtagen den som prøver at logge ind om at en ny client er logget in\n if clients[client_index][\"ROOM\"] != \"\":\n clients[client_index][\"CLIENT\"].send(login_msg)\n response_data = pickle.dumps({\"action\":\"login_success\", \"nickname\":temp_user[1]})\n conn.send(response_data)\n login_success = True\n\n if login_success == False:\n response_data = pickle.dumps({\"action\": \"login_failed\"})\n conn.send(response_data)\n elif msg[\"action\"] == \"logout\":\n for client_index in range(clients.__len__()):\n if clients[client_index][\"CLIENT\"] == conn:\n clients[client_index][\"ROOM\"] = \"Black Hats\"\n clients[client_index][\"NICKNAME\"] = \"\"\n messages.put(msg)\n print(\"New message \", msg)\n except:\n continue\n\nthreading.Thread(target=broadcast_messages).start()\nacceptConnections()","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"582245624","text":"from wxpy import *\nimport threading\nbot = Bot(True)\n\n@bot.register()\ndef send_to_file(msgs):\n\tfile_data = \"data.txt\"\n\twith open(file_data, \"w\") as f:\n\t\tfor msg in msgs:\n\t\t\tif msg.type == \"Sharing\":\n\t\t\t\tprint(msg)\n\t\t\t\tprint(msg.url)\n\t\t\t\tf.write(msg.url+'\\n')\n\t\tf.close()\n\ndef repeat():\n\n\tprint(\"Running!\")\n\tsend_to_file(bot.messages)\n\ttimer = threading.Timer(10, repeat)\n\ttimer.start()\n\nrepeat()\n\n\n","sub_path":"微信机器人/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"583720392","text":"# coding: utf-8\n\nfrom __future__ import division\n\nimport copy\nimport sys\nimport types\n\nfrom exercise_recommendation.envs import *\nfrom exercise_recommendation.policies import *\nfrom exercise_recommendation.tutors import *\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.policies.categorical_gru_policy import CategoricalGRUPolicy\n\n\ndef make_rl_student_env(env):\n \"\"\"\n\n Args:\n env:\n\n Returns:\n\n \"\"\"\n env = copy.deepcopy(env)\n\n env.n_item_feats = int(np.log(2 * env.n_items))\n\n env.item_feats = np.random.normal(\n np.zeros(2 * env.n_items * env.n_item_feats),\n np.ones(2 * env.n_items * env.n_item_feats)).reshape((2 * env.n_items, env.n_item_feats))\n\n env.observation_space = spaces.Box(\n np.concatenate((np.ones(env.n_item_feats) * -sys.maxsize, np.zeros(1))),\n np.concatenate((np.ones(env.n_item_feats) * sys.maxsize, np.ones(1)))\n )\n\n def encode_item(self, item, outcome):\n return self.item_feats[self.n_items * outcome + item, :]\n\n def 
vectorize_obs(self, item, outcome):\n return np.concatenate((self.encode_item(item, outcome), np.array([outcome])))\n # return self.encode_item(item, outcome)\n\n env._obs_orig = env._obs\n\n def _obs(self):\n item, outcome = env._obs_orig()\n\n return self.vectorize_obs(item, outcome)\n\n env.encode_item = types.MethodType(encode_item, env)\n env.vectorize_obs = types.MethodType(vectorize_obs, env)\n env._obs = types.MethodType(_obs, env)\n\n return env\n\n\ndef all_reset(agent):\n \"\"\"\n Reset policy and student model for recommendation (when the agent recommends to a new student)\n \"\"\"\n agent.raw_policy.env.env.recomreset()\n agent.raw_policy.policy.reset()\n\n return agent\n\n\ndef simulation(agent, trace, steps):\n \"\"\"\n Simulate the recommendation given the student history exercise trace\n :param agent: recommendation policy\n :param trace: student history exercise trace\n :param steps: the number of exercises recommended to the student\n :return: recommended exercises and his predicted knowledge status\n \"\"\"\n recom_trace = []\n a2i = dict(zip(candidate_exercises, range(len(candidate_exercises))))\n trace = [(a2i[i[0]], i[1]) for i in trace]\n for q, a in trace:\n obs = agent.raw_policy.env.env.vectorize_obs(q, a)\n recomq = agent.guide(obs)\n\n res = []\n for t in range(steps):\n prob = agent.raw_policy.env.env.predict(candidate_exercises[recomq])\n answer = 1 if np.random.random() < prob else 0\n\n # obs = agent.raw_policy.env.env.vectorize_obs(recomq, answer)\n recom_trace.append((recomq, answer))\n obs = agent.raw_policy.env.env.actualStep(recomq, answer)\n res.append(np.mean(list(map(agent.raw_policy.env.env.predict, candidate_exercises))))\n recomq = agent.guide(obs)\n\n return recom_trace, res\n\n\ndef evaluation(agent):\n \"\"\"\n Evaluate the policy when it recommend exercises to different student\n student_traces:[[(923, 1), (175, 0), (1010, 1), (857, 0), (447, 0)], [........], [.........]]\n :param agent:\n :return: different students'predicted knowledge status\n \"\"\"\n # with open('./好未来数据/student_traces.', 'rb') as f:\n # student_traces = pickle.load(f)\n allre = [[] for i in range(50)]\n for trace in student_traces:\n agent = all_reset(agent)\n t, res = simulation(agent, trace, 50)\n print(\"Preporuceni put: \" + str(t))\n for j in range(50):\n allre[j].append(res[j])\n result = [np.mean(k) for k in allre]\n return result\n\n\n# def run_eps(agent, env, n_eps=100):\n# tot_rew = []\n# for i in range(n_eps):\n# totalr, _ = run_ep(agent, env)\n# tot_rew.append(totalr)\n# return tot_rew\n\n\n# student_traces = [[(1, 0), (3, 1)], [(6, 1), (6, 0), (7, 1)]]\n# stu = [[(51424, 0), (51435, 1),(51444, 1)]]\nstu = [[(85829, 0), (85838, 1)]]\n\n# the parameters of trained DKVMN-CA model\nwith open('old/checkpoint/skill_builder0_10batch_2epochs/kt_params', 'rb') as f:\n # with open('old/checkpoint/skill_builder0_10batch_2epochs/kt_params', 'rb') as f:\n params = pickle.load(f)\n\n# Knowledge Concepts Corresponding to the exercise\nwith open('data/skill_builder/chunk_exercise_concepts_mapping.pkl', 'rb') as f:\n e2c = pickle.load(f)\n\nwith open('data/skill_builder/chunk_exercises_id_converter.pkl', 'rb') as f:\n exercises_id_converter = pickle.load(f)\n\n# cands=[51424,51435,51444,51395,51481]\ncands = [85829, 61089, 85814, 85838]\n\ncandidate_exercises = [exercises_id_converter[e] for e in cands]\nstudent_traces = [[(exercises_id_converter[e], a) for e, a in t] for t in stu]\n\n# current problems:\n# key error?\n\nConcepts = 9 # number of concepts\nNumQ = 2446 # 
number of exercises\n# Concepts = 123 # number of concepts\n# NumQ = 17751 # number of exercises\nn_steps = 5 # number of steps of algorithm\nn_items = len(candidate_exercises) # number of candidate exercises\n# n_items = [len(candidate_exercises[i]) for i in candidate_exercises]\ndiscount = 0.99\nn_eps = 1 # number of epochs in algorithm\n\nreward_funcs = ['likelihood']\nenvs = [\n ('DKVMN', DKVEnv)\n]\n\ntutor_builders = [\n ('RL', RLTutor)\n]\n\nenv_kwargs = {\n 'n_items': n_items, 'n_steps': n_steps, 'discount': discount\n}\n\n# env = DKVEnv(**env_kwargs, reward_func='likelihood')\n# rl_env = make_rl_student_env(env)\n# agent = RLTutor(n_items)\n# reward = agent.train(rl_env, n_eps=n_eps)\n# print(evaluation(agent))\n\n\nenv = DKVEnv(**env_kwargs, reward_func='likelihood')\n\nrl_env = MyGymEnv(make_rl_student_env(env))\n\npolicy = CategoricalGRUPolicy(\n env_spec=rl_env.spec, hidden_dim=32,\n state_include_action=False)\nraw_policy = LoggedTRPO(\n env=rl_env,\n policy=policy,\n baseline=LinearFeatureBaseline(env_spec=rl_env.spec),\n batch_size=4000,\n max_path_length=rl_env.env.n_steps,\n n_itr=n_eps,\n discount=0.99,\n step_size=0.01,\n verbose=False\n)\n\nagent = RLTutor(rl_env=rl_env, raw_policy=raw_policy)\n\nreward = agent.train()\nprint(evaluation(agent))\n\n# # student_traces = [[(1, 0), (3, 1)], [(6, 1), (6, 0), (7, 1)]]\n# # stu = [[(51424, 0), (51435, 1),(51444, 1)]]\n# # stu = [[(85829, 0),(85838, 1)]]\n# # za biologiju\n# stu = [[(1, 0), (27, 1)]]\n#\n# # the parameters of trained DKVMN-CA model\n# # with open('old/checkpoint/skill_builder0_10batch_2epochs/kt_params', 'rb') as f:\n# with open('/home/zvonimir/Exercise-Recommendation-System/checkpoint/biology30_32batch_1epochs/kt_params', 'rb') as f:\n# params = pickle.load(f)\n#\n# # Knowledge Concepts Corresponding to the exercise\n# # with open('data/skill_builder/chunk_exercise_concepts_mapping.pkl', 'rb') as f:\n# with open('/home/zvonimir/Exercise-Recommendation-System/data/biology30/chunk_exercise_concepts_mapping.pkl',\n# 'rb') as f:\n# e2c = pickle.load(f)\n#\n# # with open('data/skill_builder/chunk_exercises_id_converter.pkl', 'rb') as f:\n# with open('/home/zvonimir/Exercise-Recommendation-System/data/biology30/chunk_exercises_id_converter.pkl', 'rb') as f:\n# exercises_id_converter = pickle.load(f)\n#\n# # cands=[51424,51435,51444,51395,51481]\n# # cands=[85829,61089,85814,85838]\n# cands = [1, 15, 16, 27]\n#\n# candidate_exercises = [exercises_id_converter[e] for e in cands]\n# student_traces = [[(exercises_id_converter[e], a) for e, a in t] for t in stu]\n#\n# # current problems:\n# # key error?\n#\n# Concepts = 5 # number of concepts\n# NumQ = 30 # number of exercises\n# # Concepts = 9 # number of concepts\n# # NumQ = 2446 # number of exercises\n# # Concepts = 123 # number of concepts\n# # NumQ = 17751 # number of exercises\n# n_steps = 5 # number of steps of algorithm\n# n_items = len(candidate_exercises) # number of candidate exercises\n# # n_items = [len(candidate_exercises[i]) for i in candidate_exercises]\n# discount = 0.99\n# n_eps = 1 # number of epochs in algorithm\n#\n# # reward_funcs = ['likelihood']\n# # envs = [\n# # ('DKVMN', DKVEnv)\n# # ]\n# #\n# # tutor_builders = [\n# # ('RL', RLTutor)\n# # ]\n#\n# env_kwargs = {\n# 'n_items': n_items, 'n_steps': n_steps, 'discount': discount\n# }\n#\n# env = DKVEnv(**env_kwargs, reward_func='likelihood')\n#\n# rl_env = MyGymEnv(make_rl_student_env(env))\n#\n# policy = CategoricalGRUPolicy(\n# env_spec=rl_env.spec, hidden_dim=32,\n# 
state_include_action=False)\n# raw_policy = LoggedTRPO(\n# env=rl_env,\n# policy=policy,\n# baseline=LinearFeatureBaseline(env_spec=rl_env.spec),\n# batch_size=4000,\n# max_path_length=rl_env.env.n_steps,\n# n_itr=n_eps,\n# discount=0.99,\n# step_size=0.01,\n# verbose=False\n# )\n#\n# agent = RLTutor(n_items=n_items, rl_env=rl_env, raw_policy=raw_policy)\n#\n# reward = agent.train()\n# print(evaluation(agent))\n# outList = evaluation(agent)\n#\n# # if __name__=='main':\n# # main()\n","sub_path":"newest_rs.py","file_name":"newest_rs.py","file_ext":"py","file_size_in_byte":8781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"397053530","text":"\nfrom io import StringIO\n\nimport os\nfrom os import path\nimport sys\nfrom pprint import pprint\nimport time\nimport re\nimport traceback\n\nimport bpy\n\ntry:\n import chess.pgn\nexcept Exception as e:\n print('chess module missing (pip install python-chess?)')\n traceback.print_exc()\n sys.exit(1)\n\n\nSQUARE_SIZE = 3.0\n\n\n\ndef chess_to_coordinates(row, col, z):\n x_map = {'a': 0., 'b': 1., 'c': 2., 'd': 3., 'e': 4., 'f': 5., 'g': 6., 'h': 7.}\n y_map = {'1': 0., '2': 1., '3': 2., '4': 3., '5': 4., '6': 5., '7': 6., '8': 7.}\n \n return (x_map[row] + 0.5) * SQUARE_SIZE, (y_map[col] + 0.5) * SQUARE_SIZE, z\n\n\ndef clean():\n for action in bpy.data.actions:\n if action.users == 0:\n bpy.data.actions.remove(action)\n for mesh in bpy.data.meshes:\n if mesh.users == 0:\n bpy.data.meshes.remove(mesh)\n\ndef initial_setup():\n clean()\n \n bpy.context.scene.layers[0] = True\n\n \n # remove stuff\n bpy.ops.object.select_all(action='DESELECT')\n for obj in filter(lambda x: not x.name.startswith('template_'), bpy.data.objects):\n obj.select = True\n bpy.ops.object.delete()\n\n \n bpy.context.scene.frame_set(1)\n bpy.context.scene.frame_end = 2000\n \n\n #bpy.ops.rigidbody.world_add()\n \n \n board_map = {}\n # PAWNS\n z = 1.35288\n for idx1, col in enumerate(\"abcdefgh\"):\n for idx2, row in enumerate(\"27\"):\n src_obj = bpy.context.scene.objects['template_pawn']\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n new_obj.name = 'pawn.' + col + row\n \n bpy.context.scene.objects.link(new_obj)\n \n board_map[col + row] = new_obj\n new_obj.location = chess_to_coordinates(col, row, z)\n new_obj.keyframe_insert(data_path='location')\n \n # physics\n bpy.context.scene.rigidbody_world.group.objects.link(new_obj)\n\n # ROOKS\n z = 1.46252\n for idx1, col in enumerate(\"ah\"):\n for idx2, row in enumerate(\"18\"):\n src_obj = bpy.context.scene.objects['template_rook']\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n new_obj.name = 'rook.' + col + row\n \n bpy.context.scene.objects.link(new_obj)\n \n board_map[col + row] = new_obj\n new_obj.location = chess_to_coordinates(col, row, z)\n new_obj.keyframe_insert(data_path='location')\n bpy.context.scene.rigidbody_world.group.objects.link(new_obj)\n # KNIGHTS\n z = 1.\n for idx1, col in enumerate(\"bg\"):\n for idx2, row in enumerate(\"18\"):\n src_obj = bpy.context.scene.objects['template_knight']\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n new_obj.name = 'knight.' 
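`make_rl_student_env` in the record above rebinds plain functions onto an existing environment object with `types.MethodType`; a minimal self-contained demonstration of that pattern:

```python
import types

class Env:
    n_items = 4

# A plain function whose first argument will act as `self` once bound.
def _obs(self):
    return self.n_items * 2

env = Env()
env._obs = types.MethodType(_obs, env)   # bind to this instance only
print(env._obs())                        # -> 8
```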
+ col + row\n \n bpy.context.scene.objects.link(new_obj)\n \n board_map[col + row] = new_obj\n new_obj.location = chess_to_coordinates(col, row, z)\n new_obj.keyframe_insert(data_path='location')\n bpy.context.scene.rigidbody_world.group.objects.link(new_obj)\n # BISHOPS\n z = 1.7937\n for idx1, col in enumerate(\"cf\"):\n for idx2, row in enumerate(\"18\"):\n src_obj = bpy.context.scene.objects['template_bishop']\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n new_obj.name = 'bishop.' + col + row\n \n bpy.context.scene.objects.link(new_obj)\n \n board_map[col + row] = new_obj\n new_obj.location = chess_to_coordinates(col, row, z)\n new_obj.keyframe_insert(data_path='location')\n bpy.context.scene.rigidbody_world.group.objects.link(new_obj)\n # QUEENS\n z = 2.0401\n for idx1, col in enumerate(\"d\"):\n for idx2, row in enumerate(\"18\"):\n src_obj = bpy.context.scene.objects['template_queen']\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n new_obj.name = 'queen.' + col + row\n \n bpy.context.scene.objects.link(new_obj)\n \n board_map[col + row] = new_obj\n new_obj.location = chess_to_coordinates(col, row, z)\n new_obj.keyframe_insert(data_path='location')\n bpy.context.scene.rigidbody_world.group.objects.link(new_obj)\n # KINGS\n z = 2.32912\n for idx1, col in enumerate(\"e\"):\n for idx2, row in enumerate(\"18\"):\n src_obj = bpy.context.scene.objects['template_king']\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n new_obj.name = 'king.' + col + row\n \n bpy.context.scene.objects.link(new_obj)\n \n board_map[col + row] = new_obj\n new_obj.location = chess_to_coordinates(col, row, z)\n new_obj.keyframe_insert(data_path='location')\n bpy.context.scene.rigidbody_world.group.objects.link(new_obj)\n \n # BOARD \n bpy.ops.mesh.primitive_plane_add(view_align=False, enter_editmode=False, location=(0, 0, 0), layers=(True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False))\n bpy.context.selected_objects[0].name = 'ground'\n \n bpy.context.object.scale[1] = 4 * SQUARE_SIZE\n bpy.context.object.scale[0] = 4 * SQUARE_SIZE\n bpy.context.object.location[0] = 4 * SQUARE_SIZE\n bpy.context.object.location[1] = 4 * SQUARE_SIZE\n\n bpy.context.scene.rigidbody_world.group.objects.link(bpy.data.objects['ground'])\n\n # TODO: create checker texture\n checker_mat = bpy.data.materials.get('checker')\n bpy.data.objects['ground'].data.materials.append(checker_mat)\n\n bpy.context.scene.frame_set(2)\n bpy.context.scene.frame_set(3)\n bpy.context.scene.frame_set(1)\n \n bpy.data.objects['ground'].rigid_body.kinematic = True\n for piece in board_map.values():\n piece.rigid_body.kinematic = True\n\n\n return board_map\n \n\n\ndef load_pgn(pgn_path):\n print(\"Loading PGN \" + str(pgn_path))\n try:\n with open(pgn_path) as pgn_file:\n game = chess.pgn.read_game(pgn_file)\n except Exception as e:\n print(\"Load PGN failed\")\n traceback.print_exc()\n sys.exit(1)\n \n \n return game\n \n\ndef play(board_map, game, frames_per_move, n_fragments):\n start_time = time.time()\n\n board = game.board()\n for move_number, move in enumerate(game.main_line()):\n from_square = move.uci()[0:2]\n to_square = move.uci()[2:4]\n \n is_capture = board.is_capture(move)\n is_castling = board.is_castling(move)\n is_kingside_castling = board.is_kingside_castling(move)\n is_queenside_castling = 
board.is_queenside_castling(move)\n is_en_passant = board.is_en_passant(move)\n promotion = move.promotion\n if promotion:\n promoted_piece = chess.PIECE_NAMES[promotion]\n \n print('{}: {}, cap: {}, castl: {}'.format((move_number // 2) + 1, move, is_capture, is_castling))\n\n\n if is_castling:\n king = board_map[from_square]\n \n if to_square == 'g1':\n rook_from = 'h1'\n rook_dest = 'f1'\n elif to_square == 'c1':\n rook_from = 'a1'\n rook_dest = 'd1'\n elif to_square == 'g8':\n rook_from = 'h8'\n rook_dest = 'f8'\n elif to_square == 'c8':\n rook_from = 'a8'\n rook_dest = 'd8'\n rook = board_map[rook_from]\n \n # insert keyframes\n king.keyframe_insert(data_path='location')\n rook.keyframe_insert(data_path='location')\n \n bpy.context.scene.frame_set(bpy.context.scene.frame_current + frames_per_move)\n \n # move king\n king.location = chess_to_coordinates(to_square[0], to_square[1], king.location.z)\n king.keyframe_insert(data_path='location')\n \n # move rook\n rook.location = chess_to_coordinates(rook_dest[0], rook_dest[1], rook.location.z)\n rook.keyframe_insert(data_path='location')\n \n # update board\n board_map.pop(from_square)\n board_map.pop(rook_from)\n \n board_map[to_square] = king\n board_map[rook_dest] = rook\n \n \n elif is_capture:\n # keyframe for previous position\n board_map[from_square].keyframe_insert(data_path='location')\n board_map[to_square].keyframe_insert('rigid_body.kinematic')\n \n bpy.context.scene.frame_set(bpy.context.scene.frame_current + 1)\n board_map[to_square].rigid_body.kinematic = False\n board_map[to_square].keyframe_insert('rigid_body.kinematic')\n bpy.context.scene.frame_set(bpy.context.scene.frame_current + -1)\n \n bpy.ops.object.select_all(action='DESELECT')\n board_map[to_square].select = True\n bpy.ops.object.add_fracture_cell_objects(source_limit=n_fragments)\n \n for obj in filter(lambda x: x.name.startswith(board_map[to_square].name + '_cell'), bpy.data.objects):\n bpy.context.scene.rigidbody_world.group.objects.link(obj)\n\n this_frame = bpy.context.scene.frame_current\n bpy.context.scene.frame_set(1)\n bpy.context.scene.frame_set(2)\n bpy.context.scene.frame_set(this_frame)\n \n for obj in filter(lambda x: x.name.startswith(board_map[to_square].name + '_cell'), bpy.data.objects):\n obj.rigid_body.kinematic = True\n obj.keyframe_insert('rigid_body.kinematic')\n\n # disable old piece\n for obj in bpy.data.objects:\n obj.select = False\n board_map[to_square].select = True\n bpy.context.scene.frame_set(0)\n board_map[to_square].rigid_body.collision_groups[0] = True\n board_map[to_square].keyframe_insert('rigid_body.collision_groups')\n board_map[to_square].hide = False\n board_map[to_square].keyframe_insert('hide')\n board_map[to_square].hide_render = False\n board_map[to_square].keyframe_insert('hide_render')\n \n bpy.context.scene.frame_set(this_frame - 1)\n board_map[to_square].rigid_body.collision_groups[0] = False\n board_map[to_square].keyframe_insert('rigid_body.collision_groups')\n board_map[to_square].hide = True\n board_map[to_square].keyframe_insert('hide')\n board_map[to_square].hide_render = True\n board_map[to_square].keyframe_insert('hide_render')\n \n \n \n # enable rigid body for cells\n bpy.context.scene.frame_set(this_frame - 1)\n for obj in filter(lambda x: x.name.startswith(board_map[to_square].name + '_cell'), bpy.data.objects):\n obj.rigid_body.kinematic = True\n obj.keyframe_insert('rigid_body.kinematic')\n obj.rigid_body.collision_groups[0] = False\n obj.keyframe_insert('rigid_body.collision_groups')\n \n 
bpy.context.scene.frame_set(this_frame)\n for obj in filter(lambda x: x.name.startswith(board_map[to_square].name + '_cell'), bpy.data.objects):\n obj.rigid_body.kinematic = False\n obj.keyframe_insert('rigid_body.kinematic')\n obj.rigid_body.collision_groups[0] = True\n obj.keyframe_insert('rigid_body.collision_groups')\n \n bpy.context.scene.frame_set(0)\n for obj in filter(lambda x: x.name.startswith(board_map[to_square].name + '_cell'), bpy.data.objects):\n obj.hide = True\n obj.keyframe_insert('hide')\n obj.hide_render = True\n obj.keyframe_insert('hide_render')\n bpy.context.scene.frame_set(this_frame - 1)\n for obj in filter(lambda x: x.name.startswith(board_map[to_square].name + '_cell'), bpy.data.objects):\n obj.hide = False\n obj.keyframe_insert('hide')\n obj.hide_render = False\n obj.keyframe_insert('hide_render')\n \n \n \n # timestep\n bpy.context.scene.frame_set(this_frame + frames_per_move)\n \n # move piece\n board_map[from_square].location = chess_to_coordinates(to_square[0], to_square[1], board_map[from_square].location.z)\n board_map[from_square].keyframe_insert(data_path='location')\n \n \n \n \n # actually play the move\n board_map[to_square] = board_map[from_square]\n board_map.pop(from_square)\n\n else:\n # simple move\n # keyframe for previous position\n board_map[from_square].keyframe_insert(data_path='location')\n \n # timestep\n bpy.context.scene.frame_set(bpy.context.scene.frame_current + frames_per_move)\n \n # move piece\n board_map[from_square].location = chess_to_coordinates(to_square[0], to_square[1], board_map[from_square].location.z)\n board_map[from_square].keyframe_insert(data_path='location')\n \n # actually play the move\n board_map[to_square] = board_map[from_square]\n board_map.pop(from_square)\n\n # update the board\n board.push(move)\n \n if 'CHESS_FRACTURE_TEST' in os.environ and move_number > 10:\n print('Early exit because CHESS_FRACTURE_TEST is defined')\n break\n # end for moves\n \n # assign materials\n white_mat = bpy.data.materials.get('white')\n black_mat = bpy.data.materials.get('black')\n \n whites_re = re.compile(r'[a-z]+\\.[a-h][12].*')\n blacks_re = re.compile(r'[a-z]+\\.[a-h][78].*')\n for obj in bpy.data.objects:\n if whites_re.match(obj.name):\n obj.data.materials.append(white_mat)\n elif blacks_re.match(obj.name):\n obj.data.materials.append(black_mat)\n\n # compute some stats\n end_time = time.time()\n duration = end_time - start_time\n print('Duration: ' + str(duration))\n # end def play\n\n\n\ndef main():\n if 'CHESS_FRACTURE_FRAMES_PER_MOVE' in os.environ:\n frames_per_move = int(os.environ['CHESS_FRACTURE_FRAMES_PER_MOVE'])\n else:\n frames_per_move = 20\n print(\"CHESS_FRACTURE_FRAMES_PER_MOVE=\" + str(frames_per_move))\n\n if 'CHESS_FRACTURE_FRAGMENTS' in os.environ:\n n_fragments = int(os.environ['CHESS_FRACTURE_FRAGMENTS'])\n else:\n n_fragments = 10\n print(\"CHESS_FRACTURE_FRAGMENTS=\" + str(n_fragments))\n\n if 'CHESS_FRACTURE_PGN_PATH' in os.environ:\n print('CHESS_FRACTURE_PGN_PATH=' + str(os.environ['CHESS_FRACTURE_PGN_PATH']))\n game = load_pgn(os.environ['CHESS_FRACTURE_PGN_PATH'])\n else:\n game = load_pgn('/work/input.pgn')\n\n\n variant = game.board().uci_variant\n if variant != 'chess':\n sys.stdout.write('Unsupported game type {}\\n'.format(variant))\n sys.exit(1)\n\n board_map = initial_setup()\n print('Board setup done')\n\n try:\n play(board_map, game, frames_per_move, n_fragments)\n print('Simulation done')\n except Exception as e:\n print('Simulation failed')\n traceback.print_exc()\n sys.exit(1)\n\n 
try:\n if 'CHESS_FRACTURE_OUT_BLEND' in os.environ:\n save_file = os.environ['CHESS_FRACTURE_OUT_BLEND']\n \n bpy.ops.wm.save_as_mainfile(filepath=save_file)\n \n print('File saved as \"{}\"'.format(save_file))\n \n sys.exit(0) # happy path\n except Exception as e:\n print('Save failed ' + str(e))\n traceback.print_exc()\n sys.exit(1)\n # end def main\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n print('main failed :' + str(e))\n traceback.print_exc()\n sys.exit(1)\n","sub_path":"blender/chess_fracture.py","file_name":"chess_fracture.py","file_ext":"py","file_size_in_byte":16585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"87891024","text":"# IMAGE MANIPULATION LIBRARYS\nfrom skimage import io\nimport numpy as np\n\n# OPENCL LIBRARYS\nimport pyopencl as cl\n\n# VGL LIBRARYS\nfrom vglImage import *\nfrom vglStrEl import *\nfrom structSizes import *\nfrom vglOclContext import *\nimport vglConst as vc\n\nclass vgl:\n\t# THE vgl CONSTRUCTOR CREATES A NEW CONTEXT\n\t# AND INITIATES THE QUEUE, ADDING QUE CONTEXT TO IT.\n\tdef __init__(self, filepath):\n\t\tprint(\"Starting OpenCL\")\n\t\tself.ocl_ctx = VglOclContext()\n\t\tself.ocl_ctx.load_headers(filepath)\n\t\tself.ctx = self.ocl_ctx.get_context()\n\t\tself.queue = self.ocl_ctx.get_queue()\n\t\tself.builded = False\n\n\n\t# THIS FUNCTION WILL LOAD THE KERNEL FILE\n\t# AND BUILD IT IF NECESSARY.\n\tdef loadCL(self, filepath):\n\t\tprint(\"Loading OpenCL Kernel\")\n\t\tself.kernel_file = open(filepath, \"r\")\n\n\t\tif ((self.builded == False)):\n\t\t\tprint(\"::Building Kernel\")\n\t\t\tself.pgr = cl.Program(self.ctx, self.kernel_file.read())\n\t\t\tself.pgr.build(options=self.ocl_ctx.get_build_options())\n\t\t\tself.builded = True\n\t\telse:\n\t\t\tprint(\"::Kernel already builded. 
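`chess_to_coordinates` in the chess-fracture record above centers each piece on a `SQUARE_SIZE`-wide square; an equivalent stand-alone reimplementation (argument names changed to file/rank for clarity) with one worked value:

```python
SQUARE_SIZE = 3.0

# file a..h -> column 0..7, rank 1..8 -> row 0..7; +0.5 centers the piece.
def chess_to_coordinates(file, rank, z):
    x = ("abcdefgh".index(file) + 0.5) * SQUARE_SIZE
    y = ("12345678".index(rank) + 0.5) * SQUARE_SIZE
    return x, y, z

print(chess_to_coordinates("e", "4", 1.0))   # -> (13.5, 10.5, 1.0)
```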
Going to next step...\")\n\n\t\tself.kernel_file.close()\n\t\t#print(\"Kernel\", self.pgr.get_info(cl.program_info.KERNEL_NAMES), \"compiled.\")\n\n\tdef loadImage(self, imgpath):\n\t\tprint(\"Opening image to be processed\")\n\t\t\n\t\tself.vglimage = VglImage(imgpath)\n\t\t\n\t\tmf = cl.mem_flags\n\t\tself.vglimage.vglNdImageUpload(self.ctx, self.queue)\n\t\tself.img_out_cl = cl.Buffer(self.ctx, mf.WRITE_ONLY, self.vglimage.get_host_image().nbytes)\n\n\t\tself.makeStructures()\n\n\tdef makeStructures(self):\n\t\tprint(\"Making Structures\")\n\t\tmf = cl.mem_flags\n\n\t\tss = StructSizes()\n\t\tss = ss.get_struct_sizes()\n\n\t\t# MAKING STRUCTURING ELEMENT\n\t\tself.strEl = VglStrEl()\n\t\tself.strEl.constructorFromTypeNdim(vc.VGL_STREL_CROSS(), 2)\n\t\t\n\t\timage_cl_strel = self.strEl.asVglClStrEl()\n\t\timage_cl_shape = self.vglimage.getVglShape().asVglClShape()\n\n\t\tvgl_strel_obj = np.zeros(ss[0], np.uint8)\n\t\tvgl_shape_obj = np.zeros(ss[6], np.uint8)\n\n\t\t# COPYING DATA AS BYTES TO HOST BUFFER\n\t\tself.copy_into_byte_array(image_cl_strel.data, vgl_strel_obj, ss[1])\n\t\tself.copy_into_byte_array(image_cl_strel.shape, vgl_strel_obj, ss[2])\n\t\tself.copy_into_byte_array(image_cl_strel.offset,vgl_strel_obj, ss[3])\n\t\tself.copy_into_byte_array(image_cl_strel.ndim, vgl_strel_obj, ss[4])\n\t\tself.copy_into_byte_array(image_cl_strel.size, vgl_strel_obj, ss[5])\n\n\t\tself.copy_into_byte_array(image_cl_shape.ndim, vgl_shape_obj, ss[7])\n\t\tself.copy_into_byte_array(image_cl_shape.shape, vgl_shape_obj, ss[8])\n\t\tself.copy_into_byte_array(image_cl_shape.offset,vgl_shape_obj, ss[9])\n\t\tself.copy_into_byte_array(image_cl_shape.size, vgl_shape_obj, ss[10])\n\n\t\t# CREATING DEVICE BUFFER TO HOLD STRUCT DATA\n\t\tself.vglstrel_buffer = cl.Buffer(self.ctx, mf.READ_ONLY, vgl_strel_obj.nbytes)\n\t\tself.vglshape_buffer = cl.Buffer(self.ctx, mf.READ_ONLY, vgl_shape_obj.nbytes)\n\t\t\t\t\n\t\t# COPYING DATA FROM HOST TO DEVICE\n\t\tcl.enqueue_copy(self.queue, self.vglstrel_buffer, vgl_strel_obj.tobytes(), is_blocking=True)\n\t\tcl.enqueue_copy(self.queue, self.vglshape_buffer, vgl_shape_obj.tobytes(), is_blocking=True)\n\n\tdef copy_into_byte_array(self, value, byte_array, offset):\n\t\tfor iterator, byte in enumerate( value.tobytes() ):\n\t\t\tbyte_array[iterator+offset] = byte\n\t\t\n\tdef execute(self, outputpath):\n\t\t# EXECUTING KERNEL WITH THE IMAGES\n\t\tprint(\"Executing kernel\")\n\t\t\n\t\tself.pgr.vglClNdConvolution(self.queue,\n\t\t\t\t\t\t\t\t\tself.vglimage.get_host_image().shape,\n\t\t\t\t\t\t\t\t\tNone, \n\t\t\t\t\t\t\t\t\tself.vglimage.get_device_image(), \n\t\t\t\t\t\t\t\t\tself.img_out_cl,\n\t\t\t\t\t\t\t\t\tself.vglshape_buffer,\n\t\t\t\t\t\t\t\t\tself.vglstrel_buffer).wait()\n\t\t\n\t\tself.vglimage.set_device_image(self.img_out_cl)\n\t\tself.vglimage.vglNdImageDownload(self.ctx, self.queue)\n\t\tself.vglimage.img_save(outputpath)\n\n#CLPath = \"../../CL_ND/testprobe.cl\"\nCLPath = \"../../CL_ND/vglClNdConvolution.cl\"\ninPath = sys.argv[1]\nouPath = sys.argv[2] \n\nprocess = vgl(CLPath)\nprocess.loadCL(CLPath)\nprocess.loadImage(inPath)\nprocess.execute(ouPath)\n","sub_path":"base_development_files/CL_ND/vglClNdConvolution.py","file_name":"vglClNdConvolution.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"545495693","text":"import argparse, os, pysam\nimport numpy as np\nimport pandas as pd\nfrom subprocess import call\nfrom multiprocessing import Process, 
Queue\nfrom time import sleep\n\n\nclass tabulate_alignment_fragment:\n \"\"\"\n tabulate_alignment_fragment.py\n \n reference_list_file: File with two columns used to generate bowtie2 index.\n Stored with the bowtie2 references and called genome_name_list.csv\n unique_alignment_file: bam file with all unique alignments\n multiple_alignment_file: bam file with all reads that mapped multiple times\n tabulated_alignment_file: output file including all the genomes and one column with fragments\n sample_name: a string that represents the name of the sample\n \n Description: This function is used in accu_align_v1.sh. Only the first read alignments are used.\n For unique alignments, the number of bps in inferred fragment length is used.\n For multiple alignments, the number of bps are proportionally assigned.\n \n Author: Brian Yu\n \n Revision History:\n 2018.09.10 Version 1 had the wrong algorithm. This version uses the unique reads.\n 2018.12.08 Looking at both paired alignments, ignore non-concordant alignments,\n remove nonperfect CIGAR, remove orphan reads that are unique. \n Currently does not remove multi-mapped reads that are orphans.\n \"\"\"\n\n def __init__(self, reference_list_file, sample_name):\n \"\"\"\n reference_list_file: arguments.reference_list_file\n sample_name: arguments.sample_name\n \"\"\"\n self.sample_name = sample_name\n # Read in genome names, use the corrected name as the row indices\n reference_list = pd.read_csv(reference_list_file, sep=',', header=0, index_col=1)\n reference_list[self.sample_name] = 0 # Added a column with 0's with sample_name as header\n self.coverage = reference_list.drop('file_name', axis=1) # file_name is column header, axis=1 means column\n\n def process_unique_alignments(self, bamfile_name):\n \"\"\"\n bamfile_name: arguments.unique_alignment_file\n \"\"\"\n # Process unique alignments\n print(\"Processing Uniquely Aligned Reads\")\n bamfile = pysam.AlignmentFile(bamfile_name, mode='rb')\n # alignment here is a pysam AlignedSegment data structure\n counter = 0\n for alignment in bamfile.fetch(until_eof=True):\n if alignment.is_paired and alignment.is_read1 and alignment.is_proper_pair and ((alignment.is_reverse and not alignment.mate_is_reverse) or (not alignment.is_reverse and alignment.mate_is_reverse)):\n ref = bamfile.get_reference_name(alignment.reference_id)\n self.coverage.ix[ref.split('_')[0], self.sample_name] += float(abs(alignment.template_length))\n counter += 1\n if counter % 100000 == 0:\n print('.', end='')\n counter = 0\n bamfile.close()\n print('.')\n # For debugging purposes\n # self.coverage.to_csv(arguments.tabulated_alignment_file, index=True, header=True)\n\n\n def process_multiple_alignments(self, bamfile_name):\n \"\"\"\n bamfile_name: arguments.multiple_alignment_file\n CIGAR string is handled previously when spliting the bam. 
It is not handled here.\n \"\"\"\n\n # Process multiple alignments\n print(\"Processing Reads With Multiple Alignments\")\n # Create another df with same index\n tempCov = pd.DataFrame(index=self.coverage.index)\n tempCov[self.sample_name] = 0\n reference_mapped_to = [] # temp storage of all the strain genomes mapped to\n inferred_template_size = [] # temp storage of all the inferred template size\n current_read_name = ''\n # Read in bamfile\n bamfile = pysam.AlignmentFile(bamfile_name, mode='rb')\n counter = 0\n\n for alignment in bamfile.fetch(until_eof=True):\n # if alignment.is_paired and alignment.is_read1 and alignment.is_proper_pair:\n # this part is still using all reads even if half of it falls into an off limit region\n if alignment.is_paired and alignment.is_proper_pair and ((alignment.is_reverse and not alignment.mate_is_reverse) or (not alignment.is_reverse and alignment.mate_is_reverse)):\n # If it's a new read\n if alignment.query_name != current_read_name:\n # Split the read and add to the original coverage df\n if reference_mapped_to:\n reference_mapped_to = list(set(reference_mapped_to)) # unique elements\n tempSeries = self.coverage.loc[reference_mapped_to, self.sample_name]\n # If, based on the unique reads, there are more than 0 total bps across ref mapped to\n if tempSeries.sum() > 0:\n tempCov.loc[reference_mapped_to, self.sample_name] += tempSeries / tempSeries.sum() * np.median(inferred_template_size)\n # else: # Do not process reads like this\n # print(alignment.query_name+' split evenly between '+str(tempSeries.size)+' genomes.')\n # tempCov.loc[reference_mapped_to, arguments.sample_name] += np.median(inferred_template_size) / tempSeries.size\n # Update the variables to handle the next read\n reference_mapped_to = []\n inferred_template_size = []\n current_read_name = alignment.query_name\n ref = bamfile.get_reference_name(alignment.reference_id)\n reference_mapped_to.append(ref.split('_')[0])\n inferred_template_size.append(float(abs(alignment.template_length)))\n counter += 1\n if counter % 100000 == 0:\n print('.', end='')\n counter = 0\n bamfile.close()\n # For debugging\n # print(coverage)\n # print(tempCov)\n self.coverage.loc[self.coverage.index, self.sample_name] += tempCov.loc[tempCov.index, self.sample_name]\n print('.')\n\n\n def output_coverage(self, output_file_name):\n \"\"\"\n output_file_name: arguments.tabulated_alignment_file\n \"\"\"\n # Output final tabulated file\n self.coverage.to_csv(output_file_name, index=True, header=True)\n\n\n\nclass tabulate_basepair_coverage:\n \"\"\"\n Class Description: Create coverage files across the genome of all reference sequences. Use 3 bam\n files - unique; multiple-unique; and multiple-multiple\n Revision History: 2018.10.11 Brian Yu Created. Need to think about how to use multiprocessing\n 2018.10.13 Use df.groupby in compute coverage\n 2018.10.14 Completed first path and completed debugging\n 2018.11.10 Resolved all bugs. First working version of tabulation pipeline\n 2018.12.09 Removed the function to split bam files because it's included \n in a separate function. 
\n Integrated into the similarity aware alignment method\n \"\"\"\n\n def __init__(self, reference_fasta, reference_names):\n \"\"\"\n reference_fasta: full path to the fasta file\n reference_names: file with reference names interested or []\n \"\"\"\n # Process reference contigs\n self.contig_list = {} # a dictionary with contig name and length\n temp_contig_name = None\n temp_contig_size = None\n with open(reference_fasta,'r') as f:\n for l in f:\n if '>' in l:\n if temp_contig_name and temp_contig_size:\n self.contig_list[temp_contig_name] = temp_contig_size\n temp_contig_name = l.rstrip()[1:]\n temp_contig_size = 0\n else:\n temp_contig_size += len(l.rstrip())\n # The last contig still needs to be processed\n self.contig_list[temp_contig_name] = temp_contig_size\n\n # Process reference contigs\n if not reference_names:\n self.ref = list(set([x.split('_')[0] for x in list(self.contig_list.keys())]))\n else:\n with open(reference_names, 'r') as f:\n self.ref = []\n for l in f:\n self.ref.append(l.rstrip())\n # Debugging\n print('Number of references is: ', len(self.ref))\n # print(self.contig_list)\n\n\n def create_depth_file(self, bamfile_name, core_num):\n \"\"\"\n bamfile_name: name of the bamfile to create depth file with\n core_num: core_num is only used in sorting and is optional. Default is 2\n Description: This function calls samtools.\n You can access the newly created bamfiles through self.bam_root\n The output from pysam.depth is returned as a string, no longer saved as a file\n \"\"\"\n if not core_num:\n core_num = 2\n else:\n core_num = int(core_num)\n sorted_bamfile = bamfile_name.split('.bam')[0] + '.sortedByCoord.bam';\n pysam.sort(\"--threads\",str(core_num),\"-m\",\"2G\",\"-o\",sorted_bamfile,bamfile_name)\n # The depth file is returned as one single string\n return pysam.depth(sorted_bamfile)\n\n \n def extract_lines_from_depth_file(self, depth_file_name, sample_number, window_size, output_file_name):\n \"\"\"\n depth_file_name: output of samtools depth, no header, each column other than the first 2 are samples\n The depth_file should be sorted by contig and also by position in the contig.\n sample_number: which column to use, starting at 0, could not be an array\n output_file: filename and full path of the output csv file\n Description: If depth file is empty, nothing gets produced.\n \"\"\"\n # Check that sample_number is not an array\n if type(sample_number) != type(int()):\n raise ValueError('Variable sample_number is not an int.')\n # If output_file exist, erase it\n if output_file_name and os.path.isfile(output_file_name):\n os.remove(output_file_name)\n # Open depth file. I'm going line by line here to minimize memory usage\n # aka. 
I don't want to use pd.read_table to read in the entire file\n # And thus, the choice of pulling out only one column to tabulate is also a design choice\n with open(depth_file_name,'r') as depth_file:\n coverage_block = {'contig_position' : [], 'contig_depth' : []} # define empty dictionary with two fields\n previous_contig_name = None\n contig_name = None\n for l in depth_file:\n contig_name = l.split()[0]\n if contig_name.split('_')[0] in self.ref:\n # If previous_contig_name exists and it is not the same as the new contig\n if previous_contig_name and contig_name != previous_contig_name:\n # Process the last contig coverage\n self.output_one_contig_coverage(self.compute_coverage_for_one_contig(previous_contig_name, coverage_block, window_size), output_file_name)\n # Update the variables for the next contig coverage\n coverage_block = {'contig_position' : [], 'contig_depth' : []}\n # Update previous_contig_name\n previous_contig_name = contig_name\n # split fields in the line (delimited by tab)\n tmp_line = l.split()\n # Append position and depth. field 1 = contig name, field 2 = contig position, field 3 = depth\n coverage_block['contig_position'].append(int(tmp_line[1]))\n coverage_block['contig_depth'].append(float(tmp_line[sample_number + 2])) # + 2 because first 2 columns are name and position \n # since the last contig would not be written, write it here\n # But the file might be empty, so you need to check if coverage_block is empty\n if bool(previous_contig_name):\n self.output_one_contig_coverage(self.compute_coverage_for_one_contig(previous_contig_name, coverage_block, window_size), output_file_name)\n\n\n def compute_coverage_for_one_contig(self, contig_name, coverage_block, window_size):\n \"\"\"\n contig_name: a string representing contig_name\n coverage_block: dictionary with two fields, contig_position and contig_depth, both are lists\n window_size: size of the interval in base pairs to sum or average over in terms of coverage\n \"\"\" \n # self.contig_list is a dictionary where the keys are contig names and the values are lengths\n contig_len = self.contig_list[contig_name]\n # lower_bound = range(1, contig_len, window_size)\n # upper_bound = lower_bound[1:] + [contig_len]\n # Convert the dictionary coverage_block to a dataframe with 1 column called depth. 
Keys are indices\n coverage_df = pd.DataFrame.from_dict(coverage_block, orient='columns', dtype=int)\n # Use dataframe functions to group by genome position window_size, average coverage, \n grouped_coverage = coverage_df.groupby(coverage_df['contig_position'].floordiv(window_size)).mean()\n # Extract coverage_depth and fill in none covered locations with 0; First two are contig_name and length\n coverage_vector = [contig_name, str(contig_len)]\n for i in range(int(np.ceil(contig_len/window_size))):\n if i in grouped_coverage.index:\n coverage_vector.append(str(grouped_coverage.ix[i, 'contig_depth']))\n else:\n coverage_vector.append('0')\n return coverage_vector\n\n\n def output_depth_file(self, depth_file_name, alignment_depth):\n \"\"\"\n Write the variable alignment_depth to depth_file_name\n \"\"\"\n with open(depth_file_name, 'w') as f:\n t = f.write(alignment_depth)\n\n\n def output_one_contig_coverage(self, line, output_file):\n \"\"\"\n line: line to write as a list, needs to be a list of strings, will be joined by commas\n output_file: full path to output file, should be a csv file\n \"\"\"\n if not output_file:\n print(','.join(line))\n else:\n with open(output_file,'a') as d:\n t = d.write(','.join(line) + '\\n')\n\ndef DNA_count(ref, sample_name, uu, mm, mu, output):\n f = tabulate_alignment_fragment(ref, sample_name)\n f.process_unique_alignments(uu)\n f.process_multiple_alignments(mm)\n f.process_multiple_alignments(mu)\n f.output_coverage(output)\n\ndef DNA_coverage(ref, window_size, uu, mu, mm):\n k = tabulate_basepair_coverage(ref, []) # use default [] argument for ref_list\n print(uu + '\\n' + mu + '\\n' + mm)\n # unique alignment\n bam_root = '.'.join(uu.split('.')[0:-2])\n k.output_depth_file(bam_root+'.depth_unique.txt', k.create_depth_file(uu, []))\n k.extract_lines_from_depth_file(bam_root+'.depth_unique.txt', 0, window_size, bam_root+'.coverage_unique.csv')\n # multiple to unique\n bam_root = '.'.join(mu.split('.')[0:-2])\n k.output_depth_file(bam_root+'.depth_multiple_unique.txt', k.create_depth_file(mu, []))\n k.extract_lines_from_depth_file(bam_root+'.depth_multiple_unique.txt', 0, window_size, bam_root+'.coverage_unique_multiple.csv')\n # multiple to multiple\n bam_root = '.'.join(mm.split('.')[0:-2])\n k.output_depth_file(bam_root+'.depth_multiple_multiple.txt', k.create_depth_file(mm, []))\n k.extract_lines_from_depth_file(bam_root+'.depth_multiple_multiple.txt', 0, window_size, bam_root+'.coverage_multiple_multiple.csv')\n\n\n\n# When running the script from command line, the following lines are executed\nif __name__ == \"__main__\":\n usage = \"USAGE: python tabulate_alignment_fragment.py [options] reference_fasta reference_list unique multiple_to_multiple multiple_to_unique output_file\"\n\n # Making default argument list structures\n p = argparse.ArgumentParser(usage=usage)\n p.add_argument('-s', dest='sample_name', action='store', type=str, default='test_sample')\n p.add_argument('-w', dest='window_size', action='store', type=int, default=1000)\n p.add_argument(dest='ref_fasta', action='store', type=str)\n p.add_argument(dest='reference_list_file', action='store', type=str)\n p.add_argument(dest='unique_alignment_file', action='store', type=str)\n p.add_argument(dest='multiple_align_to_multiple', action='store', type=str)\n p.add_argument(dest='multiple_align_to_unique', action='store', type=str)\n p.add_argument(dest='tabulated_alignment_file', action='store', type=str)\n\n A = p.parse_args()\n\n try:\n p1 = Process(target=DNA_count, 
args=(A.reference_list_file, A.sample_name, A.unique_alignment_file, A.multiple_align_to_multiple, A.multiple_align_to_unique, A.tabulated_alignment_file))\n p1.start()\n p1.join()\n #p2 = Process(target=DNA_coverage, args=(A.ref_fasta, A.window_size, A.unique_alignment_file, A.multiple_align_to_unique, A.multiple_align_to_multiple))\n #p2.start()\n #p2.join()\n print('Script completed')\n\n except ValueError as e:\n print(\"ERROR: ValueError:\",e)\n print(usage)\n except TypeError as e:\n print(\"ERROR: TypeError:\",e)\n print(usage)\n except IOError as e:\n print(\"ERROR: IOError %s\",e)\n print(usage)\n\n\n","sub_path":"misc_scripts/tabulate_alignment_fragment.py","file_name":"tabulate_alignment_fragment.py","file_ext":"py","file_size_in_byte":17641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"611698792","text":"#!/usr/bin/env python\nimport sys, uuid, os\nimport OmniDB_app.include.Spartacus as Spartacus\nimport OmniDB_app.include.Spartacus.Database as Database\nimport OmniDB_app.include.Spartacus.Utils as Utils\nimport OmniDB_app.include.OmniDatabase as OmniDatabase\n\nimport OmniDB.custom_settings\n\nimport optparse\n\nparser = optparse.OptionParser(version=OmniDB.custom_settings.OMNIDB_VERSION)\n\ngroup = optparse.OptionGroup(parser, \"General Options\",\n \"Options to manage and perform maintenance on the OmniDB user database.\")\ngroup.add_option(\"-d\", \"--homedir\", dest=\"homedir\",\n default='', type=str,\n help=\"home directory containing local databases config and log files\")\ngroup.add_option(\"-a\", \"--vacuum\", dest=\"vacuum\",\n default=False, action=\"store_true\",\n help=\"databases maintenance\")\ngroup.add_option(\"-r\", \"--resetdatabase\", dest=\"reset\",\n default=False,action=\"store_true\",\n help=\"reset user and session databases\")\ngroup.add_option(\"-t\", \"--deletetemp\", dest=\"deletetemp\",\n default=False,action=\"store_true\",\n help=\"delete temporary files\")\nparser.add_option_group(group)\n\ngroup = optparse.OptionGroup(parser, \"User Management Options\",\n \"Options to list, create and drop users and superusers.\")\ngroup.add_option(\"-l\", \"--listusers\", dest=\"listusers\",\n default=False, action=\"store_true\",\n help=\"list users\")\ngroup.add_option(\"-u\", \"--createuser\", dest=\"createuser\",\n nargs=2,metavar=\"username password\",\n help=\"create user: -u username password\")\ngroup.add_option(\"-s\", \"--createsuperuser\", dest=\"createsuperuser\",\n nargs=2,metavar=\"username password\",\n help=\"create super user: -s username password\")\ngroup.add_option(\"-x\", \"--dropuser\", dest=\"dropuser\",\n nargs=1,metavar=\"username\",\n help=\"drop user: -x username\")\nparser.add_option_group(group)\n\ngroup = optparse.OptionGroup(parser, \"Connection Management Options\",\n \"Options to list, create and drop connections.\")\ngroup.add_option(\"-m\", \"--listconnections\", dest=\"listconnections\",\n nargs=1,metavar=\"username\",\n help=\"list connections: -m username\")\ngroup.add_option(\"-c\", \"--createconnection\", dest=\"createconnection\",\n nargs=6,metavar=\"username technology host port database dbuser\",\n help=\"create connection: -c username technology host port database dbuser\")\ngroup.add_option(\"-z\", \"--dropconnection\", dest=\"dropconnection\",\n nargs=1,metavar=\"connid\",\n help=\"drop connection: -z connid\")\nparser.add_option_group(group)\n\n(options, args) = parser.parse_args()\n\nif options.homedir!='':\n if not os.path.exists(options.homedir):\n 
print(\"Home directory does not exist. Please specify a directory that exists.\")\n sys.exit()\n else:\n OmniDB.custom_settings.HOME_DIR = options.homedir\n\n#importing settings after setting HOME_DIR\nimport OmniDB.settings\nfrom OmniDB.startup import clean_temp_folder\n\ndatabase = OmniDatabase.Generic.InstantiateDatabase(\n 'sqlite','','',OmniDB.settings.OMNIDB_DATABASE,'','','0',''\n)\ndatabase_sessions = OmniDatabase.Generic.InstantiateDatabase(\n 'sqlite','','',OmniDB.settings.SESSION_DATABASE,'','','0',''\n)\n\ndef clean_users():\n try:\n print('Cleaning users...')\n database.v_connection.Execute('''\n delete\n from users\n ''')\n print ('Done.')\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef clean_chat():\n try:\n print('Cleaning chat...')\n database.v_connection.Execute('''\n delete\n from messages_groups\n ''')\n database.v_connection.Execute('''\n delete\n from messages_channels\n ''')\n database.v_connection.Execute('''\n delete\n from messages\n ''')\n database.v_connection.Execute('''\n delete\n from users_groups\n ''')\n database.v_connection.Execute('''\n delete\n from users_channels\n ''')\n database.v_connection.Execute('''\n delete\n from groups\n ''')\n database.v_connection.Execute('''\n delete\n from channels\n ''')\n database.v_connection.Execute('''\n insert into channels (\n cha_in_code,\n cha_st_name,\n cha_bo_private\n )\n values (\n 1,\n 'General',\n 0\n )\n ''')\n print ('Done.')\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef clean_sessions():\n try:\n print('Cleaning sessions...')\n database_sessions.v_connection.Execute('''\n delete\n from django_session\n ''')\n print ('Done.')\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef vacuum():\n try:\n print('Vacuuming OmniDB database...')\n database.v_connection.Execute('vacuum')\n print ('Done.')\n print('Vacuuming Sessions database...')\n database_sessions.v_connection.Execute('vacuum')\n print ('Done.')\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef clean_temp():\n try:\n print('Cleaning temp folder...')\n clean_temp_folder(True)\n print ('Done.')\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef list_users():\n try:\n v_table = database.v_connection.Query('''\n select user_id as userid,\n user_name as username,\n super_user as superuser\n from users\n order by user_id\n ''')\n print(v_table.Pretty())\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef create_user(p_user, p_pwd):\n try:\n print('Creating user...')\n v_cryptor = Utils.Cryptor('omnidb', 'iso-8859-1')\n database.v_connection.Execute('''\n insert into users values (\n (select coalesce(max(user_id), 0) + 1 from users),'{0}','{1}',1,'14',1,0,'utf-8',';','11')\n '''.format(p_user,v_cryptor.Hash(v_cryptor.Encrypt(p_pwd))))\n print('User created.')\n #database.v_connection.Execute('''\n # insert into users_channels (\n # use_in_code,\n # cha_in_code,\n # usc_bo_silenced\n # ) values (\n # 1,\n # 1,\n # 0\n # )\n # '''\n #)\n except Exception as exc:\n print('Error:')\n print(exc)\n\ndef create_superuser(p_user, p_pwd):\n try:\n print('Creating superuser...')\n v_cryptor = Utils.Cryptor('omnidb', 'iso-8859-1')\n database.v_connection.Execute('''\n insert into users values (\n (select coalesce(max(user_id), 0) + 1 from users),'{0}','{1}',1,'14',1,1,'utf-8',';','11')\n '''.format(p_user,v_cryptor.Hash(v_cryptor.Encrypt(p_pwd))))\n print('Superuser created.')\n #database.v_connection.Execute('''\n # insert into users_channels (\n # use_in_code,\n # 
cha_in_code,\n    #        usc_bo_silenced\n    #    ) values (\n    #        1,\n    #        1,\n    #        0\n    #    )\n    #    '''\n    #)\n    except Exception as exc:\n        print('Error:')\n        print(exc)\n\ndef drop_user(p_user):\n    try:\n        v_table = database.v_connection.Query('''\n            select *\n            from users\n            where user_name = '{0}'\n        '''.format(p_user))\n        if len(v_table.Rows) > 0:\n            print('Dropping user {0}...'.format(p_user))\n            database.v_connection.Execute('''\n                delete\n                from users\n                where user_name = '{0}'\n            '''.format(p_user))\n            print('User {0} dropped.'.format(p_user))\n        else:\n            print('User {0} does not exist.'.format(p_user))\n    except Exception as exc:\n        print('Error:')\n        print(exc)\n\ndef list_connections(p_user):\n    try:\n        v_table = database.v_connection.Query('''\n            select c.conn_id as connid,\n                   c.dbt_st_name as technology,\n                   c.server as host,\n                   c.port as port,\n                   c.service as database,\n                   c.user as dbuser\n            from users u\n            inner join connections c\n                    on c.user_id = u.user_id\n            where u.user_name = '{0}'\n            order by conn_id\n        '''.format(p_user))\n        v_cryptor = Utils.Cryptor('omnidb', 'iso-8859-1')\n        for v_row in v_table.Rows:\n            v_row['host'] = v_cryptor.Decrypt(v_row['host'])\n            v_row['port'] = v_cryptor.Decrypt(v_row['port'])\n            v_row['database'] = v_cryptor.Decrypt(v_row['database'])\n            v_row['dbuser'] = v_cryptor.Decrypt(v_row['dbuser'])\n        print(v_table.Pretty())\n    except Exception as exc:\n        print('Error:')\n        print(exc)\n\ndef create_connection(p_username, p_technology, p_host, p_port, p_database, p_dbuser):\n    try:\n        v_users = database.v_connection.Query('''\n            select *\n            from users\n            where user_name = '{0}'\n        '''.format(p_username))\n        if len(v_users.Rows) > 0:\n            v_technologies = database.v_connection.Query('''\n                select *\n                from db_type\n                where dbt_in_enabled = 1\n                  and dbt_st_name = '{0}'\n            '''.format(p_technology))\n            if len(v_technologies.Rows) > 0:\n                print('Creating connection...')\n                v_cryptor = Utils.Cryptor('omnidb', 'iso-8859-1')\n                database.v_connection.Execute('''\n                    insert into connections values (\n                    (select coalesce(max(conn_id), 0) + 1 from connections),\n                    {0}, '{1}', '{2}', '{3}', '{4}', '{5}',\n                    '', '', '', '', '', '', 0\n                    )\n                '''.format(\n                    v_users.Rows[0]['user_id'],\n                    p_technology,\n                    v_cryptor.Encrypt(p_host),\n                    v_cryptor.Encrypt(p_port),\n                    v_cryptor.Encrypt(p_database),\n                    v_cryptor.Encrypt(p_dbuser),\n                ))\n                print('Connection created.')\n            else:\n                print('Technology {0} does not exist.'.format(p_technology))\n        else:\n            print('User {0} does not exist.'.format(p_username))\n    except Exception as exc:\n        print('Error:')\n        print(exc)\n\ndef drop_connection(p_connid):\n    try:\n        v_table = database.v_connection.Query('''\n            select *\n            from connections\n            where conn_id = {0}\n        '''.format(p_connid))\n        if len(v_table.Rows) > 0:\n            print('Dropping connection {0}...'.format(p_connid))\n            database.v_connection.Execute('''\n                delete\n                from connections\n                where conn_id = {0}\n            '''.format(p_connid))\n            print('Connection dropped.')\n        else:\n            print('Connection {0} does not exist.'.format(p_connid))\n    except Exception as exc:\n        print('Error:')\n        print(exc)\n\nif __name__ == \"__main__\":\n\n    if len(sys.argv[1:])==0:\n        parser.print_help()\n        sys.exit(0)\n\n    if options.reset:\n        print('*** ATTENTION *** ALL USERS DATA WILL BE LOST')\n        try:\n            value = input('Would you like to continue? 
(y/n) ')\n if value.lower()=='y':\n #clean_chat()\n clean_users()\n clean_sessions()\n vacuum()\n clean_temp()\n create_superuser('admin', 'admin')\n except Exception as exc:\n print('Error:')\n print(exc)\n\n if options.vacuum:\n vacuum()\n\n if options.deletetemp:\n clean_temp()\n\n if options.listusers:\n list_users()\n\n if options.createuser:\n create_user(options.createuser[0], options.createuser[1])\n\n if options.createsuperuser:\n create_superuser(options.createsuperuser[0], options.createsuperuser[1])\n\n if options.dropuser:\n drop_user(options.dropuser)\n\n if options.listconnections:\n list_connections(options.listconnections)\n\n if options.createconnection:\n create_connection(options.createconnection[0], options.createconnection[1], options.createconnection[2], options.createconnection[3], options.createconnection[4], options.createconnection[5])\n\n if options.dropconnection:\n drop_connection(options.dropconnection)\n","sub_path":"OmniDB/omnidb-config.py","file_name":"omnidb-config.py","file_ext":"py","file_size_in_byte":12995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"248751355","text":"#Use this program along with the \"train_save_tf_model.py\".\n#This function use the result by the above train-save script.\nimport tensorflow as tf\nimport os\n#Place to save the result\nsave_path = os.path.join(os.getcwd(),'trained_model\\\\mynet.ckpt')\ndef predict(model_path, x_data):\n tfx = tf.placeholder(dtype=tf.float32, shape=[1,1])\n l1 = tf.layers.dense(inputs=tfx, units=1024, activation=tf.nn.relu, name='layer_1')\n l2 = tf.layers.dense(inputs=l1, units=1024, activation=tf.nn.relu, name='layer_2')\n output = tf.layers.dense(inputs=l2, units=1, name='output_layer')\n session = tf.Session()\n restorer = tf.train.Saver()\n restorer.restore(session,save_path=model_path)\n pred = session.run(fetches=[output],feed_dict={tfx:x_data})\n print(pred)\n return pred\n\npredict(save_path,[[0.5]])\n\n#predict(save_path,[2.])\n","sub_path":"use_pretrained_tf_model.py","file_name":"use_pretrained_tf_model.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"333687595","text":"\"\"\"Creates a disk volume from a disk offering. This disk volume must still be attached to a virtual machine to make use of it.\"\"\"\nfrom baseCmd import *\nfrom baseResponse import *\n\n\nclass createVolumeCmd (baseCmd):\n typeInfo = {}\n\n def __init__(self):\n self.isAsync = \"true\"\n \"\"\"the account associated with the disk volume. Must be used with the domainId parameter.\"\"\"\n self.account = None\n self.typeInfo['account'] = 'string'\n \"\"\"an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only\"\"\"\n self.customid = None\n self.typeInfo['customid'] = 'string'\n \"\"\"the ID of the disk offering. Either diskOfferingId or snapshotId must be passed in.\"\"\"\n self.diskofferingid = None\n self.typeInfo['diskofferingid'] = 'uuid'\n \"\"\"an optional field, whether to display the volume to the end user or not.\"\"\"\n self.displayvolume = None\n self.typeInfo['displayvolume'] = 'boolean'\n \"\"\"the domain ID associated with the disk offering. 
If used with the account parameter returns the disk volume associated with the account for the specified domain.\"\"\"\n        self.domainid = None\n        self.typeInfo['domainid'] = 'uuid'\n        \"\"\"max iops\"\"\"\n        self.maxiops = None\n        self.typeInfo['maxiops'] = 'long'\n        \"\"\"min iops\"\"\"\n        self.miniops = None\n        self.typeInfo['miniops'] = 'long'\n        \"\"\"the name of the disk volume\"\"\"\n        self.name = None\n        self.typeInfo['name'] = 'string'\n        \"\"\"the project associated with the volume. Mutually exclusive with account parameter\"\"\"\n        self.projectid = None\n        self.typeInfo['projectid'] = 'uuid'\n        \"\"\"Arbitrary volume size\"\"\"\n        self.size = None\n        self.typeInfo['size'] = 'long'\n        \"\"\"the snapshot ID for the disk volume. Either diskOfferingId or snapshotId must be passed in.\"\"\"\n        self.snapshotid = None\n        self.typeInfo['snapshotid'] = 'uuid'\n        \"\"\"the ID of the virtual machine; to be used with snapshot Id, VM to which the volume gets attached after creation\"\"\"\n        self.virtualmachineid = None\n        self.typeInfo['virtualmachineid'] = 'uuid'\n        \"\"\"the ID of the availability zone\"\"\"\n        self.zoneid = None\n        self.typeInfo['zoneid'] = 'uuid'\n        self.required = []\n\n\nclass createVolumeResponse (baseResponse):\n    typeInfo = {}\n\n    def __init__(self):\n        \"\"\"ID of the disk volume\"\"\"\n        self.id = None\n        self.typeInfo['id'] = 'string'\n        \"\"\"the account associated with the disk volume\"\"\"\n        self.account = None\n        self.typeInfo['account'] = 'string'\n        \"\"\"the date the volume was attached to a VM instance\"\"\"\n        self.attached = None\n        self.typeInfo['attached'] = 'date'\n        \"\"\"the chain info of the volume\"\"\"\n        self.chaininfo = None\n        self.typeInfo['chaininfo'] = 'string'\n        \"\"\"the date the disk volume was created\"\"\"\n        self.created = None\n        self.typeInfo['created'] = 'date'\n        \"\"\"the boolean state of whether the volume is destroyed or not\"\"\"\n        self.destroyed = None\n        self.typeInfo['destroyed'] = 'boolean'\n        \"\"\"the ID of the device on user vm the volume is attached to. 
This tag is not returned when the volume is detached.\"\"\"\n self.deviceid = None\n self.typeInfo['deviceid'] = 'long'\n \"\"\"bytes read rate of the disk volume\"\"\"\n self.diskBytesReadRate = None\n self.typeInfo['diskBytesReadRate'] = 'long'\n \"\"\"bytes write rate of the disk volume\"\"\"\n self.diskBytesWriteRate = None\n self.typeInfo['diskBytesWriteRate'] = 'long'\n \"\"\"io requests read rate of the disk volume\"\"\"\n self.diskIopsReadRate = None\n self.typeInfo['diskIopsReadRate'] = 'long'\n \"\"\"io requests write rate of the disk volume\"\"\"\n self.diskIopsWriteRate = None\n self.typeInfo['diskIopsWriteRate'] = 'long'\n \"\"\"the display text of the disk offering\"\"\"\n self.diskofferingdisplaytext = None\n self.typeInfo['diskofferingdisplaytext'] = 'string'\n \"\"\"ID of the disk offering\"\"\"\n self.diskofferingid = None\n self.typeInfo['diskofferingid'] = 'string'\n \"\"\"name of the disk offering\"\"\"\n self.diskofferingname = None\n self.typeInfo['diskofferingname'] = 'string'\n \"\"\"an optional field whether to the display the volume to the end user or not.\"\"\"\n self.displayvolume = None\n self.typeInfo['displayvolume'] = 'boolean'\n \"\"\"the domain associated with the disk volume\"\"\"\n self.domain = None\n self.typeInfo['domain'] = 'string'\n \"\"\"the ID of the domain associated with the disk volume\"\"\"\n self.domainid = None\n self.typeInfo['domainid'] = 'string'\n \"\"\"Hypervisor the volume belongs to\"\"\"\n self.hypervisor = None\n self.typeInfo['hypervisor'] = 'string'\n \"\"\"true if the volume is extractable, false otherwise\"\"\"\n self.isextractable = None\n self.typeInfo['isextractable'] = 'boolean'\n \"\"\"an alternate display text of the ISO attached to the virtual machine\"\"\"\n self.isodisplaytext = None\n self.typeInfo['isodisplaytext'] = 'string'\n \"\"\"the ID of the ISO attached to the virtual machine\"\"\"\n self.isoid = None\n self.typeInfo['isoid'] = 'string'\n \"\"\"the name of the ISO attached to the virtual machine\"\"\"\n self.isoname = None\n self.typeInfo['isoname'] = 'string'\n \"\"\"max iops of the disk volume\"\"\"\n self.maxiops = None\n self.typeInfo['maxiops'] = 'long'\n \"\"\"min iops of the disk volume\"\"\"\n self.miniops = None\n self.typeInfo['miniops'] = 'long'\n \"\"\"name of the disk volume\"\"\"\n self.name = None\n self.typeInfo['name'] = 'string'\n \"\"\"the path of the volume\"\"\"\n self.path = None\n self.typeInfo['path'] = 'string'\n \"\"\"the project name of the vpn\"\"\"\n self.project = None\n self.typeInfo['project'] = 'string'\n \"\"\"the project id of the vpn\"\"\"\n self.projectid = None\n self.typeInfo['projectid'] = 'string'\n \"\"\"provisioning type used to create volumes.\"\"\"\n self.provisioningtype = None\n self.typeInfo['provisioningtype'] = 'string'\n \"\"\"need quiesce vm or not when taking snapshot\"\"\"\n self.quiescevm = None\n self.typeInfo['quiescevm'] = 'boolean'\n \"\"\"the display text of the service offering for root disk\"\"\"\n self.serviceofferingdisplaytext = None\n self.typeInfo['serviceofferingdisplaytext'] = 'string'\n \"\"\"ID of the service offering for root disk\"\"\"\n self.serviceofferingid = None\n self.typeInfo['serviceofferingid'] = 'string'\n \"\"\"name of the service offering for root disk\"\"\"\n self.serviceofferingname = None\n self.typeInfo['serviceofferingname'] = 'string'\n \"\"\"size of the disk volume\"\"\"\n self.size = None\n self.typeInfo['size'] = 'long'\n \"\"\"ID of the snapshot from which this volume was created\"\"\"\n self.snapshotid = None\n 
self.typeInfo['snapshotid'] = 'string'\n        \"\"\"the state of the disk volume\"\"\"\n        self.state = None\n        self.typeInfo['state'] = 'string'\n        \"\"\"the status of the volume\"\"\"\n        self.status = None\n        self.typeInfo['status'] = 'string'\n        \"\"\"name of the primary storage hosting the disk volume\"\"\"\n        self.storage = None\n        self.typeInfo['storage'] = 'string'\n        \"\"\"id of the primary storage hosting the disk volume; returned to admin user only\"\"\"\n        self.storageid = None\n        self.typeInfo['storageid'] = 'string'\n        \"\"\"shared or local storage\"\"\"\n        self.storagetype = None\n        self.typeInfo['storagetype'] = 'string'\n        \"\"\"an alternate display text of the template for the virtual machine\"\"\"\n        self.templatedisplaytext = None\n        self.typeInfo['templatedisplaytext'] = 'string'\n        \"\"\"the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file.\"\"\"\n        self.templateid = None\n        self.typeInfo['templateid'] = 'string'\n        \"\"\"the name of the template for the virtual machine\"\"\"\n        self.templatename = None\n        self.typeInfo['templatename'] = 'string'\n        \"\"\"type of the disk volume (ROOT or DATADISK)\"\"\"\n        self.type = None\n        self.typeInfo['type'] = 'string'\n        \"\"\"id of the virtual machine\"\"\"\n        self.virtualmachineid = None\n        self.typeInfo['virtualmachineid'] = 'string'\n        \"\"\"display name of the virtual machine\"\"\"\n        self.vmdisplayname = None\n        self.typeInfo['vmdisplayname'] = 'string'\n        \"\"\"name of the virtual machine\"\"\"\n        self.vmname = None\n        self.typeInfo['vmname'] = 'string'\n        \"\"\"state of the virtual machine\"\"\"\n        self.vmstate = None\n        self.typeInfo['vmstate'] = 'string'\n        \"\"\"ID of the availability zone\"\"\"\n        self.zoneid = None\n        self.typeInfo['zoneid'] = 'string'\n        \"\"\"name of the availability zone\"\"\"\n        self.zonename = None\n        self.typeInfo['zonename'] = 'string'\n        \"\"\"the list of resource tags associated with volume\"\"\"\n        self.tags = []\n        \"\"\"the ID of the latest async job acting on this object\"\"\"\n        self.jobid = None\n        self.typeInfo['jobid'] = ''\n        \"\"\"the current status of the latest async job acting on this object\"\"\"\n        self.jobstatus = None\n        self.typeInfo['jobstatus'] = ''\n\nclass tags:\n    def __init__(self):\n        \"\"\"the account associated with the tag\"\"\"\n        self.account = None\n        \"\"\"customer associated with the tag\"\"\"\n        self.customer = None\n        \"\"\"the domain associated with the tag\"\"\"\n        self.domain = None\n        \"\"\"the ID of the domain associated with the tag\"\"\"\n        self.domainid = None\n        \"\"\"tag key name\"\"\"\n        self.key = None\n        \"\"\"the project name where tag belongs to\"\"\"\n        self.project = None\n        \"\"\"the project id the tag belongs to\"\"\"\n        self.projectid = None\n        \"\"\"id of the resource\"\"\"\n        self.resourceid = None\n        \"\"\"resource type\"\"\"\n        self.resourcetype = None\n        \"\"\"tag value\"\"\"\n        self.value = None\n\n","sub_path":"marvin/cloudstackAPI/createVolume.py","file_name":"createVolume.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"61608225","text":"from modules.parts_top_view_AE import Autoencoder\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom torchvision import transforms\nimport torchvision.datasets as datasets\nimport os\n\nimport numpy as np\nimport utils.parser\n\nparser = utils.parser.build_parser()  # 'import utils.parser' binds only 'utils', so the call must be qualified\n(options, args) = 
parser.parse_args()\n\nRANDOM_SEED = options.random_seed\nAPP_DIR = options.app_dir\ndata = os.path.join(APP_DIR, \"artifacts/data/parts_data\")\nbatch_size = options.batch_size\nworkers = options.num_workers\nngpu = options.num_gpus\nlearning_rate = options.learning_rate\nnum_epochs = options.num_epochs\ntrain_dir = os.path.join(data, 'train')\nval_dir = os.path.join(data, 'val')\n\nnp.random.seed(RANDOM_SEED)\ntorch.manual_seed(RANDOM_SEED)\n\ntrain_dataset = datasets.ImageFolder(\n    train_dir,\n    transforms.Compose([\n#        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                             std=[0.229, 0.224, 0.225])\n    ]))\n\ntrain_loader = torch.utils.data.DataLoader(\n    train_dataset, \n    batch_size = batch_size, \n    shuffle = True,\n    num_workers = workers, \n    pin_memory = True  # NOTE: dropped 'sampler = train_sampler' -- the name was never defined, and shuffle=True cannot be combined with a sampler\n)\n\nval_loader = torch.utils.data.DataLoader(\n    datasets.ImageFolder(val_dir, transforms.Compose([\n#        transforms.Grayscale(num_output_channels=1),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                             std=[0.229, 0.224, 0.225])\n    ])),\n    batch_size = batch_size, \n    shuffle = True,\n    num_workers = workers, \n    pin_memory = True\n)\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = Autoencoder()\n# if device.type == 'cuda' and ngpu > 1:\n#     model = nn.DataParallel(model, list(range(ngpu)))\n\nmodel = model.to(device)\n\ncriterion = nn.MSELoss()\n\noptimizer = torch.optim.Adam(\n    model.parameters(),\n    lr=learning_rate,\n)\n\n\ndataset_len = len(train_loader.dataset)\nval_dataset_len = len(val_loader.dataset)\nvalidation_losses = []\nrunning_avg_training_losses = []\n\nfor epoch in range(num_epochs):\n    torch.cuda.empty_cache()\n    total = 0\n    running_total_training_loss = 0\n\n    print(f'-- running epoch {epoch + 1} --')\n\n    for data in train_loader:\n        img, _ = data\n        img = img.to(device)\n        # ===================forward=====================\n        output = model(img) \n        loss = criterion(output, img.data)\n        # ===================backward====================\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        total += 1 \n\n        running_total_training_loss += float(loss)\n\n    running_avg_training_losses.append(running_total_training_loss/total)\n\n    with torch.no_grad():\n        total_vloss = 0\n        for val_data in val_loader:\n            vimg, _ = val_data\n            vimg = vimg.to(device)\n            voutput = model(vimg)\n            vloss = criterion(voutput, vimg.data)\n            total_vloss += float(vloss)\n        validation_losses.append(total_vloss)\n    \n    print(f'epoch [{epoch + 1}/{num_epochs}], data trained:{100 * total / dataset_len :.3f}%, running avg training loss:{running_avg_training_losses[-1]:.4f}')\n    print(validation_losses)\n\n    if (epoch + 1) % 10 == 0:\n        if torch.cuda.is_available():\n            torch.save(model, '../artifacts/models/ae_latent_noise_gpu_model_b64_w2_e'+ str(epoch + 1) +'.pt')\n            model.to(torch.device('cpu'))\n            torch.save(model, '../artifacts/models/ae_latent_noise_cpu_model_b64_w2_e'+ str(epoch + 1) +'.pt')\n            model.to(device) \n        else:\n            torch.save(model, '../artifacts/models/ae_latent_noise_cpu_model_b64_w2_e'+ str(epoch + 1) +'.pt')\n","sub_path":"src/train_parts_top_view_AE.py","file_name":"train_parts_top_view_AE.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"260662569","text":"import os\nimport glob\nimport datetime\nimport pandas as pd\n\n\ndef countFiles(barcodes,files,portals,csvLogFilePath):\n\n    # All the info to keep track 
of and report \n numBarcodes=str(len(set(barcodes)))\n numFiles=str(len(files))\n numVascular=str(portals.count('Vascular'))\n numAlgae=str(portals.count('Algae'))\n numBryophyte=str(portals.count('Bryophyte'))\n numFungi=str(portals.count('Fungi')) \n numLichen=str(portals.count('Lichen'))\n todaysDate=str(datetime.date.today().strftime(\"%Y-%m-%d\"))\n\n # Header for log file\n firstLine=['CSVDate','Barcodes','Files','Vascular','Algae','Bryophyte','Fungi','Lichen']\n logHeader = \",\".join(firstLine)\n \n # Get line to add to csv file\n csvLine = [todaysDate,numBarcodes,numFiles,numVascular,numAlgae,numBryophyte,numFungi,numLichen]\n csvLogLine=\",\".join(csvLine)\n\n # If csv file does not exist, create with header \n\n if not os.path.exists(csvLogFilePath):\n #print(header)\n with open(csvLogFilePath,\"w\") as csvLogFile:\n csvLogFile.write(\"%s\\n\" % logHeader)\n csvLogFile.write(\"%s\\n\" % csvLogLine)\n csvLogFile.close()\n \n # If csv file exists, add new line \n\n elif os.path.exists(csvLogFilePath):\n #print(csvLine)\n with open(csvLogFilePath,\"a\") as csvLogFile:\n csvLogFile.write(\"%s\\n\" % csvLogLine)\n csvLogFile.close()\n\ndef makeCSV(logFolder,csvFolder,webPath,header,newest,oldest,csvLogFilePath):\n\n # Get list of all log files edited on specified dates. \n logFilesList=[]\n #print(oldest,newest)\n\n # Get all files in Log Folder\n logFolderList = []\n for file in os.listdir(logFolder):\n if file.endswith(\".txt\"):\n path = os.path.join(logFolder, file)\n logFolderList.append(path)\n\n # Get all files that were imaged on the specified dates \n for p in logFolderList:\n #print(path)\n st = os.stat(p) \n mtime = datetime.date.fromtimestamp(st.st_mtime)\n # If mtime is greater(newer) than oldest date and smaller(older) than newest date\n if mtime >= oldest and mtime <= newest:\n #print(p)\n #print(mtime)\n logFilesList.append(p)\n\n # Set date name for csv file - today's date \n logDate = str(datetime.date.today().strftime(\"%Y-%m-%d\"))\n\n # Create lists to calculate summary numbers\n barcodes=[]\n files=[]\n portals=[]\n\n # Iterate through log files \n for logFile in logFilesList:\n\n # Open the file \n logF = open(logFile,\"r\")\n #print(logFile)\n\n # Loop through lines in log file \n for oPath in logF:\n\n # Get portal name \n portal=oPath.split(\"/\")[0]\n\n # Get path to csv file line will be written to.\n csvPath = os.path.join(csvFolder,logDate+\"_\"+portal+\".csv\")\n\n # Get path to original image\n path = os.path.split(oPath)[0]\n\n # Get file name without extension\n fileName=os.path.basename(oPath).split(\".\")[0]\n\n # Get barcode \n barCode=fileName.split(\"_\")[0]\n\n # Create web ready and thumbnail paths to web address \n wr=os.path.join(webPath,path,fileName+'_WR.JPG')\n tn=os.path.join(webPath,path,fileName+'_TN.JPG')\n lg=os.path.join(webPath,path,fileName+'_L.JPG')\n\n # Creat csv file line\n info=[barCode,lg,tn,wr]\n csvLine=','.join(info)\n\n # If csv file does not exist, create with header \n if not os.path.exists(csvPath):\n #print(header)\n with open(csvPath,\"w\") as csvFile:\n csvFile.write(\"%s\\n\" % header)\n csvFile.write(\"%s\\n\" % csvLine)\n csvFile.close()\n \n # If csv file exists, add new line \n elif os.path.exists(csvPath):\n #print(csvLine)\n with open(csvPath,\"a\") as csvFile:\n csvFile.write(\"%s\\n\" % csvLine)\n csvFile.close()\n else:\n print(\"This should never happen\")\n \n # Add to portal dict for reporting numbers\n barcodes.append(barCode)\n files.append(fileName)\n portals.append(portal)\n\n # Write out log file 
of all the files that got csv'd\n countFiles(barcodes,files,portals,csvLogFilePath)\n\ndef main():\n # Set times for the days that you want logs from\n # Days begin and end at midnight. \n # Ex: Logs from June 3rd-July 10th. \n # Ex: newest = datetime.datetime(year=2020,month=7,day=11)\n # Ex: oldest = datetime.datetime(year=2020,month=6,day=3)\n \n # Hash out the newest/oldest lines that you do not want to use. \n # Edit either the exact dates for the number of days wanted.\n \n # Span of days (default)\n newest = datetime.date.today()\n oldest = newest - datetime.timedelta(days=7)\n \n # Exact dates\n #newest = datetime.datetime(year=2020,month=7,day=11)\n #oldest = datetime.datetime(year=2020,month=6,day=3)\n\n # Path to log files that are made when images are uploaded to server\n logFolder = '/mnt/e/CFLA-LSU-Station2/LSUCollections/Logs/'\n #logFolder = '/Users/ChatNoir/Projects/HerbariumRA/test/'\n \n # Path to folder where CSV files will be made\n csvFolder='/mnt/e/CFLA-LSU-Station2/LSUCollections/CSVLogs/'\n #csvFolder='/Users/ChatNoir/Projects/HerbariumRA/test/csv/'\n\n # Web address for linking images \n webPath = 'http://cyberfloralouisiana.com/images/LSUCollections/' \n\n # Header for csv file, compatable with Symbiota\n\n csvHeader = ['catalogNumber','large JPG','thumbnail','webview']\n header = \",\".join(csvHeader)\n\n # Path to a log file that counts the number of files etc in each csv file\n # This log is extremely customized for LSU, if you want to implement it, edit the function\n\n csvLogFilePath = os.path.join(csvFolder,'csvLog.csv')\n\n # Call function\n makeCSV(logFolder,csvFolder,webPath,header,newest,oldest,csvLogFilePath)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"WorkflowScripts/Old/WeeklyCSV.py","file_name":"WeeklyCSV.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"73739488","text":"import os\n# from azure.storage.blob import BlockBlobService\n# from azure.storage.blob.models import ContainerPermissions\nimport datetime\nimport pickle\n\ndef load_keys(secrets_file):\n secrets_file = os.path.abspath(secrets_file)\n if os.path.isfile(secrets_file):\n with open(secrets_file, 'rb') as f:\n keys = pickle.load(f)\n print('You have:')\n for key in keys:\n print(' ' + key)\n return keys\n else:\n raise NameError('Secrets file not found.')\n\n#def gen_token(container, account_key, expiration):\n# ooiopendata_service = BlockBlobService('ooiopendata', account_key)\n# container_sas = ooiopendata_service.generate_container_shared_access_signature(container,\n# ContainerPermissions.READ +\n# ContainerPermissions.WRITE +\n# ContainerPermissions.DELETE +\n# ContainerPermissions.LIST,\n# datetime.datetime.utcnow() + expiration)\n# return container_sas\n\ndef write_pickle(containers, account_key, expiration, filename, overwrite=False, include_account_key=False):\n if type(containers) == list:\n keys = dict()\n for container in containers:\n container_sas = gen_token(container, account_key, expiration)\n keys[container] = container_sas\n if include_account_key == True:\n keys['ooiopendata'] = account_key\n pickle_path = os.path.abspath(filename)\n if os.path.isfile(pickle_path) == False:\n with open(pickle_path, 'wb') as f:\n pickle.dump(keys, f, protocol=0)\n elif overwrite == True:\n with open(pickle_path, 'wb') as f:\n pickle.dump(keys, f, protocol=0)\n else:\n raise Exception('File exists.')\n else:\n raise TypeError('The \\'containers\\' argument must be a 
list.')\n","sub_path":"ooiod/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"382857685","text":"\"\"\"Resources module.\"\"\"\n\nimport abc\nimport sys\nfrom typing import TypeVar, Generic\n\nif sys.version_info < (3, 7):\n from typing import GenericMeta\nelse:\n class GenericMeta(type):\n ...\n\n\nT = TypeVar('T')\n\n\nclass ResourceMeta(GenericMeta, abc.ABCMeta):\n def __getitem__(cls, item):\n # Spike for Python 3.6\n return cls(item)\n\n\nclass Resource(Generic[T], metaclass=ResourceMeta):\n\n @abc.abstractmethod\n def init(self, *args, **kwargs) -> T:\n ...\n\n @abc.abstractmethod\n def shutdown(self, resource: T) -> None:\n ...\n\n\nclass AsyncResource(Generic[T], metaclass=ResourceMeta):\n\n @abc.abstractmethod\n async def init(self, *args, **kwargs) -> T:\n ...\n\n @abc.abstractmethod\n async def shutdown(self, resource: T) -> None:\n ...\n","sub_path":"src/dependency_injector/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"143261973","text":"import numpy as np\nfrom pandas import DataFrame\n\nclass FuzzyNumber:\n def __init__(self, polyline):\n self.polyline = np.array(polyline, np.float64)\n\n @staticmethod\n def get_ab( point1, point2 ):\n a = (point2[1]-point1[1])/(point2[0]-point1[0])\n b = point1[1] - a * point1[0]\n return a,b\n\n def get_membership(self, x):\n if x <= self.polyline[0,0]:\n return self.polyline[0,1]\n\n if x >= self.polyline[-1,0]:\n return self.polyline[-1,1]\n\n inds = np.where(self.polyline[:,0] == x)\n\n if len(inds)>0 and inds[0].size > 0:\n return self.polyline[inds[0][-1],1]\n\n inds = np.where((self.polyline[:-1,0] < x) & (self.polyline[1:,0] > x) )\n\n if len(inds)==0 or inds[0].size == 0:\n raise ValueError(\"Can not find point\")\n\n a,b = self.get_ab( self.polyline[inds[0][0]], self.polyline[inds[0][0]+1] )\n return (a * x + b)\n\n def prep_scale(self, other):\n scale = np.union1d(self.polyline[:,0], other.polyline[:,0])\n scale_add = []\n for x1, x2 in zip(scale[:-1],scale[1:]):\n ys1 = self.get_membership(x1)\n ys2 = self.get_membership(x2)\n yo1 = other.get_membership(x1)\n yo2 = other.get_membership(x2)\n if (ys1 > yo1 and ys2 < yo2) or (ys1 < yo1 and ys2 > yo2):\n a_s,b_s = self.get_ab( [x1,ys1], [x2,ys2] )\n a_o,b_o = self.get_ab( [x1,yo1], [x2,yo2] )\n scale_add.append(round((b_o-b_s)/(a_s-a_o),5))\n return np.union1d(scale,scale_add)\n\n def __eq__(self, other):\n scale = np.union1d(self.polyline[:,0], other.polyline[:,0])\n return 1.0 - max(abs(self.get_membership(x) - other.get_membership(x)) for x in scale)\n\n def __ge__(self, other):\n scale = self.prep_scale(other)\n mh = np.array(np.meshgrid(scale,scale)).T.reshape(-1,2)\n return max( min(self.get_membership(x[0]),other.get_membership(x[1]))\n for x in mh if x[0] >= x[1] )\n\n def __le__(self, other):\n return other >= self\n\n def general_operation(self, other,operation):\n scale = self.prep_scale(other)\n mh = np.array(np.meshgrid(scale,scale)).T.reshape(-1,2)\n y = np.round(operation(mh[:,0],mh[:,1]),5)\n mu = [ min(self.get_membership(x[0]),other.get_membership(x[1]))\n for x in mh ]\n df = DataFrame(dict(y=y, mu=mu))\n s = df.groupby('y')['mu'].max()\n a = self.polyline[0][0]\n b = self.polyline[-1][0]\n for yy in s.index:\n if yy == 0 and operation == np.divide:\n continue\n for s1 in 
np.linspace(a,b,1000):\n                if s1 == 0 and operation == np.multiply:\n                    continue\n                s2 = {\n                    np.multiply: yy / s1,\n                    np.add: yy - s1,\n                    np.subtract: s1 - yy,\n                    np.divide: s1 / yy\n                }[operation]\n                m = min(self.get_membership(s1),other.get_membership(s2))\n                if s[yy] < m:\n                    s[yy] = m\n        return FuzzyNumber(list(zip(s.index,s.values)))\n\n    def __add__(self,other):\n        return self.general_operation(other, np.add)\n\n    def __sub__(self,other):\n        return self.general_operation(other, np.subtract)\n\n    def __mul__(self,other):\n        return self.general_operation(other, np.multiply)\n\n    def __truediv__(self,other):\n        return self.general_operation(other, np.divide)\n\n    def __repr__(self):\n        spolyline = repr(self.polyline)\n        spolyline = spolyline[spolyline.find('['):-1]\n        return f\"FuzzyNumber({spolyline})\"\n\n    def clip_membership( self, a ):\n        b = self.polyline[:,1].reshape(-1,1)\n        return FuzzyNumber(list(zip(self.polyline[:,0], np.min(np.insert(b,1,a,axis=1),axis=1))))\n\n    def get_max_membership(self):\n        return np.max(self.polyline[:,1])\n\n    def logic_operation(self, other, operation):\n        scale = self.prep_scale(other).reshape(-1,1)\n        mus = np.apply_along_axis(self.get_membership, 1, scale)\n        muo = np.apply_along_axis(other.get_membership, 1, scale)\n        polyline = np.hstack((scale, operation(np.vstack((mus,muo)),axis=0).reshape(-1,1)))\n        return FuzzyNumber(polyline)\n\n    def __and__(self, other):\n        return self.logic_operation(other, np.min)\n\n    def __or__(self, other):\n        return self.logic_operation(other, np.max)\n\n    def defuzzification(self):\n        a,b = self.get_ab(self.polyline[:-1].T, self.polyline[1:].T)\n        s1 = np.sum(a * (self.polyline[1:,0]**3 - self.polyline[:-1,0]**3)/3.0 + \\\n             b * (self.polyline[1:,0]**2 - self.polyline[:-1,0]**2)/2.0)\n        s2 = np.sum(0.5 * (self.polyline[1:,0] - self.polyline[:-1,0]) * \\\n             (self.polyline[1:,1] + self.polyline[:-1,1]))\n        return s1 / s2\n","sub_path":"python/fuzzy_number.py","file_name":"fuzzy_number.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"433763796","text":"from typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\nclass Solution:\n    def mergeTwoLists(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        new = ListNode()\n        top = new\n        while l1 or l2:\n            if l1 is None:\n                new.next = l2\n                break\n            elif l2 is None:\n                new.next = l1\n                break\n            \n            if l1.val <= l2.val:\n                new.next = l1\n                l1 = l1.next\n                new = new.next\n            elif l1.val > l2.val:\n                new.next = l2\n                l2 = l2.next\n                new = new.next\n        \n        return top.next","sub_path":"easy/merge-two-sorted-lists.py","file_name":"merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"539134808","text":"import math\n# uses math.exp\n\ndef salida_neurona(l1,l2):\n    if len(l1)!=len(l2):\n        return \"Error\"\n    else:\n        long = len(l1)\n        x=0.0\n        for i in range(long):\n            x = x + l1[i]*l2[i] \n        sig = 1 / (1 + math.exp(-x)) \n        return sig\n","sub_path":"src/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"324588422","text":"import os\nMD5Dict = dict()\n\n\ndef walk(dirpath):\n    global MD5Dict\n    for name in os.listdir(dirpath):\n        path = os.path.join(dirpath, name)\n        if 
os.path.isdir(path):\n walk(path)\n else:\n cmd = 'md5sum ' + '\"' + path + '\"'\n res = os.popen(cmd).read()\n fileList = res.split()\n MD5 = fileList[0]\n MD5Dict[MD5] = MD5Dict.setdefault(MD5, [])\n MD5Dict[MD5].append(fileList[1])\n\n\nif __name__ == \"__main__\":\n p = os.sys.argv[1]\n walk(p)\n for key, value in MD5Dict.items():\n if len(value) > 1:\n print(value)\n","sub_path":"0-ThinkPython/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"186832704","text":"#!/usr/bin/env python2.7\n\n''' Main operations in annonex2embl '''\n\n#####################\n# IMPORT OPERATIONS #\n#####################\n\nimport MyExceptions as ME\nimport CheckingOps as CkOps\nimport DegappingOps as DgOps\nimport GenerationOps as GnOps\nimport GlobalVariables as GlobVars\nimport ParsingOps as PrOps\nimport IOOps as IOOps\nimport datetime\nimport sys\nimport os\n\nfrom Bio import SeqIO\n#from Bio.Alphabet import generic_dna\n#from Bio.Seq import Seq\nfrom Bio import SeqFeature\nfrom collections import OrderedDict\nfrom copy import copy\nfrom distutils.util import strtobool\nfrom termcolor import colored\n\n# Add specific directory to sys.path in order to import its modules\n# NOTE: THIS RELATIVE IMPORTING IS AMATEURISH.\n# NOTE: COULD THE FOLLOWING IMPORT BE REPLACED WITH 'import annonex2embl'?\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'annonex2embl'))\n\n###############\n# AUTHOR INFO #\n###############\n\n__author__ = 'Michael Gruenstaeudl '\n__copyright__ = 'Copyright (C) 2016-2019 Michael Gruenstaeudl'\n__info__ = 'annonex2embl'\n__version__ = '2019.05.15.1500'\n\n#############\n# DEBUGGING #\n#############\n\nimport pdb\n# pdb.set_trace()\n\n###########\n# CLASSES #\n###########\n\n#############\n# FUNCTIONS #\n#############\ndef annonex2embl(path_to_nex,\n path_to_csv,\n descr_DEline,\n email_addr,\n author_names,\n path_to_outfile,\n\n manifest_study='',\n manifest_name='',\n manifest_description='',\n product_check='False',\n tax_check='False',\n linemask='False',\n topology='linear',\n tax_division='PLN',\n uniq_seqid_col='isolate',\n transl_table='11',\n organelle='plastid',\n seq_version='1'):\n\n########################################################################\n\n# 0. MAKE SPECIFIC VARIABLES BOOLEAN\n productcheck_bool = strtobool(product_check)\n taxcheck_bool = strtobool(tax_check)\n linemask_bool = strtobool(linemask)\n\n########################################################################\n\n# 1. OPEN OUTFILE\n outp_handle = open(path_to_outfile, 'a')\n\n########################################################################\n\n# 2. PARSE DATA FROM .NEX-FILE\n try:\n charsets_global, alignm_global = IOOps.Inp().\\\n parse_nexus_file(path_to_nex)\n except ME.MyException as e:\n sys.exit('%s annonex2embl ERROR: %s' % ('\\n', e))\n\n\n########################################################################\n\n# 3. 
PARSE DATA FROM .CSV-FILE\n try:\n raw_qualifiers = IOOps.Inp().parse_csv_file(path_to_csv)\n except ME.MyException as e:\n sys.exit('%s annonex2embl ERROR: %s' % ('\\n', e))\n\n########################################################################\n\n# 4.1 CHECK QUALIFIERS\n# 4.1.1 Perform quality checks on qualifiers\n try:\n CkOps.QualifierCheck(raw_qualifiers, uniq_seqid_col).\\\n quality_of_qualifiers()\n except ME.MyException as e:\n sys.exit('%s annonex2embl ERROR: %s' % ('\\n', e))\n# 4.1.2 Remove qualifiers without content (i.e. empty qualifiers)\n nonempty_qualifiers = CkOps.QualifierCheck.\\\n _rm_empty_qual(raw_qualifiers)\n# 4.1.3 Enforce that all qualifier values consist of ASCII characters\n filtered_qualifiers = CkOps.QualifierCheck.\\\n _enforce_ASCII(nonempty_qualifiers)\n\n####################################\n\n# 4.2 CHECK SEQUENCES\n sorted_seqnames = sorted(alignm_global.keys())\n sorted_seqids = sorted([d[uniq_seqid_col] for d in filtered_qualifiers])\n# 4.2.1. Exit if seq names in NEX-file not identical to seq ids in csv-file\n not_shared = list(set(sorted_seqnames) - set(sorted_seqids))\n if not_shared:\n sys.exit('%s annonex2embl ERROR: Sequence names in `%s` '\n 'are NOT IDENTICAL to sequence IDs in `%s`.'\n '%s The following sequence names don\\'t have a match: `%s`'\n % ('\\n', colored(path_to_nex, 'red'),\n colored(path_to_csv, 'red'), '\\n',\n colored(','.join(not_shared), 'red')))\n\n########################################################################\n# 5. PARSE OUT FEATURE KEY, OBTAIN OFFICIAL GENE NAME AND GENE PRODUCT\n charset_dict = {}\n for charset_name in charsets_global.keys():\n try:\n charset_sym, charset_type, charset_orient, charset_product = PrOps.\\\n ParseCharsetName(charset_name, email_addr, productcheck_bool).parse()\n except ME.MyException as e:\n sys.exit('%s annonex2embl ERROR: %s' % ('\\n',\n colored(e, 'red')))\n\n charset_dict[charset_name] = (charset_sym, charset_type, charset_orient,\n charset_product)\n\n########################################################################\n# 6. GENERATING SEQ_RECORDS BY LOOPING THROUGH EACH SEQUENCE OF THE ALIGNMENT\n# Work off the sequences alphabetically.\n for counter, seq_name in enumerate(sorted_seqnames):\n # TFLs generate safe copies of charset and alignment for every\n # loop iteration\n charsets_withgaps = copy(charsets_global)\n alignm = copy(alignm_global)\n\n####################################\n\n# 6.1. SELECT CURRENT SEQUENCES AND CURRENT QUALIFIERS\n current_seq = alignm[seq_name]\n current_quals = [d for d in filtered_qualifiers\n if d[uniq_seqid_col] == seq_name][0]\n\n####################################\n\n# 6.2. GENERATE THE BASIC SEQ_RECORD (I.E., WITHOUT FEATURES)\n\n# 6.2.1. Generate the basic SeqRecord\n seq_record = GnOps.GenerateSeqRecord().base_record(\n current_seq, current_quals, uniq_seqid_col, seq_version,\n descr_DEline, topology, tax_division, organelle)\n\n # Add a function that automatically removes all sequences\n # that consist only of Ns (or ?s).\n skip = True\n for i in seq_record.seq:\n if i != 'N' and i != '?':\n skip = False\n break\n if skip:\n continue\n\n####################################\n\n# 6.3. CLEAN UP THE SEQUENCE OF THE SEQ_RECORD (i.e., remove leading or\n# trailing ambiguities, remove gaps), but maintain correct\n# annotations.\n# Note 1: This clean-up has to occur before (!) 
the SeqFeature\n# 'source' is generated, as the source feature provides info on\n# the full sequence length.\n# Note 2: Charsets are identical across all sequences.\n\n# 6.3.1. Replace question marks in DNA sequence with 'N'\n seq_record.seq._data = seq_record.seq._data.replace('?', 'N')\n # TFL generates a safe copy of sequence to work on\n seq_withgaps = copy(seq_record.seq)\n\n\n# 6.3.2. Remove leading ambiguities while maintaining\n# correct annotations\n seq_noleadambigs, charsets_noleadambigs = DgOps.\\\n RmAmbigsButMaintainAnno().rm_leadambig(seq_withgaps, 'N',\n charsets_withgaps)\n\n# 6.3.3. Remove trailing ambiguities while maintaining\n# correct annotations\n seq_notrailambigs, charsets_notrailambigs = DgOps.\\\n RmAmbigsButMaintainAnno().rm_trailambig(seq_noleadambigs,\n 'N', charsets_noleadambigs)\n\n# 6.3.4. (FUTURE) Give note that leading or trailing ambiguities were\n# removed; for future association with of fuzzy ends\n# if seq_noltambigs != seq_record.seq:\n# ltambigs_removed = True\n\n# 6.3.5. Degap the sequence while maintaining correct annotations\n seq_nogaps, charsets_degapped = DgOps.\\\n DegapButMaintainAnno(seq_notrailambigs, '-',\n charsets_notrailambigs).degap()\n# 6.3.6. Add gap features where stretches of Ns in sequence\n seq_final, charsets_final = DgOps.\\\n AddGapFeature(seq_nogaps, charsets_degapped).add()\n # TFL assigns the deambiged and degapped sequence back\n seq_record.seq = seq_final\n####################################\n\n# 6.4. GENERATE SEQFEATURE 'SOURCE' AND TEST TAXON NAME AGAINST\n# NCBI TAXONOMY\n\n# 6.4.1. Generate SeqFeature 'source' and append to features list\n charset_names = charsets_final.keys()\n source_feature = GnOps.GenerateSeqFeature().\\\n source_feat(len(seq_record), current_quals, charset_names)\n seq_record.features.append(source_feature)\n####################################\n\n# 6.5. VALIDATE TAXON NAME\n\n# 6.5.1. Test taxon name against NCBI taxonomy; if not listed, adjust\n# taxon name and append ecotype info\n if taxcheck_bool:\n seq_record = PrOps.ConfirmAdjustTaxonName().go(seq_record,\n email_addr)\n\n####################################\n\n# 6.6. POPULATE THE FEATURE KEYS WITH THE CHARSET INFORMATION\n# Note: Each charset represents a dictionary that must be added in\n# full to the list \"SeqRecord.features\"\n for charset_name, charset_range in charsets_final.items():\n\n# 6.6.1. Proceed in loop only if charset_range is not empty\n# An empty charset_range could be the case if the charset only\n# consisted of 'N' (which were removed in steps 6.3.2 and 6.3.3).\n if charset_range:\n\n# 6.6.2. Convert charset_range into Location Object\n location_object = GnOps.GenerateFeatLoc().make_location(charset_range)\n\n# 6.6.3. Assign a gene product to a gene name, unless it's a gap feature\n if charset_name[0:4] == \"gap\":\n charset_sym = None\n charset_type = \"gap\"\n charset_orient = \"forw\"\n charset_product = None\n else:\n charset_sym, charset_type, charset_orient, charset_product = charset_dict[charset_name]\n\n# 6.6.4. Generate a regular SeqFeature and append to seq_record.features\n# Note: The position indices for the stop codon are truncated in\n# this step.\n seq = []\n [seq.append(seq_record[obj]) for obj in location_object]\n seq = ''.join(seq)\n\n seq_feature = GnOps.GenerateSeqFeature().regular_feat(\n charset_sym, charset_type, charset_orient, location_object, transl_table,\n seq, charset_product)\n seq_record.features.append(seq_feature)\n\n####################################\n\n# 6.7. 
SORT ALL SEQ_RECORD.FEATURES EXCEPT THE FIRST ONE (WHICH\n# CONSTITUTES THE SOURCE FEATURE) BY THEIR RELATIVE START\n# POSITIONS\n sorted_features = sorted(seq_record.features[1:],\n key=lambda x: x.location.start.position)\n seq_record.features = [seq_record.features[0]] + sorted_features\n####################################\n\n# 6.8. TRANSLATE AND CHECK QUALITY OF TRANSLATION\n removal_list = []\n last_seen = [\"type\", \"before\", \"after\"]\n for indx, feature in enumerate(seq_record.features):\n # Check if feature is a coding region\n if feature.type == 'CDS' or feature.type == 'gene':\n try:\n # In TFL, features are truncated to the first\n # internal stop codon, if present.\n last_seen[0] = feature.type\n last_seen[1] = feature.location\n feature = CkOps.TranslCheck().\\\n transl_and_quality_of_transl(seq_record,\n feature, transl_table)\n last_seen[2] = feature.location\n except ME.MyException as e:\n print('%s annonex2embl WARNING: %s Feature `%s` '\n '(type: `%s`) of sequence `%s` is not saved to '\n 'output.' % ('\\n', colored(e, 'red'),\n colored(feature.id, 'red'),\n colored(feature.type, 'red'),\n colored(seq_record.id, 'red')))\n removal_list.append(indx)\n elif feature.type == 'IGS' or feature.type == 'intron':\n if last_seen[0] == 'CDS' or last_seen[0] == 'gene':\n if not last_seen[1] == last_seen[2]:\n feature.location = CkOps.TranslCheck().\\\n adjustLocation(feature.location, last_seen[2])\n last_seen = [\"type\",\"loc_before\",\"loc_after\"]\n else:\n last_seen = [\"type\",\"loc_before\",\"loc_after\"]\n # TFL removes the objects in reverse order, because otherwise\n # each removal would shift the indices of subsequent objects\n # to the left.\n for indx in sorted(removal_list, reverse=True):\n seq_record.features.pop(indx)\n\n####################################\n# 6.9. INTRODUCE FUZZY ENDS\n for feature in seq_record.features:\n # Check if feature is a coding region\n if feature.type == 'CDS' or feature.type == 'gene':\n # Note: Don't use \"feature.extract(seq_record.seq)\" in TFLs,\n # as stop codon was truncated from feature under\n # Step 6.8, because in an ENA record, the AA sequence\n # of the translation does not have the stop codon\n # (i.e., the '*'), while the feature location\n # range (i.e., 738..2291) very much includes\n # its position (which is biologically logical).\n charset_range_updated = range(feature.location.start.position,\n feature.location.end.position)\n coding_seq = ''.join([seq_record.seq[i] for i in charset_range_updated])\n if not coding_seq.startswith(GlobVars.nex2ena_start_codon):\n feature.location = GnOps.GenerateFeatLoc().\\\n make_start_fuzzy(feature.location)\n if all([not coding_seq.endswith(c)\n for c in GlobVars.nex2ena_stop_codons]):\n feature.location = GnOps.GenerateFeatLoc().\\\n make_end_fuzzy(feature.location)\n\n# (FUTURE) Also introduce fuzzy ends to features when those had leading or trailing Ns removed,\n# because the removed Ns may constitute start of stop codons.\n\n####################################\n\n# 6.10. DECISION ON OUTPUT FORMAT\n IOOps.Outp().write_EntryUpload(seq_record, outp_handle,\n linemask_bool)\n\n########################################################################\n\n# 7. CLOSE OUTFILE\n outp_handle.close()\n\n########################################################################\n\n# 8. POST-PROCESSING OF EntryUpload FILES\n# 8.1. 
Addition of author name\n date_today = datetime.date.today().strftime(\"%d-%b-%Y\").upper()\n os.system(\"sed -i $'s/FH Key Location\\/Qualifiers/\" +\n \"RN \\[1\\]\" +\n \"\\\\\\nRA \" + author_names +\n \"\\\\\\nRT \\;\" +\n \"\\\\\\nRL Submitted \\(\" + date_today + \"\\) to the INSDC.\" +\n \"\\\\\\nXX\" +\n \"\\\\\\nFH Key Location\\/Qualifiers/g' \" + path_to_outfile)\n# 8.2. Corrections\n os.system(\"sed -i 's/\\; DNA\\;/\\; genomic DNA\\;/g' \"+path_to_outfile)\n\n# 9. Create Manifest file\n if(manifest_study!='' and manifest_name!=''):\n IOOps.Outp().create_manifest_file(path_to_outfile, manifest_study, manifest_name, manifest_description)\n elif(manifest_study!='' or manifest_name!=''):\n raise ME.MyException('Error by creating manifest file. Please give both information -ms study name and -mn your name.')\n","sub_path":"annonex2embl/Annonex2emblMain.py","file_name":"Annonex2emblMain.py","file_ext":"py","file_size_in_byte":15928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"150668298","text":"#\n# Copyright (c) 2018-2019 One Identity\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\nimport requests\nfrom pytest import fixture\nfrom unittest.mock import patch, MagicMock\nfrom ..client import StarlingClient\n\n\n@fixture\ndef mocked_client():\n cache_mock = MagicMock()\n cache_mock.get.return_value = \"token\"\n return StarlingClient(cache=cache_mock)\n\n\n@fixture\ndef mocked_response():\n def get_mocked_response(error_json):\n response = MagicMock()\n error_json.update({\"id\": \"user_id\"})\n response.json.return_value = error_json\n response.status_code = requests.codes.ok\n return response\n\n return get_mocked_response\n\n\n@patch(\"requests.post\")\ndef test_user_doesnt_exist_gets_set(post, mocked_client, mocked_response):\n response = mocked_response({\"errorMessage\": {\"errorCode\": 60016}})\n post.return_value = response\n mocked_client.provision_user(\"phone\", \"email\", \"name\")\n assert mocked_client.user_doesnt_exist\n\n\n@patch(\"requests.post\")\ndef test_user_doesnt_exist_gets_set_on_general_error(post, mocked_client, mocked_response):\n response = mocked_response({\"errorMessage\": \"General error\"})\n post.return_value = response\n mocked_client.provision_user(\"phone\", \"email\", \"name\")\n assert not mocked_client.user_doesnt_exist\n","sub_path":"lib/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"488278679","text":"### A sample logster parser file that can be used to count the number\n### of various stats from Digimap.\n###\n### This class was copied from SampleLogster.\n###\n### For example:\n### sudo ./logster --dry-run --output=ganglia DMWebLogster /var/log/httpd/access_log\n###\n###\n### Copyright 2011, Etsy, Inc., 2013 University of Edinburgh\n###\n### This file is part of Logster.\n###\n### Logster is free software: you can redistribute it and/or modify\n### it under the terms of the GNU General Public License as published by\n### the Free Software Foundation, either version 3 of the License, or\n### (at your option) any later version.\n###\n### Logster is distributed in the hope that it will be useful,\n### but WITHOUT ANY WARRANTY; without even the implied warranty of\n### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n### GNU General Public License for more details.\n###\n### You should have received a copy of the GNU General Public License\n### along with Logster. 
If not, see <http://www.gnu.org/licenses/>.\n###\n\nimport time\nimport re\n\nfrom logster.logster_helper import MetricObject, LogsterParser\nfrom logster.logster_helper import LogsterParsingException\n\nclass DMWebLogster(LogsterParser):\n\n def __init__(self, option_string=None):\n '''Initialize any data structures or variables needed for keeping track\n of the tasty bits we find in the log we are parsing.'''\n self.logins = {} # logins via EdiAuth/Digimap\n self.loginsResponse = {}\n self.loginsApi = {} # logins via the schools API(DataNation)\n self.loginsApiResponse = {}\n self.registrations = {}\n self.registrationsResponse = {}\n self.downloads = {}\n self.downloadsResponse = {}\n self.mapproxy = {}\n self.mapproxyResponse = {}\n self.mapproxyWms = {}\n self.mapproxyWmsResponse = {}\n\n self.schoolsV1Mapproxy = {}\n self.schoolsV1MapproxyResponse = {}\n\n # Regular expression for matching lines we are interested in, and capturing\n # fields from the line.\n self.regLogin = re.compile('.*GET /login.* HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d-]+ .$')\n self.regLoginApi = re.compile('.*POST /roam/api/schools/login.* HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d-]+ .$')\n self.regRegister = re.compile('.*PUT /api/user/register HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d-]+ .$')\n self.regDownloads = re.compile('.*POST (/roam/api/download/orders|/datadownload/submitorder).* HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d-]+ .$')\n self.regMapproxy = re.compile('.*GET /mapproxy/wmsMap.* HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d|-]+ .$')\n self.regMapproxyWms = re.compile('.*GET /mapproxy/wms/.*GetMap.* HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d-]+ .$')\n self.regSchoolsV1Mapproxy = re.compile('.*GET /dfsmapproxy/wmsMap.* HTTP/\\d.\\d\" (?P<code>\\d+) .* (?P<response>\\d+) [\\w\\d|-]+ .$')\n\n def parse_line(self, line):\n '''This function should digest the contents of one line at a time, updating\n object's state variables. 
Takes a single argument, the line to be parsed.'''\n\n # Apply regular expression to each line and extract interesting bits.\n regLoginMatch = False\n if \"MONITOR\" not in line and \"idp.edina.ac.uk\" not in line:\n regLoginMatch = self.regLogin.match(line)\n regLoginApiMatch = self.regLoginApi.match(line)\n regRegisterMatch = self.regRegister.match(line)\n regDownloadMatch = self.regDownloads.match(line)\n regMapproxyMatch = self.regMapproxy.match(line)\n regMapproxyWmsMatch = self.regMapproxyWms.match(line)\n regSchoolsV1MapproxyMatch = self.regSchoolsV1Mapproxy.match(line)\n\n if regLoginMatch:\n linebits = regLoginMatch.groupdict()\n self.populate(self.logins, self.loginsResponse, linebits)\n elif regLoginApiMatch:\n linebits = regLoginApiMatch.groupdict()\n self.populate(self.loginsApi, self.loginsApiResponse, linebits)\n elif regRegisterMatch:\n linebits = regRegisterMatch.groupdict()\n self.populate(self.registrations, self.registrationsResponse, linebits)\n elif regDownloadMatch:\n linebits = regDownloadMatch.groupdict()\n self.populate(self.downloads, self.downloadsResponse, linebits)\n elif regMapproxyMatch:\n linebits = regMapproxyMatch.groupdict()\n self.populate(self.mapproxy, self.mapproxyResponse, linebits)\n elif regMapproxyWmsMatch:\n linebits = regMapproxyWmsMatch.groupdict()\n self.populate(self.mapproxyWms, self.mapproxyWmsResponse, linebits)\n elif regSchoolsV1MapproxyMatch:\n linebits = regSchoolsV1MapproxyMatch.groupdict()\n self.populate(self.schoolsV1Mapproxy, self.schoolsV1MapproxyResponse, linebits)\n # ignore non-matching lines\n\n def populate(self, countDict, responseDict, linebits):\n code = linebits['code']\n response = int(linebits['response']) / float(1000) # convert usec to msec\n if code in countDict:\n countDict[code] += 1\n responseDict[code] += response\n else:\n countDict[code] = 1\n responseDict[code] = response\n\n def get_state(self, duration):\n '''Run any necessary calculations on the data collected from the logs\n and return a list of metric objects.'''\n\n metricObjects = []\n self.record_metric(metricObjects, self.logins, self.loginsResponse, \"logins\", \"Logins per minute\")\n self.record_metric(metricObjects, self.loginsApi, self.loginsApiResponse, \"logins_api\", \"API Logins per minute\")\n self.record_metric(metricObjects, self.registrations, self.registrationsResponse, \"registrations\", \"Registrations per minute\")\n self.record_metric(metricObjects, self.downloads, self.downloadsResponse, \"download_submit\", \"Download Submits per minute\")\n self.record_metric(metricObjects, self.mapproxy, self.mapproxyResponse, \"mapproxy\", \"Mapproxy tiles per minute\")\n self.record_metric(metricObjects, self.mapproxyWms, self.mapproxyWmsResponse, \"mapproxy_wms\", \"Mapproxy WMS requests per minute\")\n self.record_metric(metricObjects, self.schoolsV1Mapproxy, self.schoolsV1MapproxyResponse, \"schools_v1_mapproxy\", \"Schools V1 Mapproxy tiles per minute\")\n\n return metricObjects\n\n def record_metric(self, metricObjects, countDict, responseDict, metricName, description):\n for code, count in countDict.items():\n metricObjects.append( MetricObject( metricName + \"_count.\" + code, count, description ) )\n for code, responseTotal in responseDict.items():\n count = countDict[code]\n response = responseTotal / float(count)\n metricObjects.append( MetricObject( metricName + \"_response.\" + code, response, \"Avg Response \" + description ) 
)\n","sub_path":"logster/parsers/DMWebLogster.py","file_name":"DMWebLogster.py","file_ext":"py","file_size_in_byte":6960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"184517653","text":"# File: food.py\n# Author: Dane Magbuhos\n# Date: 9/20/17\n# Section: 20\n# E-mail: mag4@umbc.edu\n# Description: Asks the user what they had for breakfast and outputs excellent \n# choice when they enter a specific input.\n\ndef main():\n \n foodInput = input(\"Please enter what you ate for breakfast: \")\n if foodInput == \"green eggs\" or foodInput == \"ham\":\n print(\"Excellent choice!\")\n else:\n print(foodInput,\" is a strange choice for breakfast\")\n\n\n\nmain()\n","sub_path":"Labs/lab03/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"356990569","text":"from datetime import datetime\nimport sys\n\nfrom amcrest import Http\n\n\ndef lines(ret):\n line = ''\n for char in ret.iter_content(decode_unicode=True):\n line = line + char\n if line.endswith('\\r\\n'):\n yield line.strip()\n line = ''\n\n\ndef main():\n if len(sys.argv) != 5:\n print(f'{sys.argv[0]} host port user password')\n sys.exit(1)\n\n host = sys.argv[1]\n port = sys.argv[2]\n user = sys.argv[3]\n pswd = sys.argv[4]\n\n cam = Http(host, port, user, pswd, retries_connection=1, timeout_protocol=3.05)\n\n print(cam.device_type)\n print(*cam.software_information)\n print(cam.version_http_api)\n\n ret = cam.command(\n 'eventManager.cgi?action=attach&codes=[VideoMotion]',\n timeout_cmd=(3.05, None), stream=True)\n ret.encoding = 'utf-8'\n\n try:\n for line in lines(ret):\n if line.lower().startswith('content-length:'):\n chunk_size = int(line.split(':')[1])\n print(\n datetime.now().replace(microsecond=0),\n repr(next(ret.iter_content(\n chunk_size=chunk_size, decode_unicode=True))),\n )\n except KeyboardInterrupt:\n ret.close()\n print(' Done!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"412623771","text":"import os\r\nimport time\r\nimport math\r\nimport json\r\nimport hashlib\r\nimport datetime\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom run_pyspark import PySparkMgr\r\n\r\n\r\ngraph_type = \"loan_agent/\"\r\n\r\n\r\ndef make_md5(x):\r\n\r\n md5 = hashlib.md5()\r\n\r\n md5.update(x.encode('utf-8'))\r\n\r\n return md5.hexdigest()\r\n\r\n\r\ndef make_node_schema(entity_name, entity_df, comp_index_properties = None, mix_index_properties = None):\r\n\r\n properties = {\"propertyKeys\": []}\r\n\r\n for col in entity_df.columns:\r\n if entity_df[col].dtype == np.float:\r\n prop = {\"name\": col, \"dataType\": \"Float\", \"cardinality\": \"SINGLE\"}\r\n elif entity_df[col].dtype == np.integer:\r\n prop = {\"name\": col, \"dataType\": \"Integer\", \"cardinality\": \"SINGLE\"}\r\n else:\r\n prop = {\"name\": col, \"dataType\": \"String\", \"cardinality\": \"SINGLE\"}\r\n properties[\"propertyKeys\"].append(prop)\r\n\r\n vertexLabels = {\"vertexLabels\": []}\r\n\r\n vertexLabels[\"vertexLabels\"].append({\"name\": entity_name})\r\n\r\n vertexIndexes = {\"vertexIndexes\": []}\r\n\r\n if comp_index_properties is not None:\r\n for prop in comp_index_properties:\r\n vertexIndexes[\"vertexIndexes\"].append({\r\n \"name\" : entity_name + \"_\" + prop + 
\"_comp\",\r\n \"propertyKeys\" : [ prop ],\r\n \"composite\" : True,\r\n \"unique\" : False\r\n })\r\n\r\n if mix_index_properties is not None:\r\n for prop in mix_index_properties:\r\n vertexIndexes[\"vertexIndexes\"].append({\r\n \"name\" : entity_name + \"_\" + prop + \"_mixed\",\r\n \"propertyKeys\" : [ prop ],\r\n \"composite\" : False,\r\n \"unique\" : False,\r\n \"mixedIndex\" : \"search\"\r\n })\r\n\r\n vertexIndexes[\"vertexIndexes\"].append({\r\n \"name\" : entity_name + \"_graph_label_mixed\",\r\n \"propertyKeys\" : [ \"graph_label\" ],\r\n \"composite\" : False,\r\n \"unique\" : False,\r\n \"mixedIndex\" : \"search\"\r\n })\r\n\r\n return {**properties, **vertexLabels, **vertexIndexes}\r\n\r\n\r\ndef make_node_mapper(entity_name, entity_df):\r\n entity_file = \"gra_\" + entity_name + \".csv\"\r\n\r\n vertexMap = {\"vertexMap\": {entity_file: {}}}\r\n\r\n vertexMap[\"vertexMap\"][entity_file] = {\r\n \"[VertexLabel]\" : entity_name\r\n }\r\n\r\n for col in entity_df.columns:\r\n vertexMap[\"vertexMap\"][entity_file][col] = col\r\n\r\n return vertexMap\r\n\r\n\r\ndef make_vertex_centric_schema(edge_name, index_property, direction, order):\r\n if direction not in [\"BOTH\", \"IN\", \"OUT\"]:\r\n print(\"direction should be in {}\".format([\"BOTH\", \"IN\", \"OUT\"]))\r\n return None\r\n\r\n if order not in [\"incr\", \"decr\"]:\r\n print(\"order should be in {}\".format([\"incr\", \"decr\"]))\r\n return None\r\n\r\n vertexCentricIndexes = {\"vertexCentricIndexes\": []}\r\n\r\n vertexCentricIndexes[\"vertexCentricIndexes\"].append({\r\n \"name\" : edge_name + \"_\" + index_property,\r\n \"edge\" : edge_name,\r\n \"propertyKeys\" : [ index_property ],\r\n \"order\": order,\r\n \"direction\": direction\r\n })\r\n\r\n return vertexCentricIndexes\r\n\r\n\r\ndef make_edge_schema(relation_df = None, relation_comp_index_properties = None, relation_mix_index_properties = None):\r\n\r\n properties = {\"propertyKeys\": []}\r\n\r\n relation_columns = relation_df.columns.tolist()\r\n if \"Left\" not in relation_columns or \"Right\" not in relation_columns:\r\n print(\"relation df lacks Left and Right columns \")\r\n\r\n for col in relation_df.columns:\r\n if col in [\"Left\", \"Right\", \"Type\"]:\r\n continue\r\n\r\n if relation_df[col].dtype == np.float:\r\n prop = {\"name\": col, \"dataType\": \"Float\", \"cardinality\": \"SINGLE\"}\r\n elif relation_df[col].dtype == np.integer:\r\n prop = {\"name\": col, \"dataType\": \"Integer\", \"cardinality\": \"SINGLE\"}\r\n else:\r\n prop = {\"name\": col, \"dataType\": \"String\", \"cardinality\": \"SINGLE\"}\r\n\r\n properties[\"propertyKeys\"].append(prop)\r\n\r\n relation_names = relation_df[\"Type\"].value_counts().index.tolist()\r\n\r\n edgeLabels = {\"edgeLabels\": []}\r\n\r\n for relation in relation_names:\r\n edgeLabels[\"edgeLabels\"].append({\r\n \"name\": relation,\r\n \"multiplicity\": \"MULTI\",\r\n \"unidirected\": False\r\n })\r\n\r\n edgeIndexes = {\"edgeIndexes\": []}\r\n\r\n for relation_name in relation_names:\r\n if relation_comp_index_properties is not None:\r\n for prop in relation_comp_index_properties:\r\n edgeIndexes[\"edgeIndexes\"].append({\r\n \"name\": relation_name + \"_\" + prop + \"_comp\",\r\n \"propertyKeys\": [ prop ],\r\n \"composite\": True,\r\n \"unique\": False,\r\n \"indexOnly\": relation_name\r\n })\r\n\r\n if relation_mix_index_properties is not None:\r\n for prop in relation_mix_index_properties:\r\n edgeIndexes[\"edgeIndexes\"].append({\r\n \"name\" : relation_name + \"_\" + prop + \"_mixed\",\r\n 
\"propertyKeys\": [ prop ],\r\n \"composite\": False,\r\n \"unique\": False,\r\n \"mixedIndex\": \"search\",\r\n \"indexOnly\": relation_name\r\n })\r\n\r\n return {**properties, **edgeLabels, **edgeIndexes}\r\n\r\n\r\ndef make_edge_mapper(entity_relations, relation_df=None, specific_relation=None):\r\n\r\n edgeMap = {\"edgeMap\": {}}\r\n\r\n for relation_name, entity_pairs in entity_relations.items():\r\n if specific_relation is not None and relation_name != specific_relation:\r\n continue\r\n\r\n for pair in entity_pairs:\r\n\r\n relation_file = \"gra_\" + relation_name + \".csv\"\r\n\r\n edge = {\"[edge_left]\": {\"Left\": pair[0]},\r\n \"[EdgeLabel]\": relation_name,\r\n \"[edge_right]\": {\"Right\": pair[1]}}\r\n\r\n if relation_df is not None:\r\n relation_columns = relation_df.columns.tolist()\r\n if \"Left\" not in relation_columns or \"Right\" not in relation_columns:\r\n print(\"relation df lacks Left and Right columns \")\r\n\r\n for col in relation_df.columns:\r\n if col in [\"Left\", \"Right\", \"Type\"]:\r\n continue\r\n\r\n edge[col] = col\r\n\r\n edgeMap[\"edgeMap\"][relation_file] = edge\r\n\r\n return edgeMap\r\n\r\n\r\ndef dump_schema(schema, datamapper, folder):\r\n if not os.path.exists(graph_type + folder):\r\n os.makedirs(graph_type + folder)\r\n\r\n f = open(graph_type + folder + \"/schema.json\", 'w')\r\n f.write(json.dumps(schema))\r\n f.close()\r\n\r\n f = open(graph_type + folder + \"/datamapper.json\", 'w')\r\n f.write(json.dumps(datamapper))\r\n f.close()\r\n\r\n\r\nspark_args = {}\r\n\r\npysparkmgr = PySparkMgr(spark_args)\r\n_, spark, sc = pysparkmgr.start('xubin.xu')\r\n\r\n# 申请表\r\napply_loan_df = spark.sql(\"select * from adm.adm_credit_apply_quota_doc\").toPandas()\r\n\r\n# 支用表\r\nzhiyong_loan_df = spark.sql(\"select * from adm.adm_credit_loan_apply_doc\").toPandas()\r\nzhiyong_loan_df.quota_apply_id = zhiyong_loan_df.quota_apply_id.astype(\"int\")\r\n\r\n# 逾期表\r\noverdue_sql = \"\"\"select \r\n*\r\nfrom adm.adm_credit_apply_quota_doc t1\r\n--逾期关联,存在一个客户不同时间多笔申请,不同申请会对应不同的逾期状态\r\n--当前逾期天数和历史最大逾期天数\r\nleft join \r\n(\r\n select \r\n quota_apply_id,\r\n max(overdue_days_now) as overdue_days_now,\r\n max(his_max_overdue_days) as his_max_overdue_days\r\n from \r\n (\r\n select \r\n c4.quota_apply_id,\r\n c3.overdue_days_now,\r\n c3.his_max_overdue_days\r\n from \r\n adm.adm_credit_loan_apply_doc c4\r\n left join\r\n (\r\n select \r\n c2.business_id,\r\n max(overdue_days_now) as overdue_days_now,\r\n max(overdue_day_calc) as his_max_overdue_days\r\n from\r\n (\r\n select \r\n c1.*,\r\n (case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now\r\n FROM adm.adm_credit_rpt_risk_overdue_bill c1\r\n ) c2\r\n group by c2.business_id\r\n ) c3\r\n on c4.loan_no=c3.business_id\r\n ) c5\r\n group by quota_apply_id\r\n) t4\r\non t1.quota_apply_id=t4.quota_apply_id\r\n--首逾天数:当前首逾天数,历史最大首逾天数----------------------------------------------------------\r\nleft join\r\n(\r\n select \r\n quota_apply_id,\r\n max(fpd) as fpd,\r\n max(fpd_ever) as fpd_ever\r\n from\r\n (\r\n select \r\n a1.*,a2.*\r\n from \r\n adm.adm_credit_loan_apply_doc a1\r\n left join\r\n (\r\n select \r\n c1.business_id,\r\n (case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fpd,--当前首逾天数\r\n c1.overdue_day_calc as fpd_ever--历史首逾天数\r\n from \r\n adm.adm_credit_rpt_risk_overdue_bill c1\r\n where periods=1\r\n ) a2\r\n on a1.loan_no=a2.business_id\r\n ) a3\r\n group by quota_apply_id\r\n) 
t5\r\non t1.quota_apply_id=t5.quota_apply_id\"\"\"\r\n\r\noverday_df = spark.sql(overdue_sql).toPandas()\r\n\r\n\r\n# 构建借款者实体\r\ndef make_borrower_entity():\r\n shouxin_zhiyong_df = pd.merge(apply_loan_df, zhiyong_loan_df[\r\n [\"quota_apply_id\", \"apply_id\", \"apply_status_risk\", \"loan_status\", \"loan_amount\", \"repayment_principal\"]],\r\n how='left', on='quota_apply_id')\r\n\r\n borrower_basic_df = shouxin_zhiyong_df[\r\n [\"name\", \"uus_id\", \"employee_no\", \"identity_no\", \"sex\", \"age\", \"zociac\", \"educate_level\", \"marital_status\",\r\n \"city\", \"access_role\", \"entry_date\",\r\n \"resign_date\", \"on_job_status\", \"current_working_days\", \"uc_job_level_name\", \"store_city\", \"apply_id\",\r\n \"team_code\", \"shop_code\", \"area_code\", \"marketing_code\", \"region_code\"]]\r\n\r\n borrower = shouxin_zhiyong_df.groupby(\"identity_no\")\r\n\r\n borrower_ext_df = pd.DataFrame([], columns=[\"identity_no\", \"累计贷款笔数\", \"未结清贷款笔数\", \"累计贷款金额\", \"当前贷款余额\"])\r\n idx = 0\r\n\r\n for group, df in borrower:\r\n loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == \"放款成功\")].apply_id.count()\r\n\r\n unclosed_loans_cnt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == \"放款成功\") & (\r\n df.loan_status == \"REPAYING\")].apply_id.count()\r\n\r\n loans_amt = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == \"放款成功\")].loan_amount_y.sum()\r\n\r\n unpayed_amt = loans_amt - df[\r\n (~pd.isnull(df.apply_id)) & (df.apply_status_risk_y == \"放款成功\")].repayment_principal.sum()\r\n\r\n borrower_ext_df.loc[idx] = {\"identity_no\": group, \"累计贷款笔数\": loans_cnt, \"未结清贷款笔数\": unclosed_loans_cnt,\r\n \"累计贷款金额\": loans_amt, \"当前贷款余额\": unpayed_amt}\r\n\r\n idx += 1\r\n\r\n borrower_basic_df.drop_duplicates(borrower_basic_df.columns, keep='first', inplace=True)\r\n\r\n borrower_entity_df = pd.merge(borrower_basic_df, borrower_ext_df, on=\"identity_no\")\r\n\r\n borrower_entity_df = borrower_entity_df.fillna(0)\r\n\r\n overday_gp = overday_df[(~pd.isnull(overday_df.overdue_days_now))].groupby(\"identity_no\")[\"overdue_days_now\"].max()\r\n overday_now_df = pd.DataFrame({\"identity_no\": overday_gp.index, \"overdue_days_now\": overday_gp.values})\r\n\r\n borrower_entity_df = pd.merge(borrower_entity_df, overday_now_df, how=\"left\", on=\"identity_no\")\r\n\r\n his_overday_gp = overday_df[(~pd.isnull(overday_df.his_max_overdue_days))].groupby(\"identity_no\")[\r\n \"his_max_overdue_days\"].max()\r\n his_overday_df = pd.DataFrame({\"identity_no\": his_overday_gp.index, \"his_max_overdue_days\": his_overday_gp.values})\r\n\r\n borrower_entity_df = pd.merge(borrower_entity_df, his_overday_df, how=\"left\", on=\"identity_no\")\r\n\r\n borrower_entity_df = borrower_entity_df.fillna(0)\r\n\r\n borrower_entity_df[\"tag\"] = \"\"\r\n\r\n for idx in borrower_entity_df.index:\r\n\r\n max_overday = borrower_entity_df.loc[idx, \"overdue_days_now\"]\r\n\r\n his_max_overday = borrower_entity_df.loc[idx, \"his_max_overdue_days\"]\r\n\r\n loan_amt = borrower_entity_df.loc[idx, \"累计贷款金额\"]\r\n\r\n job_status = borrower_entity_df.loc[idx, \"on_job_status\"]\r\n\r\n tag = borrower_entity_df.loc[idx, \"tag\"]\r\n\r\n if his_max_overday > 90:\r\n tag = tag + \",坏客户\"\r\n\r\n if max_overday > 30:\r\n tag = tag + \",首逾30+\"\r\n\r\n if job_status == \"离职\":\r\n tag = tag + \",离职\"\r\n\r\n if loan_amt > 0:\r\n tag = tag + \",放款\"\r\n else:\r\n tag = tag + \",未放款\"\r\n\r\n p = tag.find(\",\")\r\n if p == 0:\r\n tag = tag[1:]\r\n\r\n borrower_entity_df.loc[idx, \"tag\"] = tag\r\n\r\n 
borrower_entity_df.drop([\"apply_id\"], axis=1, inplace=True)\r\n\r\n borrower_entity_df.drop_duplicates(borrower_entity_df.columns, inplace=True)\r\n\r\n return borrower_entity_df\r\n\r\n\r\nborrower_entity_df = make_borrower_entity()\r\n\r\nborrower_entity_df.columns = [\"姓名\", \"uus_id\", \"员工号\", \"身份证号\", \"性别\", \"年龄\", \"星座\", \"教育程度\", \"婚姻状态\", \"城市\", \"角色\", \"入职日期\",\r\n \"离职日期\",\r\n \"当前在职状态\", \"当前在职天数\", \"当前职级\", \"门店所在城市\", \"team_code\", \"shop_code\", \"area_code\",\r\n \"marketing_code\", \"region_code\",\r\n \"累计贷款笔数\", \"未结清贷款笔数\", \"累计贷款金额\", \"当前贷款余额\", \"当前逾期天数\", \"历史最大逾期天数\", \"tag\"]\r\n\r\n\r\n# 构建联系人实体\r\ndef make_contact_entity():\r\n contact_df = spark.sql(\"select * from credit_loan_api_service.personal_contact_info\").toPandas()\r\n contact_df = contact_df[contact_df.product_id == \"ELOAN_AGENT\"]\r\n\r\n contact_df = contact_df[[\"contact_name\", \"contact_way\", \"contact_relationship\", \"uid\"]]\r\n\r\n contact_df.columns = [\"姓名\", \"联系方式\", \"关系\", \"uid\"]\r\n\r\n contact_df.drop_duplicates(contact_df.columns, inplace=True)\r\n\r\n return contact_df\r\n\r\n\r\ncontact_entity_df = make_contact_entity()\r\n\r\ncontact_entity_df[\"ext_id\"] = contact_entity_df[\"姓名\"] + contact_entity_df[\"联系方式\"] + contact_entity_df[\"关系\"] + \\\r\n contact_entity_df[\"uid\"]\r\n\r\ncontact_entity_df.ext_id = contact_entity_df.ext_id.apply(lambda x: make_md5(x))\r\n\r\n\r\n# 构建地址实体\r\ndef make_address_entity():\r\n\r\n address_df = spark.sql(\"select * from credit_loan_api_service.credit_personal_info\").toPandas()\r\n address_df = address_df[address_df.product_id == \"ELOAN_AGENT\"]\r\n\r\n address_df = address_df[[\"address\", \"province\", \"city\", \"district\", \"uid\"]]\r\n\r\n address_df.columns = [\"地址\", \"省份\", \"城市\", \"区\", \"uid\"]\r\n\r\n address_df.drop_duplicates(address_df.columns, inplace=True)\r\n\r\n return address_df\r\n\r\n\r\naddress_entity_df = make_address_entity()\r\n\r\n\r\n# 构建手机实体\r\ndef make_phone_entity():\r\n\r\n phones_df = apply_loan_df[[\"uus_id\", \"telephone\"]]\r\n phones_df = pd.concat([phones_df, zhiyong_loan_df[[\"uus_id\", \"telephone\"]]])\r\n\r\n phones_df = pd.merge(borrower_entity_df[[\"uus_id\"]], phones_df, how=\"left\", on=\"uus_id\")\r\n\r\n phones_df = phones_df[~pd.isnull(phones_df.telephone)]\r\n\r\n phones_df[\"tag\"] = \"借款人\"\r\n\r\n contact_phones_df = contact_entity_df[[\"uid\", \"联系方式\"]]\r\n\r\n contact_phones_df.rename(columns={\"uid\": \"uus_id\", \"联系方式\": \"telephone\"}, inplace=True)\r\n\r\n contact_phones_df = contact_phones_df[~pd.isnull(contact_phones_df.telephone)]\r\n\r\n contact_phones_df[\"tag\"] = \"联系人\"\r\n\r\n phones_df = pd.concat([phones_df, contact_phones_df])\r\n\r\n phones_df.rename(columns={\"telephone\": \"手机号\"}, inplace=True)\r\n\r\n phones_df.drop_duplicates(phones_df.columns, keep='first', inplace=True)\r\n\r\n return phones_df\r\n\r\n\r\nphones_entity_df = make_phone_entity()\r\n\r\n\r\n# 构建团队,门店,区域,市场,大区实体\r\ndef build_teams(code):\r\n\r\n team_gp = borrower_entity_df.groupby(code)\r\n\r\n team_df = pd.DataFrame([], columns=[\"编号\", \"名称\", \"放款总人数\", \"放款总金额\", \"当前总贷款余额\", \"总坏客户人数\"])\r\n\r\n idx = 0\r\n\r\n for group, df in team_gp:\r\n\r\n loan_cnt = df[df[\"累计贷款笔数\"] > 0][\"累计贷款笔数\"].count()\r\n\r\n loan_amt = df[\"累计贷款金额\"].sum()\r\n\r\n unpaid_amt = df[\"当前贷款余额\"].sum()\r\n\r\n bad_cnt = df[df.tag.str.contains(\"坏客户\")][\"身份证号\"].count()\r\n\r\n team_df.loc[idx] = {\"编号\": group, \"名称\": \"\", \"放款总人数\": loan_cnt, \"放款总金额\": loan_amt,\r\n \"当前总贷款余额\": unpaid_amt, 
\"总坏客户人数\": bad_cnt}\r\n\r\n idx += 1\r\n\r\n team_df.drop_duplicates(team_df.columns, inplace=True)\r\n\r\n return team_df\r\n\r\n\r\ndef make_shop_entity():\r\n shop_df = build_teams(\"shop_code\")\r\n\r\n shop_df = shop_df[(shop_df[\"编号\"].str.strip().str.len() > 0) & (shop_df[\"编号\"]!=0)]\r\n\r\n shop_address_df = spark.sql(\"select shop_id, shop_code, shop_name, address, city_name from spark_dw.dw_ke_bkjf_shh_house_shop_base_da\").toPandas()\r\n\r\n shop_df = pd.merge(shop_df, shop_address_df[[\"shop_code\", \"shop_name\", \"address\", \"city_name\"]],\r\n how = \"left\", left_on=\"编号\", right_on=\"shop_code\")\r\n\r\n shop_df[\"名称\"] = shop_df.shop_name\r\n shop_df.drop([\"shop_name\", \"shop_code\"], axis=1, inplace=True)\r\n\r\n shop_df.rename(columns={\"address\": \"地址\", \"city_name\": \"城市\"}, inplace=True)\r\n\r\n shop_df.drop_duplicates(shop_df.columns, inplace=True)\r\n\r\n return shop_df\r\n\r\n\r\ndef make_group_entity(group):\r\n\r\n team_df = build_teams(group + \"_code\")\r\n\r\n team_df = team_df[(team_df[\"编号\"].str.strip().str.len() > 0) & (team_df[\"编号\"]!=0)]\r\n\r\n tmp_df = apply_loan_df[[group + \"_code\", group + \"_name\"]]\r\n\r\n team_df = pd.merge(team_df, tmp_df, how=\"left\", left_on=\"编号\", right_on=group + \"_code\")\r\n\r\n team_df[\"名称\"] = team_df[group + \"_name\"]\r\n\r\n team_df.drop([group + \"_code\", group + \"_name\"], axis=1, inplace=True)\r\n\r\n team_df.drop_duplicates(team_df.columns, inplace=True)\r\n\r\n return team_df\r\n\r\n\r\nteam_df = make_group_entity(\"team\")\r\nteam_df['tag'] = np.where(team_df['总坏客户人数'] > 1, '高风险组', '正常组')\r\n\r\nshop_entity_df = make_shop_entity()\r\nshop_entity_df['tag'] = np.where(shop_entity_df['总坏客户人数'] > 2, '高风险门店', '正常门店')\r\n\r\narea_df = make_group_entity(\"area\")\r\n\r\nmarketing_df = make_group_entity(\"marketing\")\r\n\r\nregion_df = make_group_entity(\"region\")\r\n\r\n\r\n# 构建设备ip实体\r\ndef make_device_ip():\r\n ip_df = spark.sql(\"\"\"select ip, udid, union_id, event_time from credit_biz_metrics.device_fingerprint \r\n where date(event_time)>=date('2020-08-24') and udid!='2408c710977177815f01fbc344dedc8b'\"\"\").toPandas()\r\n\r\n ip_df.sort_values(by=\"event_time\", inplace=True)\r\n ip_df.drop_duplicates(list(set(ip_df.columns).difference({\"event_time\"})), keep='first', inplace=True)\r\n\r\n return ip_df\r\n\r\n\r\nip_df = make_device_ip()\r\n\r\n\r\n# 构建设备实体\r\ndef make_device_entity():\r\n device_df = spark.sql(\"\"\"select udid, union_id, imei, idfa, meid, event_time from credit_biz_metrics.device_fingerprint \r\nwhere date(event_time)>=date('2020-08-24') and udid!='2408c710977177815f01fbc344dedc8b'\"\"\").toPandas()\r\n\r\n device_df.sort_values(by=\"event_time\", inplace=True)\r\n device_df.drop_duplicates(list(set(device_df.columns).difference({\"event_time\"})), keep='first', inplace=True)\r\n\r\n return device_df\r\n\r\n\r\ndevice_df = make_device_entity()\r\n\r\n\r\n# 构建借款者-联系人关系\r\ndef make_borrower_contact():\r\n\r\n borrower_contact_df = pd.merge(borrower_entity_df[[\"uus_id\"]], contact_entity_df, left_on=\"uus_id\", right_on=\"uid\")[[\"uus_id\", \"关系\", \"uid\", \"ext_id\"]]\r\n\r\n borrower_contact_df.rename(columns={\"uus_id\": \"Left\", \"关系\": \"Type\", \"ext_id\": \"Right\"}, inplace=True)\r\n\r\n borrower_contact_df = borrower_contact_df[[\"Left\", \"Type\", \"Right\"]]\r\n\r\n borrower_contact_df.drop_duplicates(borrower_contact_df.columns, inplace=True)\r\n\r\n return borrower_contact_df\r\n\r\n\r\nborrower_contact_df = make_borrower_contact()\r\n\r\n\r\n# 
构建借款者-手机关系\r\ndef make_borrower_phones():\r\n\r\n borrower_phones = phones_entity_df[phones_entity_df.tag == \"借款人\"]\r\n\r\n borrower_phones.rename(columns={\"uus_id\": \"Left\", \"手机号\": \"Right\"}, inplace=True)\r\n\r\n borrower_phones[\"Type\"] = \"借款人号码\"\r\n\r\n borrower_phones = borrower_phones[[\"Left\", \"Type\", \"Right\"]]\r\n\r\n borrower_phones.drop_duplicates(borrower_phones.columns, inplace=True)\r\n\r\n return borrower_phones\r\n\r\n\r\nborrower_phones_df = make_borrower_phones()\r\n\r\n\r\n# 构建联系人-手机关系\r\ndef make_contact_phones():\r\n\r\n contact_phones = phones_entity_df[phones_entity_df.tag == \"联系人\"]\r\n\r\n contact_phones.rename(columns={\"uus_id\": \"Left\", \"手机号\": \"Right\"}, inplace=True)\r\n\r\n contact_phones[\"Type\"] = \"联系人号码\"\r\n\r\n contact_phones = contact_phones[[\"Left\", \"Type\", \"Right\"]]\r\n\r\n contact_phones.drop_duplicates(contact_phones.columns, inplace=True)\r\n\r\n return contact_phones\r\n\r\n\r\ncontact_phones_df = make_contact_phones()\r\n\r\n\r\n# 构建借款人-地址关系\r\ndef make_borrower_address():\r\n\r\n borrower_address = pd.merge(borrower_entity_df[[\"uus_id\"]], address_entity_df[\"uid\"], left_on=\"uus_id\", right_on=\"uid\")\r\n\r\n borrower_address[\"Type\"] = \"居住\"\r\n\r\n borrower_address.rename(columns={\"uus_id\": \"Left\", \"uid\": \"Right\"}, inplace=True)\r\n\r\n borrower_address = borrower_address[[\"Left\", \"Type\", \"Right\"]]\r\n\r\n borrower_address.drop_duplicates(borrower_address.columns, inplace=True)\r\n\r\n return borrower_address\r\n\r\n\r\nborrower_address_df = make_borrower_address()\r\n\r\n\r\n# 构建借款者-团队关系\r\ndef make_borrower_team():\r\n\r\n tmp_gp = zhiyong_loan_df.groupby([\"identity_no\", \"team_code\"])\r\n\r\n borrower_team = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])\r\n idx = 0\r\n\r\n for group, df in tmp_gp:\r\n loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk==\"放款成功\")]\r\n if loans.shape[0] == 0:\r\n borrower_team.loc[idx] = {\"Left\": group[0], \"Type\": \"所属团队\", \"Right\": group[1], \"放款时间\": \"\", \"放款状态\": df.apply_status_risk.values[0]}\r\n idx += 1\r\n continue\r\n\r\n min_loan_time = loans.loan_success_time.min()\r\n\r\n team_code = loans[loans.loan_success_time == min_loan_time].team_code.values[0]\r\n\r\n borrower_team.loc[idx] = {\"Left\": group[0], \"Type\": \"所属团队\", \"Right\": team_code, \"放款时间\": min_loan_time, \"放款状态\": \"放款成功\"}\r\n idx += 1\r\n\r\n borrower_team.drop_duplicates(borrower_team.columns, keep='first', inplace=True)\r\n\r\n apply_no_zhiyong = pd.merge(borrower_entity_df[[\"身份证号\", \"team_code\"]], borrower_team[\"Left\"], how=\"left\", left_on=\"身份证号\", right_on=\"Left\")\r\n apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]\r\n apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)\r\n apply_no_zhiyong.drop([\"Left\"], axis=1, inplace=True)\r\n\r\n apply_no_zhiyong.rename(columns={\"身份证号\": \"Left\", \"team_code\": \"Right\"}, inplace=True)\r\n apply_no_zhiyong[\"Type\"] = \"所属团队\"\r\n apply_no_zhiyong[\"放款时间\"] = \"\"\r\n apply_no_zhiyong[\"放款状态\"] = \"未支用\"\r\n\r\n apply_no_zhiyong = apply_no_zhiyong[[\"Left\", \"Type\", \"Right\", \"放款时间\", \"放款状态\"]]\r\n\r\n return pd.concat([borrower_team, apply_no_zhiyong])\r\n\r\n\r\nborrower_team = make_borrower_team()\r\n\r\n\r\n# 构建团队-门店关系\r\ndef make_team_shop():\r\n\r\n tmp_gp = zhiyong_loan_df.groupby([\"team_code\", \"shop_code\"])\r\n\r\n team_shop = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])\r\n idx = 0\r\n\r\n for group, 
df in tmp_gp:\r\n if pd.isnull(group):\r\n continue\r\n\r\n loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk==\"放款成功\")]\r\n if loans.shape[0] == 0:\r\n team_shop.loc[idx] = {\"Left\": group[0], \"Type\": \"所属门店\", \"Right\": group[1], \"放款时间\": \"\", \"放款状态\": \",\".join(df.apply_status_risk.unique())}\r\n idx += 1\r\n continue\r\n\r\n min_loan_time = loans.loan_success_time.min()\r\n\r\n shop_code = loans[loans.loan_success_time == min_loan_time].shop_code.values[0]\r\n\r\n team_shop.loc[idx] = {\"Left\": group[0], \"Type\": \"所属门店\", \"Right\": shop_code, \"放款时间\": min_loan_time, \"放款状态\": \"放款成功\"}\r\n idx += 1\r\n\r\n tmp_df = pd.merge(team_df, borrower_entity_df[['team_code', 'shop_code']], how=\"left\", left_on=\"编号\", right_on=\"team_code\")\r\n tmp_df.drop_duplicates(tmp_df.columns, inplace=True)\r\n\r\n apply_no_zhiyong = pd.merge(tmp_df[[\"编号\", 'shop_code']], team_shop[\"Left\"], how=\"left\", left_on=\"编号\", right_on=\"Left\")\r\n apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]\r\n apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)\r\n apply_no_zhiyong.drop([\"Left\"], axis=1, inplace=True)\r\n\r\n apply_no_zhiyong.rename(columns={\"编号\": \"Left\", \"shop_code\": \"Right\"}, inplace=True)\r\n apply_no_zhiyong[\"Type\"] = \"所属门店\"\r\n apply_no_zhiyong[\"放款时间\"] = \"\"\r\n apply_no_zhiyong[\"放款状态\"] = \"未支用\"\r\n\r\n apply_no_zhiyong = apply_no_zhiyong[[\"Left\", \"Type\", \"Right\", \"放款时间\", \"放款状态\"]]\r\n\r\n return pd.concat([team_shop, apply_no_zhiyong])\r\n\r\n\r\nteam_shop = make_team_shop()\r\n\r\n\r\n# 构建门店-区域关系\r\ndef make_shop_area():\r\n\r\n tmp_gp = zhiyong_loan_df.groupby([\"shop_code\", \"area_code\"])\r\n\r\n shop_area = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])\r\n idx = 0\r\n\r\n for group, df in tmp_gp:\r\n if pd.isnull(group):\r\n continue\r\n\r\n loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk==\"放款成功\")]\r\n if loans.shape[0] == 0:\r\n shop_area.loc[idx] = {\"Left\": group[0], \"Type\": \"所属区域\", \"Right\": group[1], \"放款时间\": \"\", \"放款状态\": \",\".join(df.apply_status_risk.unique())}\r\n idx += 1\r\n continue\r\n\r\n min_loan_time = loans.loan_success_time.min()\r\n\r\n area_code = loans[loans.loan_success_time == min_loan_time].area_code.values[0]\r\n\r\n shop_area.loc[idx] = {\"Left\": group[0], \"Type\": \"所属区域\", \"Right\": area_code, \"放款时间\": min_loan_time, \"放款状态\": \"放款成功\"}\r\n idx += 1\r\n\r\n tmp_df = pd.merge(shop_entity_df, borrower_entity_df[['shop_code','area_code']], how=\"left\", left_on=\"编号\", right_on=\"shop_code\")\r\n tmp_df.drop_duplicates(tmp_df.columns, inplace=True)\r\n\r\n apply_no_zhiyong = pd.merge(tmp_df[[\"编号\", 'area_code']], shop_area[\"Left\"], how=\"left\", left_on=\"编号\", right_on=\"Left\")\r\n apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]\r\n apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)\r\n apply_no_zhiyong.drop([\"Left\"], axis=1, inplace=True)\r\n\r\n apply_no_zhiyong.rename(columns={\"编号\": \"Left\", \"area_code\": \"Right\"}, inplace=True)\r\n apply_no_zhiyong[\"Type\"] = \"所属区域\"\r\n apply_no_zhiyong[\"放款时间\"] = \"\"\r\n apply_no_zhiyong[\"放款状态\"] = \"未支用\"\r\n\r\n apply_no_zhiyong = apply_no_zhiyong[[\"Left\", \"Type\", \"Right\", \"放款时间\", \"放款状态\"]]\r\n\r\n return pd.concat([shop_area, apply_no_zhiyong])\r\n\r\n\r\nshop_area = make_shop_area()\r\n\r\n\r\n# 构建区域-市场关系\r\ndef make_area_marketing():\r\n\r\n tmp_gp = zhiyong_loan_df.groupby([\"area_code\", 
\"marketing_code\"])\r\n\r\n area_marketing = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])\r\n idx = 0\r\n\r\n for group, df in tmp_gp:\r\n if pd.isnull(group):\r\n continue\r\n\r\n loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk==\"放款成功\")]\r\n if loans.shape[0] == 0:\r\n area_marketing.loc[idx] = {\"Left\": group[0], \"Type\": \"所属市场\", \"Right\": group[1], \"放款时间\": \"\", \"放款状态\": \",\".join(df.apply_status_risk.unique())}\r\n idx += 1\r\n continue\r\n\r\n min_loan_time = loans.loan_success_time.min()\r\n\r\n marketing_code = loans[loans.loan_success_time == min_loan_time].marketing_code.values[0]\r\n\r\n area_marketing.loc[idx] = {\"Left\": group[0], \"Type\": \"所属市场\", \"Right\": marketing_code, \"放款时间\": min_loan_time, \"放款状态\": \"放款成功\"}\r\n idx += 1\r\n\r\n tmp_df = pd.merge(area_df, borrower_entity_df[['area_code','marketing_code']], how=\"left\", left_on=\"编号\", right_on=\"area_code\")\r\n tmp_df.drop_duplicates(tmp_df.columns, inplace=True)\r\n\r\n apply_no_zhiyong = pd.merge(tmp_df[[\"编号\", 'marketing_code']], area_marketing[\"Left\"], how=\"left\", left_on=\"编号\", right_on=\"Left\")\r\n apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]\r\n apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)\r\n apply_no_zhiyong.drop([\"Left\"], axis=1, inplace=True)\r\n\r\n apply_no_zhiyong.rename(columns={\"编号\": \"Left\", \"marketing_code\": \"Right\"}, inplace=True)\r\n apply_no_zhiyong[\"Type\"] = \"所属市场\"\r\n apply_no_zhiyong[\"放款时间\"] = \"\"\r\n apply_no_zhiyong[\"放款状态\"] = \"未支用\"\r\n\r\n apply_no_zhiyong = apply_no_zhiyong[[\"Left\", \"Type\", \"Right\", \"放款时间\", \"放款状态\"]]\r\n\r\n return pd.concat([area_marketing, apply_no_zhiyong])\r\n\r\n\r\narea_marketing = make_area_marketing()\r\n\r\n\r\n# 构建市场-大区关系\r\ndef make_marketing_region():\r\n\r\n tmp_gp = zhiyong_loan_df.groupby([\"marketing_code\", \"region_code\"])\r\n\r\n marketing_region = pd.DataFrame([], columns=['Left', 'Type', 'Right', '放款时间', '放款状态'])\r\n idx = 0\r\n\r\n for group, df in tmp_gp:\r\n if pd.isnull(group):\r\n continue\r\n\r\n loans = df[(~pd.isnull(df.apply_id)) & (df.apply_status_risk==\"放款成功\")]\r\n if loans.shape[0] == 0:\r\n marketing_region.loc[idx] = {\"Left\": group[0], \"Type\": \"所属大区\", \"Right\": group[1], \"放款时间\": \"\", \"放款状态\": \",\".join(df.apply_status_risk.unique())}\r\n idx += 1\r\n continue\r\n\r\n min_loan_time = loans.loan_success_time.min()\r\n\r\n region_code = loans[loans.loan_success_time == min_loan_time].region_code.values[0]\r\n\r\n marketing_region.loc[idx] = {\"Left\": group[0], \"Type\": \"所属大区\", \"Right\": region_code, \"放款时间\": min_loan_time, \"放款状态\": \"放款成功\"}\r\n idx += 1\r\n\r\n tmp_df = pd.merge(marketing_df, borrower_entity_df[['marketing_code','region_code']], how=\"left\", left_on=\"编号\", right_on=\"marketing_code\")\r\n tmp_df.drop_duplicates(tmp_df.columns, inplace=True)\r\n\r\n apply_no_zhiyong = pd.merge(tmp_df[[\"编号\", 'region_code']], marketing_region[\"Left\"], how=\"left\", left_on=\"编号\", right_on=\"Left\")\r\n apply_no_zhiyong = apply_no_zhiyong[pd.isnull(apply_no_zhiyong.Left)]\r\n apply_no_zhiyong.drop_duplicates(apply_no_zhiyong.columns, inplace=True)\r\n apply_no_zhiyong.drop([\"Left\"], axis=1, inplace=True)\r\n\r\n apply_no_zhiyong.rename(columns={\"编号\": \"Left\", \"region_code\": \"Right\"}, inplace=True)\r\n apply_no_zhiyong[\"Type\"] = \"所属大区\"\r\n apply_no_zhiyong[\"放款时间\"] = \"\"\r\n apply_no_zhiyong[\"放款状态\"] = \"未支用\"\r\n\r\n apply_no_zhiyong = 
apply_no_zhiyong[[\"Left\", \"Type\", \"Right\", \"放款时间\", \"放款状态\"]]\r\n\r\n return pd.concat([marketing_region, apply_no_zhiyong])\r\n\r\n\r\nmarketing_region = make_marketing_region()\r\n\r\n\r\n# 构建借款者-设备ip关系\r\ndef get_borrower_ip():\r\n\r\n borrower_ip_df = pd.merge(borrower_entity_df[\"uus_id\"], ip_df, how=\"left\", left_on=\"uus_id\", right_on=\"union_id\")\r\n\r\n borrower_ip_df = borrower_ip_df[~pd.isnull(borrower_ip_df.union_id)]\r\n\r\n borrower_ip_df = borrower_ip_df[[\"uus_id\", \"udid\", \"event_time\"]]\r\n\r\n borrower_ip_df.rename(columns={\"uus_id\": \"Left\", \"udid\": \"Right\"}, inplace=True)\r\n\r\n borrower_ip_df[\"Type\"] = \"ip地址\"\r\n\r\n borrower_ip_df = borrower_ip_df[[\"Left\", \"Type\", \"Right\", \"event_time\"]]\r\n\r\n borrower_ip_df.sort_values(by=\"event_time\", inplace=True)\r\n\r\n borrower_ip_df.drop_duplicates([\"Left\", \"Type\", \"Right\"], inplace=True)\r\n\r\n return borrower_ip_df[~pd.isnull(borrower_ip_df.Right)]\r\n\r\n\r\nborrower_ip_df = get_borrower_ip()\r\n\r\n\r\n# 构建借款人-设备关系\r\ndef get_borrower_device():\r\n\r\n borrower_device_df = pd.merge(borrower_entity_df[\"uus_id\"], device_df, how=\"left\", left_on=\"uus_id\", right_on=\"union_id\")\r\n\r\n borrower_device_df = borrower_device_df[~pd.isnull(borrower_device_df.union_id)]\r\n\r\n borrower_device_df.rename(columns={\"uus_id\": \"Left\", \"udid\": \"Right\"}, inplace=True)\r\n\r\n borrower_device_df[\"Type\"] = \"使用设备\"\r\n\r\n borrower_device_df = borrower_device_df[[\"Left\", \"Type\", \"Right\", \"event_time\"]]\r\n\r\n borrower_device_df.sort_values(by=\"event_time\", inplace=True)\r\n\r\n borrower_device_df.drop_duplicates([\"Left\", \"Type\", \"Right\"], inplace=True)\r\n\r\n return borrower_device_df[~pd.isnull(borrower_device_df.Right)]\r\n\r\n\r\nborrower_device_df = get_borrower_device()\r\n\r\n\r\n# 解析借款人实体schema并存储\r\nborrower_entity_df.drop([\"team_code\", \"shop_code\", \"area_code\", \"marketing_code\", \"region_code\"], axis=1, inplace=True)\r\n\r\nborrower_entity_df[\"graph_label\"] = \"借款用户\"\r\n\r\nborrower_schema = make_node_schema(\"借款用户\", borrower_entity_df,\r\n comp_index_properties = [\"身份证号\", \"uus_id\"],\r\n mix_index_properties = [\"tag\", \"员工号\", \"姓名\", '性别', '年龄', '星座', '教育程度', '婚姻状态', '城市',\r\n '角色', '入职日期', '离职日期', '当前在职状态', '当前在职天数', '当前职级', '门店所在城市', '累计贷款笔数',\r\n '未结清贷款笔数', '累计贷款金额', '当前贷款余额', '当前逾期天数', '历史最大逾期天数'])\r\n\r\nborrower_schema['propertyKeys'][5][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][14][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][17][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][18][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][19][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][20][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][21][\"dataType\"] = \"Float\"\r\nborrower_schema['propertyKeys'][22][\"dataType\"] = \"Float\"\r\n\r\nborrower_mapper = make_node_mapper(\"借款用户\", borrower_entity_df)\r\n\r\ndump_schema(borrower_schema, borrower_mapper, \"borrower\")\r\n\r\nborrower_entity_df.to_csv(graph_type + \"borrower/gra_借款用户.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析联系人实体schema并存储\r\ncontact_entity_df[\"tag\"] = \"联系人\"\r\ncontact_entity_df[\"graph_label\"] = \"联系人\"\r\n\r\ncontact_schema = make_node_schema(\"联系人\", contact_entity_df,\r\n comp_index_properties = [\"uid\", \"联系方式\", \"ext_id\"],\r\n mix_index_properties = [\"姓名\", '关系', 'tag'])\r\n\r\ncontact_mapper = make_node_mapper(\"联系人\", 
contact_entity_df)\r\n\r\ndump_schema(contact_schema, contact_mapper, \"contact\")\r\n\r\ncontact_entity_df.to_csv(graph_type + \"contact/gra_联系人.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析手机实体schema并存储\r\nphones_entity_df[\"graph_label\"] = \"手机\"\r\n\r\nphones_schema = make_node_schema(\"手机\", phones_entity_df,\r\n comp_index_properties = [\"uus_id\", \"手机号\"],\r\n mix_index_properties = [\"tag\"])\r\n\r\nphones_mapper = make_node_mapper(\"手机\", phones_entity_df)\r\n\r\ndump_schema(phones_schema, phones_mapper, \"phone\")\r\n\r\nphones_entity_df.to_csv(graph_type + \"phone/gra_手机.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析地址实体schema并存储\r\naddress_entity_df[\"tag\"] = \"地址\"\r\naddress_entity_df[\"graph_label\"] = \"地址\"\r\n\r\naddress_schema = make_node_schema(\"地址\", address_entity_df,\r\n comp_index_properties = [\"uid\"],\r\n mix_index_properties = [\"地址\", \"省份\", \"城市\", \"区\", \"tag\"])\r\n\r\naddress_mapper = make_node_mapper(\"地址\", address_entity_df)\r\n\r\ndump_schema(address_schema, address_mapper, \"address\")\r\n\r\naddress_entity_df.to_csv(graph_type + \"address/gra_地址.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析团队实体schema并存储\r\nteam_df[\"graph_label\"] = \"团队\"\r\n\r\nteam_schema = make_node_schema(\"团队\", team_df,\r\n comp_index_properties = [\"编号\"],\r\n mix_index_properties = [\"名称\", \"放款总人数\", \"放款总金额\", \"当前总贷款余额\", \"总坏客户人数\", \"tag\"])\r\n\r\nteam_schema['propertyKeys'][2][\"dataType\"] = \"Float\"\r\nteam_schema['propertyKeys'][3][\"dataType\"] = \"Float\"\r\nteam_schema['propertyKeys'][4][\"dataType\"] = \"Float\"\r\nteam_schema['propertyKeys'][5][\"dataType\"] = \"Float\"\r\n\r\nteam_mapper = make_node_mapper(\"团队\", team_df)\r\n\r\ndump_schema(team_schema, team_mapper, \"team\")\r\n\r\nteam_df.to_csv(graph_type + \"team/gra_团队.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析门店实体schema并存储\r\nshop_entity_df[\"graph_label\"] = \"门店\"\r\n\r\nshop_schema = make_node_schema(\"门店\", shop_entity_df,\r\n comp_index_properties = [\"编号\"],\r\n mix_index_properties = [\"名称\", \"放款总人数\", \"放款总金额\", \"当前总贷款余额\", \"总坏客户人数\", \"地址\", \"城市\", \"tag\"])\r\n\r\nshop_schema['propertyKeys'][2][\"dataType\"] = \"Float\"\r\nshop_schema['propertyKeys'][3][\"dataType\"] = \"Float\"\r\nshop_schema['propertyKeys'][4][\"dataType\"] = \"Float\"\r\nshop_schema['propertyKeys'][5][\"dataType\"] = \"Float\"\r\n\r\nshop_mapper = make_node_mapper(\"门店\", shop_entity_df)\r\n\r\ndump_schema(shop_schema, shop_mapper, \"shop\")\r\n\r\nshop_entity_df.to_csv(graph_type + \"shop/gra_门店.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析区域实体schema并存储\r\narea_df[\"tag\"] = \"区域\"\r\narea_df[\"graph_label\"] = \"区域\"\r\n\r\narea_schema = make_node_schema(\"区域\", area_df,\r\n comp_index_properties=[\"编号\"],\r\n mix_index_properties=[\"名称\", \"放款总人数\", \"放款总金额\", \"当前总贷款余额\", \"总坏客户人数\", \"tag\"])\r\n\r\narea_schema['propertyKeys'][2][\"dataType\"] = \"Float\"\r\narea_schema['propertyKeys'][3][\"dataType\"] = \"Float\"\r\narea_schema['propertyKeys'][4][\"dataType\"] = \"Float\"\r\narea_schema['propertyKeys'][5][\"dataType\"] = \"Float\"\r\n\r\narea_mapper = make_node_mapper(\"区域\", area_df)\r\n\r\ndump_schema(area_schema, area_mapper, \"area\")\r\n\r\narea_df.to_csv(graph_type + \"area/gra_区域.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析市场实体schema并存储\r\nmarketing_df[\"tag\"] = \"市场\"\r\nmarketing_df[\"graph_label\"] = \"市场\"\r\n\r\nmarketing_schema = make_node_schema(\"市场\", marketing_df,\r\n comp_index_properties=[\"编号\"],\r\n 
mix_index_properties=[\"名称\", \"放款总人数\", \"放款总金额\", \"当前总贷款余额\", \"总坏客户人数\", \"tag\"])\r\n\r\nmarketing_schema['propertyKeys'][2][\"dataType\"] = \"Float\"\r\nmarketing_schema['propertyKeys'][3][\"dataType\"] = \"Float\"\r\nmarketing_schema['propertyKeys'][4][\"dataType\"] = \"Float\"\r\nmarketing_schema['propertyKeys'][5][\"dataType\"] = \"Float\"\r\n\r\nmarketing_mapper = make_node_mapper(\"市场\", marketing_df)\r\n\r\ndump_schema(marketing_schema, marketing_mapper, \"market\")\r\n\r\nmarketing_df.to_csv(graph_type + \"market/gra_市场.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析大区实体schema并存储\r\nregion_df[\"tag\"] = \"大区\"\r\nregion_df[\"graph_label\"] = \"大区\"\r\n\r\nregion_schema = make_node_schema(\"大区\", region_df,\r\n comp_index_properties=[\"编号\"],\r\n mix_index_properties=[\"名称\", \"放款总人数\", \"放款总金额\", \"当前总贷款余额\", \"总坏客户人数\", \"tag\"])\r\n\r\nregion_schema['propertyKeys'][2][\"dataType\"] = \"Float\"\r\nregion_schema['propertyKeys'][3][\"dataType\"] = \"Float\"\r\nregion_schema['propertyKeys'][4][\"dataType\"] = \"Float\"\r\nregion_schema['propertyKeys'][5][\"dataType\"] = \"Float\"\r\n\r\nregion_mapper = make_node_mapper(\"大区\", region_df)\r\n\r\ndump_schema(region_schema, region_mapper, \"region\")\r\n\r\nregion_df.to_csv(\"loan_agent/region/gra_大区.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析设备ip实体schema并存储\r\nip_df[\"tag\"] = \"设备ip\"\r\nip_df[\"graph_label\"] = \"设备ip\"\r\n\r\nip_schema = make_node_schema(\"设备ip\", ip_df,\r\n comp_index_properties=[\"ip\", \"udid\", \"union_id\"],\r\n mix_index_properties=[\"event_time\", \"tag\"])\r\n\r\nip_mapper = make_node_mapper(\"设备ip\", ip_df)\r\n\r\ndump_schema(ip_schema, ip_mapper, \"ip\")\r\n\r\nip_df.to_csv(\"loan_agent/ip/gra_设备ip.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析设备实体schema并存储\r\ndevice_df[\"tag\"] = \"设备\"\r\ndevice_df[\"graph_label\"] = \"设备\"\r\n\r\ndevice_schema = make_node_schema(\"设备\", device_df,\r\n comp_index_properties=[\"udid\", \"union_id\", \"imei\", \"idfa\", \"meid\"],\r\n mix_index_properties=[\"event_time\", \"tag\"])\r\n\r\ndevice_mapper = make_node_mapper(\"设备\", device_df)\r\n\r\ndump_schema(device_schema, device_mapper, \"device\")\r\n\r\ndevice_df.to_csv(\"loan_agent/device/gra_设备.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 定义关系schema\r\nentity_relations = {\r\n \"所属团队\": [(\"借款用户.身份证号\", \"团队.编号\")],\r\n \"所属门店\": [(\"团队.编号\", \"门店.编号\")],\r\n \"所属区域\": [(\"门店.编号\", \"区域.编号\")],\r\n \"所属市场\": [(\"区域.编号\", \"市场.编号\")],\r\n \"所属大区\": [(\"市场.编号\", \"大区.编号\")],\r\n \"联系人\": [(\"借款用户.uus_id\", \"联系人.ext_id\")],\r\n \"借款人号码\": [(\"借款用户.uus_id\", \"手机.手机号\")],\r\n \"联系人号码\": [(\"联系人.uid\", \"手机.手机号\")],\r\n \"居住\": [(\"借款用户.uus_id\", \"地址.uid\")],\r\n \"ip地址\": [(\"借款用户.uus_id\", \"设备ip.udid\")],\r\n \"使用设备\": [(\"借款用户.uus_id\", \"设备.udid\")],\r\n}\r\n\r\n\r\n# 解析借款人-联系人关系schema并存储\r\nborrower_contact_schema = make_edge_schema(borrower_contact_df)\r\n\r\nborrower_contact_mapper = make_edge_mapper(entity_relations, borrower_contact_df, \"联系人\")\r\n\r\ndump_schema(borrower_contact_schema, borrower_contact_mapper, \"borrower_contact\")\r\n\r\nborrower_contact_df.to_csv(\"loan_agent/borrower_contact/gra_联系人.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析借款人-手机号关系schema并存储\r\nborrower_phone_schema = make_edge_schema(borrower_phones_df)\r\n\r\nborrower_phone_mapper = make_edge_mapper(entity_relations, borrower_phones_df, \"借款人号码\")\r\n\r\ndump_schema(borrower_phone_schema, borrower_phone_mapper, 
\"borrower_phone\")\r\n\r\nborrower_phones_df.to_csv(\"loan_agent/borrower_phone/gra_借款人号码.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析联系人-手机号关系schema并存储\r\ncontact_phones_schema = make_edge_schema(contact_phones_df)\r\n\r\ncontact_phones_mapper = make_edge_mapper(entity_relations, contact_phones_df, \"联系人号码\")\r\n\r\ndump_schema(contact_phones_schema, contact_phones_mapper, \"contact_phone\")\r\n\r\ncontact_phones_df.to_csv(\"loan_agent/contact_phone/gra_联系人号码.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析借款人-地址关系schema并存储\r\nborrower_address_schema = make_edge_schema(borrower_address_df)\r\n\r\nborrower_address_mapper = make_edge_mapper(entity_relations, borrower_address_df, \"居住\")\r\n\r\ndump_schema(borrower_address_schema, borrower_address_mapper, \"borrower_address\")\r\n\r\nborrower_address_df.to_csv(\"loan_agent/borrower_address/gra_居住.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析借款人-设备ip的schema并存储\r\nborrower_ip_schema = make_edge_schema(borrower_ip_df, relation_mix_index_properties=[\"event_time\"])\r\n\r\nborrower_ip_mapper = make_edge_mapper(entity_relations, borrower_ip_df, \"ip地址\")\r\n\r\ndump_schema(borrower_ip_schema, borrower_ip_mapper, \"borrower_ip\")\r\n\r\nborrower_ip_df.to_csv(\"loan_agent/borrower_ip/gra_ip地址.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析借款人-设备schema并存储\r\nborrower_device_schema = make_edge_schema(borrower_device_df, relation_mix_index_properties=[\"event_time\"])\r\n\r\nborrower_device_mapper = make_edge_mapper(entity_relations, borrower_device_df, \"使用设备\")\r\n\r\ndump_schema(borrower_device_schema, borrower_device_mapper, \"borrower_device\")\r\n\r\nborrower_device_df.to_csv(\"loan_agent/borrower_device/gra_使用设备.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析借款人-团队关系schema并存储\r\nborrower_team_schema = make_edge_schema(borrower_team, relation_mix_index_properties=[\"放款时间\", \"放款状态\"])\r\n\r\nborrower_team_mapper = make_edge_mapper(entity_relations, borrower_team, \"所属团队\")\r\n\r\ndump_schema(borrower_team_schema, borrower_team_mapper, \"borrower_team\")\r\n\r\nborrower_team.to_csv(\"loan_agent/borrower_team/gra_所属团队.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析团队-门店关系schema并存储\r\nteam_shop_schema = make_edge_schema(team_shop, relation_mix_index_properties=[\"放款时间\", \"放款状态\"])\r\n\r\nteam_shop_mapper = make_edge_mapper(entity_relations, team_shop, \"所属门店\")\r\n\r\ndump_schema(team_shop_schema, team_shop_mapper, \"team_shop\")\r\n\r\nteam_shop.to_csv(\"loan_agent/team_shop/gra_所属门店.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析门店-区域关系schema并存储\r\nshop_area_schema = make_edge_schema(shop_area, relation_mix_index_properties=[\"放款时间\", \"放款状态\"])\r\n\r\nshop_area_mapper = make_edge_mapper(entity_relations, shop_area, \"所属区域\")\r\n\r\ndump_schema(shop_area_schema, shop_area_mapper, \"shop_area\")\r\n\r\nshop_area.to_csv(\"loan_agent/shop_area/gra_所属区域.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析区域-市场关系schema并存储\r\narea_marketing_schema = make_edge_schema(area_marketing, relation_mix_index_properties=[\"放款时间\", \"放���状态\"])\r\n\r\narea_marketing_mapper = make_edge_mapper(entity_relations, area_marketing, \"所属市场\")\r\n\r\ndump_schema(area_marketing_schema, area_marketing_mapper, \"area_market\")\r\n\r\narea_marketing.to_csv(\"loan_agent/area_market/gra_所属市场.csv\", sep=',', header=True, index=False)\r\n\r\n\r\n# 解析市场-大区关系schema并存储\r\nmarketing_region_schema = make_edge_schema(marketing_region, relation_mix_index_properties=[\"放款时间\", 
\"放款状态\"])\r\n\r\nmarketing_region_mapper = make_edge_mapper(entity_relations, marketing_region, \"所属大区\")\r\n\r\ndump_schema(marketing_region_schema, marketing_region_mapper, \"market_region\")\r\n\r\nmarketing_region.to_csv(\"loan_agent/market_region/gra_所属大区.csv\", sep=',', header=True, index=False)\r\n\r\n","sub_path":"graph_scripts/knowledge_loan_agent_graph.py","file_name":"knowledge_loan_agent_graph.py","file_ext":"py","file_size_in_byte":47474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"338819937","text":"import shutil\nimport requests\nimport os\nimport json\nimport glob\nimport yaml\nimport sys\nimport urllib\nimport ssl\nimport csv\nimport time\nimport hashlib\n\ndir = \"../../../docs/api/iiif/curation\"\n\npath = dir + \"/top_text.json\"\n\nwith open(path) as f:\n curation = json.load(f)\n\n selections = curation[\"selections\"]\n\n idMap = {}\n\n for selection in selections:\n members = selection[\"members\"]\n\n for member in members:\n\n uri = member[\"@id\"]\n id = hashlib.md5(uri.encode('utf-8')).hexdigest()\n\n print(id)\n\n path = \"yolo/data/\"+id+\".json\"\n\n if os.path.exists(path):\n with open(path) as f:\n data = json.load(f)\n\n members2 = data[\"selections\"][0][\"members\"]\n\n arr = []\n\n for member2 in members2:\n metadata = member2[\"metadata\"]\n\n for m in metadata:\n if m[\"label\"] == \"Tag\":\n value = m[\"value\"]\n\n if value not in arr:\n arr.append(value)\n\n if len(arr) > 0:\n member[\"metadata\"].append({\n \"label\" : \"機械タグ\",\n \"value\" : arr\n })\n\n '''\n for selection in selections:\n members = selection[\"members\"]\n\n for member in members:\n\n thumbnail = member[\"thumbnail\"]\n\n uri = member[\"@id\"]\n\n print(uri)\n id = hashlib.md5(uri.encode('utf-8')).hexdigest()\n\n path = \"data/json/similar_images/\"+id+\".json\"\n\n if os.path.exists(path):\n with open(path) as f:\n data = json.load(f)\n\n images = []\n max = 20\n if len(data) < max:\n max = len(data)\n for i in range(0, max):\n tid = data[i]\n if tid in idMap:\n images.append(idMap[tid])\n member[\"images\"] = images\n '''\n\nfilename = \"/top_yolo.json\"\nwith open(dir + filename, 'w') as outfile:\n json.dump(curation, outfile, ensure_ascii=False,\n indent=4, sort_keys=True, separators=(',', ': '))","sub_path":"src/common/image/600_merge_yolo.py","file_name":"600_merge_yolo.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"237046405","text":"#!/usr/bin/env python\n\nimport logging.config\n\nimport os\nfrom flask import Flask, Blueprint\nfrom api import settings\nfrom api.endpoints.cmr import ns as cmr_collections_namespace\nfrom api.endpoints.algorithm import ns as algorithm_namespace\nfrom api.endpoints.job import ns as job_namespace\nfrom api.endpoints.wmts import ns as wmts_namespace\nfrom api.endpoints.wms import ns as wms_namespace\nfrom api.endpoints.members import ns as members_namespace\nfrom api.endpoints.query_service import ns as query_service_namespace\nfrom api.endpoints.three_dimensional_tiles import ns as three_d_tiles_namespace\nfrom api.endpoints.environment import ns as environment_namespace\nfrom api.endpoints.ogcapi_features import ns as ogcapi_features_namespace\nfrom api.restplus import api\nfrom api.maap_database import db\nfrom api.models import initialize_sql\n\napp = Flask(__name__)\napp.secret_key = settings.CAS_SECRET_KEY\nlogging_conf_path = 
os.path.normpath(os.path.join(os.path.dirname(__file__), '../logging.conf'))\nlogging.config.fileConfig(logging_conf_path)\nlog = logging.getLogger(__name__)\n\nblueprint = Blueprint('baseapi', __name__, url_prefix='/api')\napi.init_app(blueprint)\napi.add_namespace(cmr_collections_namespace)\napp.register_blueprint(blueprint)\n\n\napp.config['CAS_SERVER'] = settings.CAS_SERVER_NAME\napp.config['CAS_AFTER_LOGIN'] = settings.CAS_AFTER_LOGIN\napp.config['SQLALCHEMY_DATABASE_URI'] = settings.DATABASE_URL\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\napp.app_context().push()\ndb.init_app(app)\ninitialize_sql(db.engine)\n#Base.ini .metadata.create_all(db.engine)\n# db.create_all()\n# db.session.commit()\n\n\n@app.route('/')\ndef index():\n return 'MAAP API'\n\n\ndef configure_app(flask_app):\n flask_app.config['SERVER_NAME'] = settings.FLASK_SERVER_NAME\n flask_app.config['CMR_API_TOKEN'] = settings.CMR_API_TOKEN\n flask_app.config['CMR_CLIENT_ID'] = settings.CMR_CLIENT_ID\n flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION\n flask_app.config['RESTPLUS_VALIDATE'] = settings.RESTPLUS_VALIDATE\n flask_app.config['RESTPLUS_MASK_SWAGGER'] = settings.RESTPLUS_MASK_SWAGGER\n flask_app.config['ERROR_404_HELP'] = settings.RESTPLUS_ERROR_404_HELP\n flask_app.config['TILER_ENDPOINT'] = settings.TILER_ENDPOINT\n flask_app.config['OGCAPI_FEATURES_ENDPOINT'] = settings.OGCAPI_FEATURES_ENDPOINT\n flask_app.config['_3DTILES_API_ENDPOINT'] = settings._3DTILES_API_ENDPOINT\n flask_app.config['DATA_SYSTEM_FILES_PATH'] = settings.DATA_SYSTEM_FILES_PATH\n flask_app.config['QS_STATE_MACHINE_ARN'] = settings.QS_STATE_MACHINE_ARN\n flask_app.config['QS_RESULT_BUCKET'] = settings.QS_RESULT_BUCKET\n\n\ndef initialize_app(flask_app):\n configure_app(flask_app)\n\n blueprint = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint)\n api.add_namespace(cmr_collections_namespace)\n api.add_namespace(algorithm_namespace)\n api.add_namespace(job_namespace)\n api.add_namespace(wmts_namespace)\n api.add_namespace(wms_namespace)\n api.add_namespace(members_namespace)\n api.add_namespace(query_service_namespace)\n api.add_namespace(three_d_tiles_namespace)\n api.add_namespace(environment_namespace)\n api.add_namespace(ogcapi_features_namespace)\n flask_app.register_blueprint(blueprint)\n\n\ndef main():\n initialize_app(app)\n log.info('>>>>> Starting development server at http://{}/api/ <<<<<'.format(app.config['SERVER_NAME']))\n app.run(debug=settings.FLASK_DEBUG)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"api/maapapp.py","file_name":"maapapp.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"133852178","text":"\n\nimport pygame\n\nimport battle2.eventmanager as evm\nimport battle2.statemachine as stm\n\n\nclass HumanInput(object):\n \"\"\"\n Handles keyboard and mouse input.\n \"\"\"\n def __init__(self, ev_manager, model):\n \"\"\"\n ev_manager (EventManager): Allows posting messages to the event queue.\n model (GameEngine): a strong reference to the game Model.\n \"\"\"\n self.ev_manager = ev_manager\n ev_manager.register_listener(self)\n self.model = model\n\n def notify(self, event):\n \"\"\"\n Receive events posted to the message queue.\n \"\"\"\n if isinstance(event, evm.TickEvent):\n\n for event in pygame.event.get(): # Called for each game tick. 
We check our input here.\n\n if event.type == pygame.QUIT: # handle window manager closing our window\n self.ev_manager.post(evm.QuitEvent())\n\n if event.type == pygame.MOUSEBUTTONDOWN: # handle mouse down events\n if event.button:\n self.ev_manager.post(evm.InputEvent(clickpos=event.pos, button=event.button))\n\n if event.type == pygame.KEYDOWN: # handle key down events\n currentstate = self.model.state.peek()\n if currentstate == stm.State.Menu:\n self.keydown_menu(event, currentstate)\n if currentstate == stm.State.Play:\n self.keydown_play(event, currentstate)\n if currentstate == stm.State.Help:\n self.keydown_help(event, currentstate)\n\n def keydown_menu(self, event, currentstate):\n \"\"\"\n Handles menu key events.\n \"\"\"\n if event.key == pygame.K_ESCAPE: # escape pops the menu\n self.ev_manager.post(evm.ChangeStateEvent(None, currentstate))\n if event.key == pygame.K_SPACE: # space plays the game\n self.ev_manager.post(evm.ChangeStateEvent(stm.State.Play))\n\n def keydown_help(self, event, currentstate):\n \"\"\"\n Handles help key events.\n \"\"\"\n if event.key in [pygame.K_ESCAPE, pygame.K_SPACE, pygame.K_RETURN]: # space, enter or escape pops help\n self.ev_manager.post(evm.ChangeStateEvent(None, currentstate))\n\n def keydown_play(self, event, currentstate):\n \"\"\"\n Handles play key events.\n \"\"\"\n if event.key == pygame.K_ESCAPE:\n self.ev_manager.post(evm.ChangeStateEvent(None, currentstate))\n elif event.key == pygame.K_F1: # F1 shows the help\n self.ev_manager.post(evm.ChangeStateEvent(stm.State.Help))\n elif event.key == pygame.K_F12:\n self.ev_manager.post(evm.InputEvent(key=event.key))\n elif (event.key == pygame.K_UP or event.key == pygame.K_DOWN or\n event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT):\n self.ev_manager.post(evm.InputEvent(key=event.key))\n else:\n self.ev_manager.post(evm.InputEvent(key=event.unicode))\n","sub_path":"battle2/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"511776293","text":"#!/usr/bin/python3\r\n# -*- coding:utf-8 -*-\r\n\r\nimport sys, os, io\r\nfrom Search import FullTextSearchWhoosh as Searcher\r\nimport socket\r\nimport json\r\n\r\n\r\n#sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\r\nSearcher.init()\r\n#sys.stdout.flush()\r\nIP_PORT = ('localhost', 8088)\r\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\ns.bind(IP_PORT)\r\ns.listen(7)\r\nprint('Serve is listening')\r\nwhile True:\r\n conn, addr = s.accept()\r\n print(conn)\r\n conn.send(b'Sever Connected')\r\n while True:\r\n print(\">>>Linked: \" + str(addr))\r\n buf = conn.recv(65535)\r\n #print(str(buf, encoding='utf-8'))\r\n ret_str = str(buf, encoding='utf-8')\r\n print(ret_str)\r\n if ret_str == 'exit':\r\n break\r\n else:\r\n ret_list = Searcher.Search(ret_str)\r\n if ret_list == None:\r\n ret_list = \"None\"\r\n #print(type(ret_list))\r\n #print(ret_list)\r\n print(ret_list)\r\n re = json.dumps({'content': ret_list})\r\n conn.sendall(bytes(re, encoding='utf-8'))\r\n conn.send(bytes('SearchCompleted', encoding='utf-8'))\r\n\r\n\r\n#不能查一个单字 例如 1 ,a,b 等等\r\n","sub_path":"201700301097-DengBoxin/Search(Whoosh/SearchServe.py","file_name":"SearchServe.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"34214072","text":"#!/usr/bin/env python3\n\nimport csv\nimport fnmatch\nimport glob\nimport 
json\nimport sys\nimport linecache\nimport re\nimport shutil\nfrom json import JSONDecodeError\n\nimport requests\nimport yaml\nimport pickle\nimport markdown2\n\nfrom collections import OrderedDict\nfrom functools import partial, wraps\nfrom itertools import chain\nfrom multiprocessing.pool import ThreadPool as Pool\nfrom optparse import OptionParser\nfrom os import sep, makedirs, getenv, remove\nfrom os.path import (\n isdir,\n exists,\n basename,\n curdir,\n dirname,\n join,\n abspath,\n normpath,\n)\n\n\ndef cache_by_sha(func):\n \"\"\" only downloads fresh file, if we don't have one or we do and the sha has changed \"\"\"\n\n @wraps(func)\n def cached_func(*args, **kwargs):\n cache = {}\n list_item = args[1]\n dest_dir = kwargs.get(\"dest_dir\")\n path_to_file = list_item.get(\"path\", \"\")\n file_out = \"{}{}\".format(dest_dir, path_to_file)\n p_file_out = \"{}{}.pickle\".format(\n dest_dir, path_to_file\n )\n makedirs(dirname(file_out), exist_ok=True)\n if exists(p_file_out) and exists(file_out):\n with open(p_file_out, \"rb\") as pf:\n cache = pickle.load(pf)\n cache_sha = cache.get(\"sha\", False)\n input_sha = list_item.get(\"sha\", False)\n if (cache_sha and input_sha and cache_sha == input_sha):\n # do nothing as we have the up to date file already\n return None\n else:\n with open(p_file_out, mode=\"wb+\") as pf:\n pickle.dump(\n list_item, pf, pickle.HIGHEST_PROTOCOL\n )\n return func(*args, **kwargs)\n\n return cached_func\n\n\nclass GitHub:\n def __init__(self, token=None):\n self.token = token\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n return False\n\n def headers(self):\n return (\n {\"Authorization\": \"token {}\".format(self.token)}\n if self.token\n else {}\n )\n\n def extract(self, data):\n out = []\n for item in data.get(\"tree\", []):\n out.append(\n {\n \"path\": item.get(\"path\", \"\"),\n \"url\": item.get(\"url\", \"\"),\n \"type\": item.get(\"type\", \"\"),\n \"sha\": item.get(\"sha\", \"\"),\n }\n )\n if item.get(\"tree\", None):\n out.append(self.extract(item.get(\"tree\")))\n return out\n\n def list(self, org, repo, branch, globs=None):\n globs = [] if globs is None else globs\n listing = []\n # get the latest sha\n url = \"https://api.github.com/repos/{0}/{1}/git/refs/heads/{2}\".format(\n org, repo, branch\n )\n headers = self.headers()\n print(\n \"\\x1b[32mINFO\\x1b[0m: Getting latest sha from {}/{}..\".format(\n repo, branch\n )\n )\n sha_response = requests.get(url, headers=headers)\n if sha_response.status_code == requests.codes.ok:\n sha = (\n sha_response.json()\n .get(\"object\", {})\n .get(\"sha\", None)\n )\n if sha:\n print(\n \"\\x1b[32mINFO\\x1b[0m: Getting tree from {}/{} @ {}\".format(\n repo, branch, sha\n )\n )\n tree_response = requests.get(\n \"https://api.github.com/repos/{0}/{1}/git/trees/{2}?recursive=1\".format(\n org, repo, sha\n ),\n headers=headers,\n )\n if tree_response.status_code == 200:\n listing = self.extract(\n tree_response.json()\n )\n\n if globs:\n filtered_listing = []\n for item in listing:\n path = item.get(\"path\", \"\")\n for glob_string in globs:\n if fnmatch.fnmatch(path, glob_string):\n filtered_listing.append(item)\n return filtered_listing\n else:\n return listing\n\n @cache_by_sha\n def raw(\n self,\n list_item,\n request_session,\n org,\n repo,\n branch,\n dest_dir,\n ):\n headers = self.headers()\n path_to_file = list_item.get(\"path\", \"\")\n file_out = \"{}{}\".format(dest_dir, path_to_file)\n raw_response = request_session.get(\n 
\"https://raw.githubusercontent.com/{0}/{1}/{2}/{3}\".format(\n org, repo, branch, path_to_file\n ),\n headers=headers,\n )\n if raw_response.status_code == requests.codes.ok:\n makedirs(dirname(file_out), exist_ok=True)\n with open(file_out, mode=\"wb+\") as f:\n f.write(raw_response.content)\n\n\nclass PreBuild:\n def __init__(self, opts):\n super().__init__()\n self.options = opts\n self.list_of_contents = []\n self.tempdir = (\n \"./integrations_data\"\n )\n self.data_dir = \"{0}{1}{2}\".format(\n abspath(normpath(options.source)),\n sep,\n \"data\" + sep,\n )\n self.content_dir = \"{0}{1}{2}\".format(\n abspath(normpath(options.source)),\n sep,\n \"content\" + sep + \"en\" + sep,\n )\n self.data_integrations_dir = (\n join(self.data_dir, \"integrations\") + sep\n )\n self.data_service_checks_dir = (\n join(self.data_dir, \"service_checks\") + sep\n )\n self.content_integrations_dir = (\n join(self.content_dir, \"integrations\") + sep\n )\n self.extract_dir = \"{0}\".format(\n join(self.tempdir, \"extracted\") + sep\n )\n self.integration_datafile = \"{0}{1}{2}\".format(\n abspath(normpath(self.options.source)),\n sep,\n \"integrations.json\",\n )\n self.regex_h1 = re.compile(\n r\"^#{1}(?!#)(.*)\", re.MULTILINE\n )\n self.regex_h1_replace = re.compile(\n r\"^(#{1})(?!#)(.*)\", re.MULTILINE\n )\n self.regex_metrics = re.compile(\n r\"(#{3} Metrics\\n)([\\s\\S]*this integration.|[\\s\\S]*this check.)([\\s\\S]*)(#{3} Events\\n)\",\n re.DOTALL,\n )\n self.regex_service_check = re.compile(\n r\"(#{3} Service Checks\\n)([\\s\\S]*does not include any service checks at this time.)([\\s\\S]*)(#{2} Troubleshooting\\n)\",\n re.DOTALL,\n )\n self.regex_fm = re.compile(\n r\"(?:-{3})(.*?)(?:-{3})(.*)\", re.DOTALL\n )\n self.regex_source = re.compile(\n r\"(\\S*FROM_DISPLAY_NAME\\s*=\\s*\\{)(.*?)\\}\",\n re.DOTALL,\n )\n self.datafile_json = []\n self.pool_size = 5\n self.integration_mutations = OrderedDict(\n {\n \"hdfs\": {\n \"action\": \"create\",\n \"target\": \"hdfs\",\n \"remove_header\": False,\n \"fm\": {\n \"is_public\": True,\n \"kind\": \"integration\",\n \"integration_title\": \"Hdfs\",\n \"short_description\": \"Track cluster disk usage, volume failures, dead DataNodes, and more.\",\n },\n },\n \"mesos\": {\n \"action\": \"create\",\n \"target\": \"mesos\",\n \"remove_header\": False,\n \"fm\": {\n \"aliases\": [\n \"/integrations/mesos_master/\",\n \"/integrations/mesos_slave/\",\n ],\n \"is_public\": True,\n \"kind\": \"integration\",\n \"integration_title\": \"Mesos\",\n \"short_description\": \"Track cluster resource usage, master and slave counts, tasks statuses, and more.\",\n },\n },\n \"activemq_xml\": {\n \"action\": \"merge\",\n \"target\": \"activemq\",\n \"remove_header\": False,\n },\n \"cassandra_nodetool\": {\n \"action\": \"merge\",\n \"target\": \"cassandra\",\n \"remove_header\": False,\n },\n \"gitlab_runner\": {\n \"action\": \"merge\",\n \"target\": \"gitlab\",\n \"remove_header\": False,\n },\n \"hdfs_datanode\": {\n \"action\": \"merge\",\n \"target\": \"hdfs\",\n \"remove_header\": False,\n },\n \"hdfs_namenode\": {\n \"action\": \"merge\",\n \"target\": \"hdfs\",\n \"remove_header\": False,\n },\n \"mesos_master\": {\n \"action\": \"merge\",\n \"target\": \"mesos\",\n \"remove_header\": True,\n },\n \"mesos_slave\": {\n \"action\": \"merge\",\n \"target\": \"mesos\",\n \"remove_header\": False,\n },\n \"kafka_consumer\": {\n \"action\": \"merge\",\n \"target\": \"kafka\",\n \"remove_header\": False,\n },\n \"kube_dns\": {\n \"action\": \"discard\",\n 
\"target\": \"none\",\n \"remove_header\": False,\n },\n \"kube_proxy\": {\n \"action\": \"discard\",\n \"target\": \"none\",\n \"remove_header\": False,\n },\n \"kubernetes_state\": {\n \"action\": \"discard\",\n \"target\": \"none\",\n \"remove_header\": False,\n },\n \"system_core\": {\n \"action\": \"discard\",\n \"target\": \"system\",\n \"remove_header\": False,\n },\n \"system_swap\": {\n \"action\": \"discard\",\n \"target\": \"system\",\n \"remove_header\": False,\n },\n \"hbase_regionserver\": {\n \"action\": \"merge\",\n \"target\": \"hbase_master\",\n \"remove_header\": False,\n },\n }\n )\n self.initial_integration_files = glob.glob(\n \"{}*.md\".format(self.content_integrations_dir)\n )\n makedirs(self.data_integrations_dir, exist_ok=True)\n makedirs(\n self.data_service_checks_dir, exist_ok=True\n )\n makedirs(\n self.content_integrations_dir, exist_ok=True\n )\n\n @staticmethod\n def metric_csv_to_yaml(key_name, csv_filename, yml_filename):\n \"\"\"\n Given a file path to a single csv file convert it to a yaml file\n\n :param key_name: integration key name for root object\n :param csv_filename: path to input csv file\n :param yml_filename: path to output yml file\n \"\"\"\n yaml_data = {key_name: []}\n with open(csv_filename) as csv_file:\n reader = csv.DictReader(csv_file, delimiter=\",\")\n yaml_data[key_name] = [\n dict(line) for line in reader\n ]\n if yaml_data[key_name]:\n # Transforming the metric description to html in order to interpret markdown in\n # integrations metrics table.\n # the char strip is to compensate for the lib adding
<p> and </p>
tags\n for metric in yaml_data[key_name]:\n metric['description'] = str(\n markdown2.markdown(metric['description']))[3:-5]\n with open(\n file=yml_filename,\n mode=\"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(\n yaml.dump(\n yaml_data, default_flow_style=False\n )\n )\n\n def download_from_repo(self, org, repo, branch, globs):\n \"\"\"\n Takes github info and file globs and downloads files from github using multiple processes\n :param org: github organization or person\n :param repo: github repo name\n :param branch: the branch name\n :param globs: list of strings in glob format of what to extract\n :return:\n \"\"\"\n with GitHub(self.options.token) as gh:\n listing = gh.list(org, repo, branch, globs)\n dest = \"{0}{1}{2}\".format(\n self.extract_dir, repo, sep\n )\n with Pool(processes=self.pool_size) as pool:\n with requests.Session() as s:\n r = [\n x\n for x in pool.imap_unordered(\n partial(\n gh.raw,\n request_session=s,\n org=org,\n repo=repo,\n branch=branch,\n dest_dir=dest,\n ),\n listing,\n )\n ]\n\n def process(self):\n \"\"\"\n This represents the overall workflow of the build of the documentation\n \"\"\"\n print(\"\\x1b[34mStarting Processing...\\x1b[0m\")\n\n self.extract_config()\n\n try:\n self.local_or_upstream()\n except:\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: Local mode detected: Downloading files failed, documentation is now in degraded mode.\")\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: Downloading files failed, stoping build.\")\n sys.exit(1)\n\n try:\n self.process_filenames()\n except:\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: Local mode detected: Processing files failed, documentation is now in degraded mode.\")\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: Processing files failed, stoping build.\")\n sys.exit(1)\n\n try:\n self.merge_integrations()\n except:\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: Local mode detected: Integration merge failed, documentation is now in degraded mode.\")\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: Integration merge failed, stoping build.\")\n sys.exit(1)\n\n def extract_config(self):\n \"\"\"\n This pulls the content from the configuration file at CONFIGURATION_FILE location\n then parses it to populate the list_of_content variable that contains all contents\n that needs to be pulled and processed.\n \"\"\"\n print(\n \"\\x1b[32mINFO\\x1b[0m: Loading {} configuration file\".format(\n getenv(\"CONFIGURATION_FILE\")\n )\n )\n configuration = yaml.load(open(getenv(\"CONFIGURATION_FILE\")))\n for org in configuration:\n for repo in org[\"repos\"]:\n for content in repo[\"contents\"]:\n content_temp = {}\n content_temp[\"org_name\"] = org[\n \"org_name\"\n ]\n content_temp[\"repo_name\"] = repo[\n \"repo_name\"\n ]\n content_temp[\"branch\"] = content[\n \"branch\"\n ]\n content_temp[\"action\"] = content[\n \"action\"\n ]\n content_temp[\"globs\"] = content[\"globs\"]\n\n if (content[\"action\"] == \"pull-and-push-folder\" or content[\"action\"] == \"pull-and-push-file\"):\n content_temp[\"options\"] = content[\"options\"]\n\n self.list_of_contents.append(\n content_temp\n )\n # print(\n # \"Adding content {} \".format(\n # content_temp\n # )\n # )\n\n def local_or_upstream(self):\n \"\"\"\n This goes through the list_of_contents and check for each repo specified\n If a local version exists otherwise we download it from the upstream repo on Github\n Local version of the repo should be in the same folder as the documentation/ 
folder.\n \"\"\"\n for content in self.list_of_contents:\n repo_name = \"../\" + content[\"repo_name\"] + sep\n if isdir(repo_name):\n print(\"\\x1b[32mINFO\\x1b[0m: Local version of {} found\".format(\n content[\"repo_name\"]))\n content[\"globs\"] = self.update_globs(\n repo_name,\n content[\"globs\"],\n )\n elif self.options.token != \"False\":\n print(\n \"\\x1b[32mINFO\\x1b[0m: No local version of {} found, downloading content from upstream version\".format(\n content[\"repo_name\"]\n )\n )\n self.download_from_repo(\n content[\"org_name\"],\n content[\"repo_name\"],\n content[\"branch\"],\n content[\"globs\"],\n )\n content[\n \"globs\"\n ] = self.update_globs(\n \"{0}{1}{2}\".format(\n self.extract_dir,\n content[\"repo_name\"],\n sep,\n ),\n content[\"globs\"],\n )\n elif getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: Local mode detected: No local version of {} found, no GITHUB_TOKEN available. Documentation is now in degraded mode\".format(content[\"repo_name\"]))\n content[\"action\"] = \"Not Available\"\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: No local version of {} found, no GITHUB_TOKEN available.\".format(\n content[\"repo_name\"]\n )\n )\n raise ValueError\n\n def update_globs(self, new_path, globs):\n \"\"\"\n Depending if the repo is local or we downloaded it we need to update the globs to match\n the final version of the repo to use\n :param new_path: new_path to update the globs with\n :param globs: list of globs to update\n \"\"\"\n new_globs = []\n for item in globs:\n new_globs.append(\"{}{}\".format(new_path, item))\n\n return new_globs\n\n def process_filenames(self):\n \"\"\"\n Goes through the list_of_contents and for each content\n triggers the right action to apply.\n \"\"\"\n for content in self.list_of_contents:\n # print(\"Processing content: {}\".format(content))\n try:\n if content[\"action\"] == \"integrations\":\n self.process_integrations(content)\n\n elif (content[\"action\"] == \"pull-and-push-folder\"):\n self.pull_and_push_folder(content)\n\n elif content[\"action\"] == \"pull-and-push-file\":\n self.pull_and_push_file(content)\n elif content[\"action\"] == \"Not Available\":\n if getenv(\"LOCAL\") == 'True':\n print(\"\\x1b[33mWARNING\\x1b[0m: Processing of {} canceled, since content is not available. 
Documentation is in degraded mode\".format(\n content[\"repo_name\"]))\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: Action {} unknown for {}\".format(content[\"action\"], content))\n raise ValueError\n except:\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: Unsuccessful processing of {}\".format(content))\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: Unsuccessful processing of {}\".format(content))\n raise ValueError\n\n def process_integrations(self, content):\n \"\"\"\n Goes through all files needed for integrations build\n and triggers the right function for the right type of file.\n See https://github.com/DataDog/documentation/wiki/Documentation-Build#integrations to learn more.\n :param content: integrations content to process\n \"\"\"\n for file_name in chain.from_iterable(\n glob.iglob(pattern, recursive=True)\n for pattern in content[\"globs\"]\n ):\n if file_name.endswith(\".csv\"):\n self.process_integration_metric(file_name)\n\n elif file_name.endswith(\"manifest.json\"):\n self.process_integration_manifest(file_name)\n\n elif file_name.endswith(\"service_checks.json\"):\n self.process_service_checks(file_name)\n\n elif file_name.endswith(\".md\"):\n self.process_integration_readme(file_name)\n\n def pull_and_push_file(self, content):\n \"\"\"\n Takes the content from a file from a github repo and\n pushed it to the doc\n See https://github.com/DataDog/documentation/wiki/Documentation-Build#pull-and-push-files to learn more\n :param content: object with a file_name, a file_path, and options to apply\n \"\"\"\n with open(\"\".join(content[\"globs\"]), mode=\"r+\") as f:\n file_content = f.read()\n\n # If options include front params, then the H1 title of the source file is striped\n # and the options front params are inlined\n\n if \"front_matters\" in content[\"options\"]:\n front_matters= \"---\\n\" + yaml.dump(content[\"options\"][\"front_matters\"],default_flow_style=False) + \"---\\n\"\n file_content = re.sub(r'^(#{1}).*', front_matters, file_content, count=1)\n\n with open(\n \"{}{}{}\".format(\n self.content_dir,\n content[\"options\"][\"dest_path\"][1:],\n basename(content[\"options\"][\"file_name\"]),\n ),\n mode=\"w+\",\n encoding=\"utf-8\",\n ) as f:\n f.write(file_content)\n\n def pull_and_push_folder(self, content):\n \"\"\"\n Take the content from a folder following github logic\n and transform it to be displayed in the doc in dest_dir folder\n See https://github.com/DataDog/documentation/wiki/Documentation-Build#pull-and-push-folder to learn more\n :param content: content to process\n \"\"\"\n\n for file_name in chain.from_iterable(glob.iglob(pattern, recursive=True) for pattern in content[\"globs\"]):\n\n with open(file_name, mode=\"r+\") as f:\n file_content = f.read()\n\n # Replacing the master README.md by _index.md to follow Hugo logic\n if file_name.endswith(\"README.md\"):\n file_name = \"_index.md\"\n\n # Replacing links that point to the Github folder by link that point to the doc.\n new_link = (\n content[\"options\"][\"dest_dir\"] + \"\\\\2\"\n )\n regex_github_link = re.compile(\n r\"(https:\\/\\/github\\.com\\/{}\\/{}\\/blob\\/{}\\/{})(\\S+)\\.md\".format(\n content[\"org_name\"],\n content[\"repo_name\"],\n content[\"branch\"],\n content[\"options\"][\n \"path_to_remove\"\n ],\n )\n )\n file_content = re.sub(\n regex_github_link,\n new_link,\n file_content,\n count=0,\n )\n\n # Writing the new content to the documentation file\n dirp = \"{}{}\".format(\n self.content_dir,\n content[\"options\"][\"dest_dir\"][1:],\n )\n 
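# NOTE (editor): hypothetical illustration of the join above. \"dest_dir\" values in the\n            # pull-and-push config are assumed to carry a leading \"/\" (e.g. \"/agent/\"), which the\n            # [1:] slice strips so the output path lands under content_dir:\n            #   \"{}{}\".format(\"content/en/\", \"/agent/\"[1:])  # -> \"content/en/agent/\"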
makedirs(dirp, exist_ok=True)\n with open(\n \"{}{}\".format(dirp, basename(file_name)),\n mode=\"w+\",\n encoding=\"utf-8\",\n ) as f:\n f.write(file_content)\n\n def merge_integrations(self):\n \"\"\" Merges integrations that come under one \"\"\"\n for (\n name,\n action_obj,\n ) in self.integration_mutations.items():\n if name not in self.initial_integration_files:\n action = action_obj.get(\"action\")\n target = action_obj.get(\"target\")\n input_file = \"{}{}.md\".format(\n self.content_integrations_dir, name\n )\n output_file = \"{}{}.md\".format(\n self.content_integrations_dir, target\n )\n if action == \"merge\":\n with open(\n input_file, \"r\"\n ) as content_file, open(\n output_file, \"a\"\n ) as target_file:\n content = content_file.read()\n content = re.sub(\n self.regex_fm,\n r\"\\2\",\n content,\n count=0,\n )\n if action_obj.get(\n \"remove_header\", False\n ):\n content = re.sub(\n self.regex_h1,\n \"\",\n content,\n count=0,\n )\n else:\n content = re.sub(\n self.regex_h1_replace,\n r\"##\\2\",\n content,\n count=0,\n )\n target_file.write(content)\n try:\n remove(input_file)\n except OSError:\n print(\n \"\\x1b[31mERROR\\x1b[0m: The file {} was not found and could not be removed during merge action\".format(\n input_file\n )\n )\n elif action == \"truncate\":\n if exists(output_file):\n with open(\n output_file, \"r+\"\n ) as target_file:\n content = target_file.read()\n content = re.sub(\n self.regex_fm,\n r\"---\\n\\1\\n---\\n\",\n content,\n count=0,\n )\n target_file.truncate(0)\n target_file.seek(0)\n target_file.write(content)\n else:\n open(output_file, \"w\").close()\n elif action == \"discard\":\n try:\n remove(input_file)\n except OSError:\n print(\n \"\\x1b[31mERROR\\x1b[0m: The file {} was not found and could not be removed during discard action\".format(\n input_file\n )\n )\n elif action == \"create\":\n with open(output_file, \"w+\") as f:\n fm = yaml.dump(\n action_obj.get(\"fm\"),\n default_flow_style=False,\n ).rstrip()\n data = \"---\\n{0}\\n---\\n\".format(fm)\n f.write(data)\n\n def process_integration_metric(self, file_name):\n \"\"\"\n Take a single metadata csv file and convert it to yaml\n :param file_name: path to a metadata csv file\n \"\"\"\n if file_name.endswith(\"/metadata.csv\"):\n key_name = basename(\n dirname(normpath(file_name))\n )\n else:\n key_name = basename(\n file_name.replace(\"_metadata.csv\", \"\")\n )\n new_file_name = \"{}{}.yaml\".format(\n self.data_integrations_dir, key_name\n )\n self.metric_csv_to_yaml(key_name, file_name, new_file_name)\n\n def process_integration_manifest(self, file_name):\n \"\"\"\n Take a single manifest json file and upsert to integrations.json data\n set is_public to false to hide integrations we merge later\n :param file_name: path to a manifest json file\n \"\"\"\n\n names = [\n d.get(\"name\", \"\").lower()\n for d in self.datafile_json\n if \"name\" in d\n ]\n with open(file_name) as f:\n try:\n data = json.load(f)\n data_name = data.get(\"name\", \"\").lower()\n if data_name in [\n k\n for k, v in self.integration_mutations.items()\n if v.get(\"action\") == \"merge\"\n ]:\n data[\"is_public\"] = False\n if data_name in names:\n item = [\n d\n for d in self.datafile_json\n if d.get(\"name\", \"\").lower() == data_name\n ]\n if len(item) > 0:\n item[0].update(data)\n else:\n self.datafile_json.append(data)\n except JSONDecodeError:\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: manifest could not be parsed {}\".format(file_name))\n else:\n print(\n 
\"\\x1b[31mERROR\\x1b[0m: manifest could not be parsed {}\".format(file_name))\n raise JSONDecodeError\n\n def process_service_checks(self, file_name):\n \"\"\"\n Take a single service_checks.json file and copies it to the data folder\n as the integration name it came from e.g /data/service_checks/docker.json\n :param file_name: path to a service_checks json file\n \"\"\"\n new_file_name = \"{}.json\".format(\n basename(dirname(normpath(file_name)))\n )\n shutil.copy(\n file_name,\n self.data_service_checks_dir + new_file_name,\n )\n\n def process_integration_readme(self, file_name):\n \"\"\"\n Take a single README.md file and\n 1. extract the first h1, if this isn't a merge item\n 2. inject metrics after ### Metrics header if metrics exists for file\n 3. inject service checks after ### Service Checks if file exists\n 4. inject hugo front matter params at top of file\n 5. write out file to content/integrations with filename changed to integrationname.md\n :param file_name: path to a readme md file\n \"\"\"\n no_integration_issue = True\n\n metrics = glob.glob(\n \"{path}{sep}*metadata.csv\".format(\n path=dirname(file_name), sep=sep\n )\n )\n metrics = metrics[0] if len(metrics) > 0 else None\n metrics_exist = (metrics and exists(metrics)\n and linecache.getline(metrics, 2))\n service_check = glob.glob(\"{file}.json\".format(\n file=self.data_service_checks_dir + basename(dirname(file_name))))\n service_check = (\n service_check[0]\n if len(service_check) > 0\n else None\n )\n service_check_exist = service_check and exists(\n service_check\n )\n manifest = \"{0}{1}{2}\".format(\n dirname(file_name), sep, \"manifest.json\"\n )\n\n if exists(manifest):\n try:\n manifest_json = json.load(open(manifest))\n except JSONDecodeError:\n no_integration_issue = False\n manifest_json = {}\n if getenv(\"LOCAL\") == 'True':\n print(\n \"\\x1b[33mWARNING\\x1b[0m: manifest could not be parsed {}\".format(manifest))\n else:\n print(\n \"\\x1b[31mERROR\\x1b[0m: manifest could not be parsed {}\".format(manifest))\n raise JSONDecodeError\n else:\n no_integration_issue = False\n manifest_json = {}\n print(\"\\x1b[33mWARNING\\x1b[0m: No manifest found for {}\".format(file_name))\n\n dependencies = self.add_dependencies(file_name)\n new_file_name = \"{}.md\".format(\n basename(dirname(file_name))\n )\n exist_already = exists(\n self.content_integrations_dir + new_file_name\n )\n with open(file_name, \"r\") as f:\n result = f.read()\n title = manifest_json.get(\"name\", \"\").lower()\n if title not in [\n k\n for k, v in self.integration_mutations.items()\n if v.get(\"action\") == \"merge\"\n ]:\n result = re.sub(\n self.regex_h1, \"\", result, 1\n )\n if metrics_exist:\n result = re.sub(\n self.regex_metrics,\n r'\\1{{< get-metrics-from-git \"%s\" >}}\\n\\3\\4'\n % format(title),\n result,\n 0,\n )\n if service_check_exist:\n result = re.sub(\n self.regex_service_check,\n r'\\1{{< get-service-checks-from-git \"%s\" >}}\\n\\3\\4'\n % format(title),\n result,\n 0,\n )\n result = \"{0}\\n\\n{1}\".format(\n result, \"{{< get-dependencies >}}\"\n )\n result = self.add_integration_frontmatter(\n new_file_name, result, dependencies\n )\n if not exist_already and no_integration_issue:\n with open(self.content_integrations_dir + new_file_name, \"w\", ) as out:\n out.write(result)\n\n def add_integration_frontmatter(\n self, file_name, content, dependencies=[]\n ):\n \"\"\"\n Takes an integration README.md and injects front matter yaml based on manifest.json data of the same integration\n :param file_name: new integration 
markdown filename e.g airbrake.md\n :param content: string of markdown content\n :return: formatted string\n \"\"\"\n fm = {}\n template = \"---\\n{front_matter}\\n---\\n\\n{content}\\n\"\n if file_name not in self.initial_integration_files:\n item = [\n d\n for d in self.datafile_json\n if d.get(\"name\", \"\").lower() == basename(file_name).replace(\".md\", \"\")\n ]\n if item and len(item) > 0:\n item[0][\"kind\"] = \"integration\"\n item[0][\"integration_title\"] = (\n item[0]\n .get(\"public_title\", \"\")\n .replace(\"Datadog-\", \"\")\n .replace(\"Integration\", \"\")\n .strip()\n )\n item[0][\"git_integration_title\"] = (\n item[0].get(\"name\", \"\").lower()\n )\n if item[0].get(\"type\", None):\n item[0][\"ddtype\"] = item[0].get(\"type\")\n del item[0][\"type\"]\n item[0][\"dependencies\"] = dependencies\n fm = yaml.dump(\n item[0], width=150, default_style='\"', default_flow_style=False\n ).rstrip()\n else:\n fm = {\"kind\": \"integration\"}\n return template.format(\n front_matter=fm, content=content\n )\n\n def add_dependencies(self, file_name):\n dependencies = []\n if file_name.startswith(\n \"{0}{1}{2}\".format(\n self.extract_dir, \"integrations-core\", sep\n )\n ):\n dependencies.append(\n file_name.replace(\n \"{0}{1}{2}\".format(\n self.extract_dir,\n \"integrations-core\",\n sep,\n ),\n \"https://github.com/DataDog/integrations-core/blob/master/\",\n )\n )\n\n elif file_name.startswith(\n \"{0}{1}{2}\".format(\n self.extract_dir, \"integrations-extras\", sep\n )\n ):\n dependencies.append(\n file_name.replace(\n \"{0}{1}{2}\".format(\n self.extract_dir,\n \"integrations-extras\",\n sep,\n ),\n \"https://github.com/DataDog/integrations-extras/blob/master/\",\n )\n )\n\n return dependencies\n\n\nif __name__ == \"__main__\":\n parser = OptionParser(\n usage=\"usage: %prog [options] link_type\"\n )\n parser.add_option(\n \"-t\",\n \"--token\",\n help=\"github access token\",\n default=None,\n )\n parser.add_option(\n \"-s\",\n \"--source\",\n help=\"location of src files\",\n default=curdir,\n )\n\n options, args = parser.parse_args()\n options.token = (\n getenv(\"GITHUB_TOKEN\", options.token)\n if not options.token\n else options.token\n )\n\n pre = PreBuild(options)\n pre.process()\n","sub_path":"local/bin/py/update_pre_build.py","file_name":"update_pre_build.py","file_ext":"py","file_size_in_byte":38391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"591541368","text":"import os\nimport shlex\nimport pathlib\n\nimport matplotlib.pylab as pylab\nimport numpy\nimport scipy.optimize\nfrom matplotlib.ticker import FormatStrFormatter\nfrom scipy.interpolate import griddata\n\nimport azcam\nfrom azcam_testers.basetester import Tester\n\n\nclass Metrology(Tester):\n \"\"\"\n Metrology analysis.\n This class analyzes a grid of (x,y,z) image surface points.\n \"\"\"\n\n def __init__(self):\n\n super().__init__(\"metrology\")\n\n self.itl_sn = -1\n self.itl_id = \"\"\n\n # inputs\n self.data_file = \"metrology.txt\"\n self.report_file = \"metrology\"\n\n self.standard_correct = 0\n self.standard_zheight = 13.000\n\n self.show_height_grade = 0\n self.z_nom = None # must be defined before .analyze()\n self.z_spec = [] # spec [Zmin,Zmax]\n\n self.grade_mounting = \"UNKNOWN\"\n\n self.height_half_band_spec = None\n self.height_fraction_limit = 0.95\n\n self.show_quantiles = 0\n self.quantile_percents = [\n 0.0,\n 0.5,\n 1.0,\n 2.5,\n 25.0,\n 50.0,\n 75.0,\n 97.5,\n 99.0,\n 99.5,\n 100.0,\n ]\n self.qfh0 = 3 # quantile 
index\n self.qfh1 = 7\n\n self.flatness_half_band_spec = None # from best fit plane\n self.flatness_fraction_limit = 0.95\n\n # outputs\n self.grade_height = \"UNKNOWN\"\n self.z_mean = -1\n self.z_median = -1\n self.zsdev = -1\n self.z_mid = -1\n self.z_halfband = -1\n\n self.quantile_values = []\n self.flatness_quantile_values = []\n\n self.grade_flatness = \"UNKNOWN\"\n self.fmin = -1\n self.fmax = -1\n self.fsdev = -1\n self.fmid = -1\n self.f_halfband = -1\n\n # filename\n self.HistogramFlatnessPlot = \"HistogramFlatnessPlot.png\"\n self.ColorZPlot = \"ColorZPlot.png\"\n self.WiskerPlot = \"WhiskerPlot.png\"\n self.HistogramHeightPlot = \"HistogramHeightPlot.png\"\n self.SurfacePlot = \"SurfacePlot.png\"\n self.StandardPlot = \"StandardPlot.png\"\n\n # new\n self.stage_temp = None\n self.start_time = None\n self.end_time = None\n self.date = None\n\n def find_file(self, filename):\n \"\"\"\n Find a filename starting with filename.\n \"\"\"\n\n path = pathlib.Path(azcam.utils.curdir())\n\n for _, _, files in os.walk(path):\n break\n\n for f in files:\n if f.startswith(filename):\n break\n\n try:\n if not f.startswith(filename):\n raise azcam.AzcamError(\"file not found\")\n except Exception:\n raise azcam.AzcamError(\"file not found\")\n\n return f\n\n def analyze(self, filename=None):\n \"\"\"\n Analyze existing metrology data.\n \"\"\"\n\n azcam.log(\"Analyzing metrology data\")\n\n if filename is None:\n filename = self.find_file(\"sn\")\n\n # read data\n self.read_data(filename)\n\n # get stats\n self.z_mean = self.z.mean()\n self.z_median = numpy.median(self.z)\n self.zsdev = self.z.std()\n numpoints = len(self.z)\n\n azcam.log(\"%s\\t\\t%.04f\" % (\"Mean\", self.z_mean))\n azcam.log(\"%s\\t\\t%.04f\" % (\"Median\", self.z_median))\n azcam.log(\"%s\\t\\t%.04f\" % (\"Sdev\", self.zsdev))\n if self.z_nom is not None:\n azcam.log(\"%s\\t\\t%.04f\" % (\"z_nom\", self.z_nom))\n\n # ***********************************************************\n # analyze height\n # ***********************************************************\n\n if self.z_spec != []:\n fails1 = self.z < self.z_spec[0] - self.height_half_band_spec\n fails2 = self.z > self.z_spec[1] + self.height_half_band_spec\n fails = numpy.count_nonzero(fails1) + numpy.count_nonzero(fails2)\n self.heightpassfrac = (numpoints - fails) / float(numpoints)\n\n if self.z_spec != [] and self.show_height_grade:\n azcam.log(\n \"Height: %.01f%% of points are within height specification\"\n % (self.heightpassfrac * 100.0)\n )\n if self.heightpassfrac >= self.height_fraction_limit:\n self.grade_height = \"PASS\"\n else:\n self.grade_height = \"FAIL\"\n\n azcam.log(\"Height grade is %s\" % self.grade_height)\n\n # make quantiles\n self.quantile_values = numpy.percentile(self.z, self.quantile_percents)\n if self.show_quantiles:\n azcam.log(\"Quantile\\tHeight\")\n for i, p in enumerate(self.quantile_percents):\n azcam.log(\"%5.01f\\t\\t%.04f\" % (p, self.quantile_values[i]))\n self.z_halfband = (\n self.quantile_values[self.qfh1] - self.quantile_values[self.qfh0]\n ) / 2.0\n self.z_mid = (\n self.quantile_values[self.qfh1] + self.quantile_values[self.qfh0]\n ) / 2.0\n azcam.log(\"%s\\t\\t%.04f\" % (\"Z-mid\", self.z_mid))\n azcam.log(\"%s\\t%.04f\" % (\"Z-mid_HalfBand\", self.z_halfband))\n\n # ***********************************************************\n # analyze flatness\n # ***********************************************************\n\n # make best fit plane\n def residuals(parameter, f, x, y):\n return [(f[i] - model(parameter, x[i], y[i])) 
for i in range(len(f))]\n\n def model(parameter, x, y):\n a, b, c = parameter\n return a * x + b * y + c\n\n p0 = [1.0, 1.0, 1.0] # initial guess\n result = scipy.optimize.leastsq(residuals, p0, args=(self.z, self.x, self.y))[0]\n a = result[0]\n b = result[1]\n c = result[2]\n\n # make fitted plane z values\n self.zfit = a * self.x + b * self.y + c\n self.flatnessresiduals = self.z - self.zfit\n self.flatnessresiduals = numpy.array(self.flatnessresiduals)\n\n # get flatness Quantiles\n self.flatness_quantile_values = numpy.percentile(\n self.flatnessresiduals, self.quantile_percents\n )\n if self.show_quantiles:\n azcam.log(\"Quantile\\tFlatness\")\n for i, p in enumerate(self.quantile_percents):\n azcam.log(\"%5.01f\\t\\t%.01f\" % (p, self.flatness_quantile_values[i]))\n self.f_halfband = (\n self.flatness_quantile_values[self.qfh1]\n - self.flatness_quantile_values[self.qfh0]\n ) / 2.0\n self.fmid = (\n self.flatness_quantile_values[self.qfh1]\n + self.flatness_quantile_values[self.qfh0]\n ) / 2.0\n azcam.log(\"%s\\t\\t%.04f\" % (\"F-mid\", self.fmid))\n azcam.log(\"%s\\t%.04f\" % (\"F-mid_HalfBand\", self.f_halfband))\n\n # get flatness stats\n self.FMean = self.flatnessresiduals.mean()\n self.FMedian = numpy.median(self.flatnessresiduals)\n self.fsdev = self.flatnessresiduals.std()\n self.fmin = self.flatnessresiduals.min()\n self.fmax = self.flatnessresiduals.max()\n numpoints = len(self.flatnessresiduals) # same as numpoints above\n\n # measure flatness residuals\n if self.flatness_half_band_spec is not None:\n fails = abs(self.flatnessresiduals) > self.flatness_half_band_spec\n fails = numpy.count_nonzero(fails)\n self.flatnesspassfract = (numpoints - fails) / float(numpoints)\n\n azcam.log(\n \"Flatness: %.01f%% of points are within flatness spec (%.03f)\"\n % (self.flatnesspassfract * 100.0, self.flatness_half_band_spec)\n )\n\n if self.flatnesspassfract >= self.flatness_fraction_limit:\n self.grade_flatness = \"PASS\"\n else:\n self.grade_flatness = \"FAIL\"\n\n azcam.log(\"Flatness grade is %s\" % self.grade_flatness)\n\n self.valid = 1\n\n # report on standard\n if self.standard_correct:\n zstandard_min = self.zstandard.min()\n zstandard_max = self.zstandard.max()\n # zstandard_mean=self.zstandard.mean()\n # zstandard_std=self.zstandard.std()\n s = \"Z-Standard (mm): Min=%.04f, Max=%.04f\" % (zstandard_min, zstandard_max)\n azcam.log(s)\n\n # make plots\n if self.create_plots:\n self.plot()\n\n # define dataset\n self.dataset = {\n \"data_file\": self.data_file,\n \"grade\": self.grade,\n \"itl_sn\": self.itl_sn,\n \"itl_id\": self.itl_id,\n \"grade_mounting\": self.grade_mounting,\n \"z_nom\": self.z_nom,\n \"z_spec\": self.z_spec,\n \"z_mean\": self.z_mean,\n \"z_median\": self.z_median,\n \"zsdev\": self.zsdev,\n \"z95\": self.z_mid,\n \"halfband\": self.z_halfband,\n \"quantile_percents\": list(self.quantile_percents),\n \"quantile_values\": list(self.quantile_values),\n \"grade_flatness\": self.grade_flatness,\n \"f_halfband\": self.f_halfband,\n \"fsdev\": self.fsdev,\n \"fmin\": self.fmin,\n \"fmax\": self.fmax,\n \"fmid\": self.fmid,\n \"flatness_quantile_values\": list(self.flatness_quantile_values),\n \"ColorZPlot\": self.ColorZPlot,\n \"WiskerPlot\": self.WiskerPlot,\n \"HistogramHeightPlot\": self.HistogramHeightPlot,\n \"HistogramFlatnessPlot\": self.HistogramFlatnessPlot,\n \"SurfacePlot\": self.SurfacePlot,\n }\n\n # write output files\n self.write_datafile()\n if self.create_reports:\n self.report()\n\n return\n\n def plot(self):\n \"\"\"\n Make metrology 
plots.\n \"\"\"\n\n zmin = self.z_mid - 0.010\n zmax = self.z_mid + 0.010\n\n # surface plot\n zz = list(map(float, self.z))\n grid_x, grid_y = numpy.mgrid[\n min(self.x) : max(self.x) : 100j, min(self.y) : max(self.y) : 100j\n ]\n grid_z = griddata((self.x, self.y), zz, (grid_x, grid_y), method=\"cubic\")\n\n fig = azcam.plot.plt.figure()\n fignum = fig.number\n azcam.plot.move_window(fignum)\n ax = azcam.plot.plt.axes(projection=\"3d\")\n ax.plot_surface(\n grid_x,\n grid_y,\n grid_z,\n rstride=1,\n cstride=1,\n cmap=pylab.get_cmap(\"coolwarm\"),\n linewidth=0,\n antialiased=False,\n alpha=0.9,\n )\n ax.zaxis.set_major_formatter(FormatStrFormatter(\"%.03f\"))\n ax.set_xlabel(\"X [mm]\")\n ax.set_ylabel(\"Y [mm]\")\n ax.set_zlabel(\"Z [mm]\")\n azcam.plot.plt.title(\"Surface Plot with Best Fit Plane\")\n ax.view_init(elev=25.0, azim=-55) # improve perspective\n\n # least squares plane plot on surface plot\n zz = list(map(float, self.zfit))\n grid_x, grid_y = numpy.mgrid[\n min(self.x) : max(self.x) : 100j, min(self.y) : max(self.y) : 100j\n ]\n grid_z = griddata((self.x, self.y), zz, (grid_x, grid_y), method=\"cubic\")\n ax.plot_surface(\n grid_x,\n grid_y,\n grid_z,\n rstride=1,\n cstride=1,\n cmap=pylab.get_cmap(\"gray\"),\n linewidth=0,\n antialiased=False,\n alpha=0.1,\n )\n ax.set_zlim(zmin, zmax)\n\n azcam.plot.save_figure(fignum, self.SurfacePlot)\n\n # height histogram plot\n fig = azcam.plot.plt.figure()\n fignum = fig.number\n azcam.plot.move_window(fignum)\n azcam.plot.plt.title(\"Height Histogram Plot\")\n azcam.plot.plt.hist(\n self.z,\n bins=\"auto\",\n facecolor=\"green\",\n alpha=0.8,\n histtype=\"stepfilled\",\n rwidth=0.8,\n )\n ax = azcam.plot.plt.gca()\n ax.set_xlabel(\"Z (mm)\")\n ax.xaxis.set_major_formatter(FormatStrFormatter(\"%.03f\"))\n ax.set_ylabel(\"Points\")\n ax.grid(1)\n\n # draw lines for interesting data\n if self.z_nom is not None:\n names = [\"Z_nom\", \"Z-mid\", \"Z-L\", \"Z-U\", \"Z-1\", \"Z-2\"]\n colors = [\"g\", \"b\", \"r\", \"r\", \"black\", \"black\"]\n xlines = [\n self.z_nom,\n self.z_mid,\n self.z_nom - self.height_half_band_spec,\n self.z_nom + self.height_half_band_spec,\n self.z_mid - self.z_halfband,\n self.z_mid + self.z_halfband,\n ]\n else:\n xlines = [self.z_mid]\n names = [\"Z-mid\"]\n colors = [\"b\", \"r\", \"r\"]\n for i, xline in enumerate(xlines):\n azcam.plot.plt.axvline(x=xline, linewidth=1, color=colors[i])\n ypos = 0.9 * ax.get_ylim()[1]\n if names[i] in [\"Z_nom\", \"Z-L\", \"Z-U\"]:\n ypos = 0.9 * ax.get_ylim()[1]\n else:\n ypos = 0.8 * ax.get_ylim()[1]\n ax.text(\n xline,\n ypos,\n names[i],\n bbox=dict(facecolor=\"red\", alpha=0.2),\n horizontalalignment=\"center\",\n rotation=\"horizontal\",\n fontsize=12,\n )\n\n azcam.plot.save_figure(fignum, self.HistogramHeightPlot)\n\n # flatness histogram plot\n fig = azcam.plot.plt.figure()\n fignum = fig.number\n azcam.plot.move_window(fignum)\n azcam.plot.plt.title(\"Flatness Histogram Plot\")\n azcam.plot.plt.hist(\n self.flatnessresiduals,\n bins=\"auto\",\n facecolor=\"green\",\n alpha=0.8,\n histtype=\"stepfilled\",\n rwidth=0.8,\n )\n ax = azcam.plot.plt.gca()\n ax.set_xlabel(\"Relative Z (mm)\")\n ax.xaxis.set_major_formatter(FormatStrFormatter(\"%.03f\"))\n ax.set_ylabel(\"Points\")\n ax.grid(1)\n\n # draw lines for interesting data\n xlines = []\n names = [\"FSpec-L\", \"FSpec-U\"]\n colors = [\"r\", \"r\"]\n for i, xline in enumerate(xlines):\n azcam.plot.plt.axvline(x=xline, linewidth=1, color=colors[i])\n ypos = 0.9 * ax.get_ylim()[1]\n ax.text(\n xline,\n ypos,\n 
names[i],\n bbox=dict(facecolor=\"red\", alpha=0.2),\n horizontalalignment=\"center\",\n rotation=\"horizontal\",\n fontsize=12,\n )\n\n azcam.plot.save_figure(fignum, self.HistogramFlatnessPlot)\n\n # color Z value plot\n fig = azcam.plot.plt.figure()\n fignum = fig.number\n azcam.plot.move_window(fignum)\n # ax = azcam.plot.plt.gca()\n ax = azcam.plot.plt.gca()\n ax.set_xlabel(\"X [mm]\")\n ax.set_ylabel(\"Y [mm]\")\n azcam.plot.plt.title(\"Color Z Plot\")\n # azcam.plot.plt.scatter(self.x, self.y, s=40, c=self.z, marker=\"s\", lw=0)\n N = int(len(self.z) ** 0.5)\n self.z2 = self.z.reshape(N, N)\n azcam.plot.plt.imshow(\n self.z2,\n extent=(\n numpy.amin(self.x),\n numpy.amax(self.x),\n numpy.amin(self.y),\n numpy.amax(self.y),\n ),\n interpolation=\"quadric\",\n cmap=\"viridis\",\n )\n azcam.plot.plt.axis(\"equal\")\n\n cb = azcam.plot.plt.colorbar(format=\"%.03f\")\n cb.set_label(\"Height [mm]\")\n labels = []\n if 0:\n for lab in self.z:\n labels.append(\"%.03f\" % float(lab))\n for label, x1, y1 in zip(labels, self.x, self.y):\n azcam.plot.plt.annotate(\n label,\n xy=(x1, y1),\n textcoords=\"data\",\n ha=\"center\",\n va=\"center\",\n bbox=dict(boxstyle=\"round,pad=0\", fc=\"yellow\", alpha=0.3),\n fontsize=6,\n )\n azcam.plot.save_figure(fignum, self.ColorZPlot)\n\n # box and whisker plot\n fig, ax = azcam.plot.plt.subplots()\n fignum = fig.number\n azcam.plot.move_window(fignum)\n ax.set_ylabel(\"Z [mm]\")\n ax.yaxis.set_major_formatter(FormatStrFormatter(\"%.03f\"))\n azcam.plot.plt.title(\"Box and Whisker Plot\")\n ax.boxplot(self.z, notch=True)\n azcam.plot.plt.xticks([])\n azcam.plot.save_figure(fignum, self.WiskerPlot)\n\n # show standard drift\n if self.standard_correct:\n fig = azcam.plot.plt.figure()\n fignum = fig.number\n azcam.plot.move_window(fignum)\n ax = azcam.plot.plt.gca()\n azcam.plot.plt.title(\"Z-Standard Drift\")\n ax.set_xlabel(\"Row\")\n ax.set_ylabel(\"Z [microns]\")\n ax.xaxis.set_major_locator(\n azcam.plot.plt.MaxNLocator(integer=True)\n ) # integer row numbers\n drift_1 = self.standard1_z - self.standard_zheight\n azcam.plot.plt.plot(drift_1)\n drift_2 = self.standard2_z - self.standard_zheight\n azcam.plot.plt.plot(drift_2)\n drift_3 = self.standard3_z - self.standard_zheight\n azcam.plot.plt.plot(drift_3)\n drift_4 = self.standard4_z - self.standard_zheight\n azcam.plot.plt.plot(drift_4)\n drift_mean = self.zstandard - self.standard_zheight\n azcam.plot.plt.plot(drift_mean)\n azcam.plot.save_figure(fignum, self.StandardPlot)\n\n azcam.plot.plt.show()\n\n return\n\n def report(self):\n \"\"\"\n Write report file.\n \"\"\"\n\n lines = []\n\n lines.append(\"# Metrology Analysis\")\n lines.append(\"\")\n\n s = f\"ITL Serial Number = sn{self.itl_sn}\"\n lines.append(s)\n lines.append(\"\")\n\n if self.itl_id != \"\":\n s = f\"Package ID = {self.itl_id}\"\n lines.append(s)\n lines.append(\"\")\n\n if self.stage_temp:\n s = f\"Stage temperature = {self.stage_temp} C\"\n lines.append(s)\n lines.append(\"\")\n\n if self.start_time:\n s = f\"Start time = {self.start_time} \"\n lines.append(s)\n lines.append(\"\")\n\n if self.date:\n s = f\"Acquisition date = {self.date} \"\n lines.append(s)\n lines.append(\"\")\n\n if self.grade_mounting != \"UNKNOWN\":\n s = f\"Mounting pin grade = {self.grade_mounting}\"\n lines.append(s)\n lines.append(\"\")\n\n if self.show_height_grade:\n s = f\"Height grade = {self.grade_height}\"\n lines.append(s)\n lines.append(\"\")\n\n s = f\"Flatness grade = {self.grade_flatness}\"\n lines.append(s)\n lines.append(\"\")\n\n if 
self.z_nom is not None:\n s = f\"Z-Nom = {self.z_nom:.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n if self.show_height_grade:\n s = f\"Minimum Z-height Spec. {(self.height_fraction_limit * 100):.0f}% = {(self.z_nom - self.height_half_band_spec):.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n s = f\"Maximum Z-height Spec. {(self.height_fraction_limit * 100):0f} = {(self.z_nom + self.height_half_band_spec):.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n\n s = f\"Z Mean = {self.z_mean:.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n s = f\"Z Median = {self.z_median:.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n s = f\"Z-Mid = {self.z_mid:.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n s = f\"Z Sdev = {self.zsdev:.03f} mm\"\n lines.append(s)\n lines.append(\"\")\n\n if self.flatness_half_band_spec is not None:\n s = \"Flatness Halfband Spec. (%.0f%%) = %.03f mm\" % (\n self.flatness_fraction_limit * 100,\n self.flatness_half_band_spec,\n )\n lines.append(s)\n lines.append(\"\")\n s = \"Flatness Halfband = %.03f mm\" % self.f_halfband\n lines.append(s)\n lines.append(\"\")\n s = \"Flatness Sdev = %.03f mm\" % self.fsdev\n lines.append(s)\n lines.append(\"\")\n s = \"Flatness Minimum = %.03f mm\" % self.fmin\n lines.append(s)\n lines.append(\"\")\n s = \"Flatness Maximum = %.03f mm\" % self.fmax\n lines.append(s)\n lines.append(\"\")\n\n lines.append(\"|Quantiles (%) |Height (mm)|Flatness (mm)|\")\n s = \"|:---|:---|:---|\"\n lines.append(s)\n for i, p in enumerate(self.quantile_percents):\n s = f\"|{p:.01f}|{self.quantile_values[i]:.04f}|{self.flatness_quantile_values[i]:.03f}|\"\n lines.append(s)\n lines.append(\"\")\n\n lines.append(\n f\"![Z-Height Histogram]({os.path.abspath(self.HistogramHeightPlot)}) \"\n )\n lines.append(\"\")\n lines.append(f\"Z-Height Histogram.\")\n lines.append(\"\")\n\n lines.append(\n f\"![Flatness Histogram]({os.path.abspath(self.HistogramFlatnessPlot)}) \"\n )\n lines.append(\"\")\n lines.append(f\"Flatness Histogram.\")\n lines.append(\"\")\n\n lines.append(f\"![Z-Height Histogram]({os.path.abspath(self.SurfacePlot)}) \")\n lines.append(\"\")\n lines.append(f\"Surface Plot.\")\n lines.append(\"\")\n\n lines.append(f\"![Z-Height Histogram]({os.path.abspath(self.ColorZPlot)}) \")\n lines.append(\"\")\n lines.append(f\"Color Z-Plot.\")\n lines.append(\"\")\n\n lines.append(f\"![Z-Height Histogram]({os.path.abspath(self.WiskerPlot)}) \")\n lines.append(\"\")\n lines.append(f\"BoxWisker Plot.\")\n lines.append(\"\")\n\n # Make report files\n report_file = f\"{self.report_file}_sn{self.itl_sn}\"\n self.write_report(report_file, lines)\n\n return\n\n def read_data(self, filename=\"\"):\n\n # get filename\n if filename == \"\":\n filename = azcam.utils.prompt(\"Enter data filename\", \"viewdata.txt\")\n\n azcam.log(\"Data file is %s\" % filename)\n\n # read file\n with open(filename, \"r\") as f:\n lines = f.readlines()\n\n x = []\n y = []\n z = []\n\n standard1_x = []\n standard1_y = []\n standard1_z = []\n\n standard2_x = []\n standard2_y = []\n standard2_z = []\n\n standard3_x = []\n standard3_y = []\n standard3_z = []\n\n standard4_x = []\n standard4_y = []\n standard4_z = []\n\n self.zstandard = []\n\n zoffset = 0.0\n\n # raw output datafile\n with open(\n f\"{os.path.splitext(os.path.basename(filename))[0]}_out.csv\", \"w\"\n ) as fout:\n\n for line in lines:\n line = line.strip()\n if line == \"\":\n continue\n\n tokens = shlex.split(line)\n\n # lineout = \",\".join([f\"{x}\" for x in tokens])\n # fout.write(f\"{lineout}\\n\")\n\n if 
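Note that the "Maximum Z-height Spec." f-string above uses `:0f}` where the matching Minimum line uses `:.0f}%`, so the decimal point and percent sign are dropped in the report. Below, a sketch of the markdown quantile table that `report()` assembles, with a made-up percent list and synthetic data:

```python
# Sketch: building the markdown quantile table emitted by report().
import numpy as np

z = np.random.normal(13.0, 0.002, 500)
flatness = np.random.normal(0.0, 0.001, 500)
quantile_percents = [2.5, 25.0, 50.0, 75.0, 97.5]  # illustrative values

lines = ["|Quantiles (%) |Height (mm)|Flatness (mm)|", "|:---|:---|:---|"]
for p in quantile_percents:
    zq = np.percentile(z, p)
    fq = np.percentile(flatness, p)
    lines.append(f"|{p:.01f}|{zq:.04f}|{fq:.03f}|")
print("\n".join(lines))
```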
tokens[0] == \"Engraved\" and tokens[1] == \"ID\":\n self.itl_id = tokens[3]\n azcam.log(\"ID is %s\" % self.itl_id)\n\n elif tokens[0] == \"DATE\":\n self.date = tokens[2]\n azcam.log(f\"Data acquisition date is {self.date}\")\n\n elif tokens[0] == \"START\" and tokens[1] == \"TIME\":\n self.start_time = tokens[3]\n azcam.log(f\"Start time is {self.start_time}\")\n\n elif tokens[0] == \"END\" and tokens[1] == \"TIME\":\n self.end_time = tokens[3]\n azcam.log(f\"End time is {self.start_time}\")\n\n elif tokens[0] == \"STAGE\" and tokens[1] == \"TEMP\":\n self.stage_temp = tokens[3]\n azcam.log(f\"Stage temperature is {self.stage_temp}\")\n\n elif tokens[0] == \"PACKAGE\" and tokens[1] == \"NUMBER\":\n self.itl_id = tokens[3]\n azcam.log(f\"Package ID is {self.itl_id}\")\n\n elif tokens[0] == \"CCD\" and tokens[1] == \"NUMBER\":\n self.itl_sn = int(tokens[3])\n azcam.log(f\"ITL serial number is {self.itl_sn}\\n\")\n\n elif tokens[0] == \"Row\":\n zstandard = [] # start fresh\n\n elif (\n tokens[0].startswith(\"Im_Point\")\n and tokens[1] == \"X\"\n and tokens[2] == \"Position\"\n and tokens[-1] == \"M\"\n ):\n xx = float(tokens[3])\n xx = int(xx)\n x.append(xx)\n elif (\n tokens[0].startswith(\"Im_Point\")\n and tokens[1] == \"Y\"\n and tokens[2] == \"Position\"\n and tokens[-1] == \"M\"\n ):\n yy = float(tokens[3])\n yy = int(yy)\n y.append(yy)\n elif (\n tokens[0].startswith(\"Im_Point\")\n and tokens[1] == \"Z\"\n and tokens[2] == \"Position\"\n and tokens[-1] == \"M\"\n ):\n if self.standard_correct:\n zz = float(tokens[3]) - zoffset\n else:\n zz = float(tokens[3])\n z.append(zz)\n\n elif (\n tokens[0] == \"STANDARD_1\"\n and tokens[1] == \"X\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard1_x.append(x1)\n elif (\n tokens[0] == \"STANDARD_1\"\n and tokens[1] == \"Y\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard1_y.append(x1)\n elif (\n tokens[0] == \"STANDARD_1\"\n and tokens[1] == \"Z\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard1_z.append(x1)\n zstandard.append(x1)\n\n elif (\n tokens[0] == \"STANDARD_2\"\n and tokens[1] == \"X\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard2_x.append(x1)\n elif (\n tokens[0] == \"STANDARD_2\"\n and tokens[1] == \"Y\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard2_y.append(x1)\n elif (\n tokens[0] == \"STANDARD_2\"\n and tokens[1] == \"Z\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard2_z.append(x1)\n zstandard.append(x1)\n\n elif (\n tokens[0] == \"STANDARD_3\"\n and tokens[1] == \"X\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard3_x.append(x1)\n elif (\n tokens[0] == \"STANDARD_3\"\n and tokens[1] == \"Y\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard3_y.append(x1)\n elif (\n tokens[0] == \"STANDARD_3\"\n and tokens[1] == \"Z\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard3_z.append(x1)\n zstandard.append(x1)\n\n elif (\n tokens[0] == \"STANDARD_4\"\n and tokens[1] == \"X\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard4_x.append(x1)\n elif (\n tokens[0] == \"STANDARD_4\"\n and tokens[1] == \"Y\"\n and tokens[2] == \"Position\"\n and tokens[4] == 
\"M\"\n ):\n x1 = float(tokens[3])\n standard4_y.append(x1)\n elif (\n tokens[0] == \"STANDARD_4\"\n and tokens[1] == \"Z\"\n and tokens[2] == \"Position\"\n and tokens[4] == \"M\"\n ):\n x1 = float(tokens[3])\n standard4_z.append(x1)\n zstandard.append(x1)\n\n zs_mean = numpy.array(zstandard).mean()\n self.zstandard.append(zs_mean)\n zoffset = (\n zs_mean - self.standard_zheight\n ) # this updates current value\n\n # write line to CSV file\n self.csv_tokens_raw = [\n \"Program:\",\n \"Results\",\n \"Company\",\n \"VmsVersion\",\n \"Engraved\",\n ]\n\n # output only\n if tokens[0] in self.csv_tokens_raw:\n lineout = \" \".join([f\"{x}\" for x in tokens])\n lineout = lineout.replace(\",\", \" \")\n fout.write(f\"{lineout},\\n\")\n\n elif (\n line.startswith(\"START TIME\")\n or line.startswith(\"PACKAGE NUMBER\")\n or line.startswith(\"END TIME\")\n or line.startswith(\"CCD NUMBER\")\n ):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"DATE\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:1]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif tokens[0] == \"Measurement\":\n line = line.replace(\"+ Tol\", \"Tol+\")\n line = line.replace(\"- Tol\", \"Tol-\")\n tokens = shlex.split(line)\n lineout = \",\".join([f\"{x}\" for x in tokens])\n fout.write(f\"{lineout},\\n\")\n\n # two token line header\n elif tokens[0] in [\n \"A1\",\n \"A2\",\n \"A3\",\n \"ANG1\",\n \"ANG2\",\n \"ANG3\",\n \"ANG4\",\n \"ANG5\",\n \"ANG6\",\n \"ANG7\",\n \"ANG8\",\n \"ANG9\",\n \"JIG_TOP\",\n \"FFLA_Foot\",\n \"Im_Plane\",\n ]:\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n # three token line header\n elif tokens[0] in [\n \"D1\",\n \"D2\",\n \"D3\",\n \"D4\",\n \"D5\",\n \"D6\",\n \"D7\",\n \"Invar1\",\n \"Invar2\",\n \"Invar3\",\n \"Invar4\",\n \"HOLE_DIST\",\n ]:\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"BC1 Diameter\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"BC1\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref1 Diameter\") or line.startswith(\n \"Ref1 Roundness\"\n ):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref1\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref2 Diameter\") or line.startswith(\n \"Ref2 Roundness\"\n ):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref2\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n 
fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref3 Diameter\") or line.startswith(\n \"Ref3 Roundness\"\n ):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref3\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref4 Diameter\") or line.startswith(\n \"Ref4 Roundness\"\n ):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Ref4\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"Im_Point\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"FFLA_MTG Diameter\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:2]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[2:]])\n fout.write(f\"{lineout},\\n\")\n\n elif line.startswith(\"FFLA_MTG\"):\n lineout = \" \".join([f\"{x}\" for x in tokens[:3]])\n lineout = lineout + \",\" + \",\".join([f\"{x}\" for x in tokens[3:]])\n fout.write(f\"{lineout},\\n\")\n\n # make numpy arrays\n self.x = numpy.array(x)\n self.y = numpy.array(y)\n self.z = numpy.array(z)\n\n if self.standard_correct:\n self.standard1_x = numpy.array(standard1_x)\n self.standard1_y = numpy.array(standard1_y)\n self.standard1_z = numpy.array(standard1_z)\n\n self.standard2_x = numpy.array(standard2_x)\n self.standard2_y = numpy.array(standard2_y)\n self.standard2_z = numpy.array(standard2_z)\n\n self.standard3_x = numpy.array(standard3_x)\n self.standard3_y = numpy.array(standard3_y)\n self.standard3_z = numpy.array(standard3_z)\n\n self.standard4_x = numpy.array(standard4_x)\n self.standard4_y = numpy.array(standard4_y)\n self.standard4_z = numpy.array(standard4_z)\n\n self.zstandard = numpy.array(self.zstandard)\n\n return\n","sub_path":"azcam_testers/metrology.py","file_name":"metrology.py","file_ext":"py","file_size_in_byte":36923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"338893319","text":"from django.db import models\nfrom ..core.models import PostDefault, CategoryDefault, PostContent\nfrom ..core.models.managers import PostDefaultManager\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.dispatch import receiver\n\npost_type = 'news'\n\nclass NewsManager(PostDefaultManager):\n def get_queryset(self):\n return super(NewsManager, self).get_queryset().filter(post_type=post_type)\n\nclass News(PostDefault):\n objects = NewsManager()\n class Meta:\n proxy = True\n verbose_name = _('new')\n verbose_name_plural = _('news')\n\n\nclass NewsContent(PostContent):\n def get_queryset(self):\n return super(NewsContent, self).get_queryset().filter(post__post_type=post_type)\n\n class Meta:\n proxy = True\n verbose_name = _('new content')\n verbose_name_plural = _('news content')\n\n\nclass NewsCategory(CategoryDefault):\n class Meta:\n proxy = True\n verbose_name = _('category')\n verbose_name_plural = _('categories')\n\n@receiver(models.signals.pre_delete,sender=News)\ndef 
signal_delete_image_album(sender, **kwargs):\n instance = kwargs.get('instance')\n instance.post_imagem.delete()\n instance.post_imagem.delete_thumbnails()\n\n","sub_path":"apps/cms/news/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"423071895","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom . import views\n\napp_name='home'\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('adminlogin/', views.loginadmin,name='adminlogin'),\n path('signup/',views.signup,name='signup'),\n path('login/',views.login1,name='login'),\n path('cakes/',views.cakes,name='cakes'),\n path('logout/',views.logout,name='logout'),\n path('weight/',views.weight,name='weight'),\n path('orders/',views.orders,name='orders'),\n path('',views.landingpage,name='landingpage'),\n path('home/',views.landingpage2,name='landingpage2'),\n path('adminhome/',views.landingpage3,name='landingpage3'),\n path('orderconf/',views.orderconf,name='orderconf'),\n path('feedback/',views.feedback1,name='feedback'),\n path('profile/',views.profile,name='profile'),\n path('update/',views.update,name='update'),\n path('delete/',views.delete,name='delete'),\n path('vieworders/',views.view_pending_order,name='vieworders'),\n path('viewusers/',views.view_users,name='viewusers'),\n path('viewfeedbacks/',views.view_feedbacks,name='viewfeedbacks'),\n path('adminlogout/', views.logoutadmin,name='adminlogout'),\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"496996698","text":"class Node:\n def __init__(self, value, nextval=None):\n self.value = value\n self.nextval = nextval\nclass linkedlist:\n def __init__(self, head=None):\n self.head = head\n def Insertion(self, value):\n node = Node(value)\n if self.head is None:\n self.head = node\n return\n currentNode = self.head\n while True:\n if currentNode.nextval is None:\n currentNode.nextval = node\n break\n currentNode = currentNode.nextval\n def Printlist(self):\n currentNode = self.head\n while currentNode is not None:\n print(currentNode.value)\n currentNode = currentNode.nextval\n print(\"Null\")\n\nll = linkedlist()\nll.Insertion(\"3\")\nll.Insertion(\"5\")\nll.Insertion(\"7\")\nll.Printlist()\n\n\n","sub_path":"practice/Insertion.py","file_name":"Insertion.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"99091733","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reviews', '0060_merge'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='qualityanswer',\n name='question',\n field=models.ForeignKey(default=1, to='reviews.QualityQuestion'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='commentseen',\n name='comment',\n field=models.ForeignKey(related_name='comment_seencomments', to='reviews.VisitorComment'),\n ),\n migrations.AlterField(\n model_name='commentseen',\n name='review',\n field=models.ForeignKey(related_name='review_seencomments', to='reviews.Review'),\n ),\n migrations.AlterField(\n model_name='commentseen',\n name='user',\n 
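The `linkedlist.Insertion` in the record above walks the whole chain on every append, which is O(n). A sketch of the usual fix, keeping a tail pointer for O(1) appends (same behaviour as the original otherwise):

```python
class Node:
    def __init__(self, value, nextval=None):
        self.value = value
        self.nextval = nextval

class LinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def insert(self, value):
        node = Node(value)
        if self.tail is None:          # empty list: node is head and tail
            self.head = self.tail = node
        else:
            self.tail.nextval = node   # O(1) append via the tail pointer
            self.tail = node

ll = LinkedList()
for v in ("3", "5", "7"):
    ll.insert(v)
node = ll.head
while node:
    print(node.value)
    node = node.nextval
```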
field=models.ForeignKey(related_name='user_seencomments', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='keyword',\n name='related_to',\n field=models.CharField(blank=True, max_length=1, choices=[('P', 'Popula\\xe7\\xe3o'), ('I', 'Interven\\xe7\\xe3o'), ('C', 'Compara\\xe7\\xe3o'), ('O', 'Resultados'), ('S', 'Study Type')]),\n ),\n ]\n","sub_path":"parsifal/reviews/migrations/0061_auto_20190906_0010.py","file_name":"0061_auto_20190906_0010.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"310301670","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom api.models import contacts,cgroups,c_group\nimport md5\nimport datetime\nimport StringIO\n#from validate import create_validate_code\n#from sendvaliemail import sendmail\nimport random\nimport os\nimport time\nimport shutil\nfrom decorator.response import validate_login_status\n\n\n@validate_login_status\ndef contact(request):\n indextitcon = \"联系人\"\n type = \"contact\"\n UserName = request.session.get(\"UserName\")\n contact_list = contacts.objects.filter(Uid=request.session.get(\"Uid\")).order_by(\"CreateTime\")\n return render_to_response('contact.html',locals())\n \n #return render_to_response('instance.html',locals(),context_instance=RequestContext(request))\n #return render_to_response('instance.html',{\"indextitcon\":indextitcon,\"type\":type,\"data_center\":data_center})\n@validate_login_status\ndef contactgroup(request):\n indextitcon = \"联系组\"\n type = \"contactgroup\"\n UserName = request.session.get(\"UserName\")\n cgroups_list = cgroups.objects.filter(Uid=request.session.get(\"Uid\")).order_by(\"CreateTime\")\n return render_to_response('contactgroup.html',locals())\n\ndef cgdetails(request,details):\n #查询当前联系人id 信息\n c_g_details = cgroups.objects.filter(uuid=request.GET.get(\"cgroupsId\")).first()\n indextitcon = \"联系组详情\"\n type = \"contactgroup\"\n UserName = request.session.get(\"UserName\")\n\n c_g_list = c_group.objects.filter(C_Groupid=c_g_details.uuid)\n clist = []\n for cg_details in c_g_list:\n c_details = contacts.objects.filter(uuid=cg_details.Contactid).first()\n cdict = {\n \"uuid\" : c_details.uuid,\n \"Name\" : c_details.Name,\n \"Phone\" : c_details.Phone,\n \"Email\" : c_details.Email,\n \"CreateTime\" : c_details.CreateTime\n }\n clist.append(cdict)\n return render_to_response('cgdetails.html',locals())\n \n","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"377900752","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\nimport config\nimport json\nimport io\nfrom datetime import datetime\nfrom sqlalchemy import create_engine, Column, MetaData, Table, DateTime, String, Integer, ForeignKey, BIGINT,TEXT,FLOAT,inspect, event\n\n#https://cornershopapp.com/api/v2/branches/9\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--ignore-certificate-errors')\noptions.add_argument(\"--test-type\")\noptions.add_argument('start-maximized')\ndriver = webdriver.Chrome('./chromedriver.exe', options=options)\n\nengine = 
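The engine URL in the Cornershop record below is assembled by string concatenation, which breaks if the password contains characters such as `@` or `/`. Assuming SQLAlchemy 1.4+, `URL.create` handles the escaping; all config values here are placeholders:

```python
from sqlalchemy import create_engine
from sqlalchemy.engine import URL  # available in SQLAlchemy 1.4+

url = URL.create(
    drivername="postgresql",
    username="user",
    password="p@ss/word",   # safely escaped by URL.create
    host="localhost",
    port=5432,
    database="mydb",
)
engine = create_engine(url, connect_args={"options": "-csearch_path=myschema"})
```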
create_engine('postgresql://'+config.DATABASE_CONFIG['user']+':'+config.DATABASE_CONFIG['password']+'@'+config.DATABASE_CONFIG['host']+':'+config.DATABASE_CONFIG['port']+'/'+config.DATABASE_CONFIG['dbname']\n , connect_args={'options': '-csearch_path={}'.format(config.DATABASE_CONFIG['schema'])})\n\nconnection = engine.connect()\nresult = []\n#18539 ultimo local hasta el 14/12/2020\n#errores con BRazil dps del numero 6467 por eso se excluyen revisar\nfor i in range(16395,20000):\n #print (i)\n api_url = 'https://cornershopapp.com/api/v2/branches/'+str(i)\n print(api_url)\n driver.get(api_url)\n time.sleep(1)\n \n content_api = driver.page_source.encode(\"utf-8\").strip()\n soup_api = BeautifulSoup(content_api, \"html.parser\")\n try:\n api_content = soup_api.find('pre').text\n except :\n print('error de api')\n \n data = json.loads(api_content)\n\n try:\n store_id = data['branch']['id']\n store_name = data['branch']['name']\n adress = data['branch']['address']\n organization_id = data['branch']['store_id']\n organization_name = data['branch']['store_name'] \n date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n country = data['branch']['country']\n \n info = {'store_id': store_id, 'organization_id': organization_id, 'store_name': store_name, 'organization_name': organization_name, 'adress': adress, 'country': country}\n if country == 'CL':\n result.append(info)\n except:\n print('error')\n\ndf = pd.DataFrame(result)\n \ndf.head(0).to_sql(config.CORNERSHOP_GLOBAL['table_lz'], engine, if_exists='append',index=False)\n \nconn = engine.raw_connection()\ncur = conn.cursor()\noutput = io.StringIO()\ndf.to_csv(output, sep='\\t', header=False, index=False)\noutput.seek(0)\ncur.copy_from(output, config.CORNERSHOP_GLOBAL['table_lz'], null=\"\") # null values become ''\nconn.commit()\nconnection.close()\ndriver.close()","sub_path":"Cornershop/cornershop_global.py","file_name":"cornershop_global.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"177083589","text":"import optparse\nimport pickle\nimport fasttext\nimport psycopg2\nimport json\nimport connectDB\n\nfrom pythainlp.tokenize import word_tokenize\nfrom pythainlp.keywords import find_keyword\nfrom gensim.models import FastText\n\nimport requests\n\nmodel_file = 'model.vec'\ntraining_file = 'train_data_2.fixed.csv'\ntmp_training_file = 'tmp_train_2.txt'\ntest_out_file = 'test_out.csv'\nintent_hash = {}\nclassifier = None\n\n\ndef run_query(): # A simple function to use requests.post to make the API call. 
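The scraper above drives Chrome just to read a JSON endpoint and then parses the `<pre>` tag out of the rendered page. If the endpoint answers plain HTTP (an assumption; it may require cookies or headers that only the browser supplies), `requests` is enough:

```python
import requests

def fetch_branch(branch_id, timeout=10):
    # Same endpoint as in the record; no browser or BeautifulSoup needed.
    url = f"https://cornershopapp.com/api/v2/branches/{branch_id}"
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()
    return resp.json()   # parsed dict
```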
Note the json= section.\n\n url = \"http://103.245.164.59:3004/api_management\"\n\n payload = \"{\\\"query\\\":\\\"query getWordIntent($word:String!){\\\\n getWordIntent(word:$word){\\\\n id\\\\n word\\\\n intent\\\\n }\\\\n}\\\",\\\"variables\\\":{\\\"word\\\":\\\"TONG\\\"}}\"\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data = payload)\n result = response.text.encode('utf8')\n print(result)\n # return result\n\ndef initiate():\n global model_file\n global classifier\n global training_file\n global tmp_training_file\n print(\"initiating boot sequence...\")\n # load_classifier()\n rows = load_expression_json()\n load_classifier(rows)\n # print(rows)\n print(\"ready\")\n\n\ndef tokenize(m):\n wordignore = [' ', '?', '.', '*', ';', '-']\n tokens = word_tokenize(m, engine='newmm')\n # kws = find_keyword(tokens,lentext=1)\n # kws = list(kws.keys())\n kws = [w for w in tokens if w not in wordignore]\n if(len(kws) == 0):\n kws = [m]\n return kws\n\n\ndef load_classifier(element):\n global classifier\n global tmp_training_file\n epoch = 10\n # write training file\n # rows = connectDB.db_select(\n # \"select message, keywords, intent from train_set;\", [])\n print(element)\n rows = element\n # print('rows :',rows)\n count = 0\n with open(tmp_training_file, \"a+\") as t_f:\n for e in range(0, epoch):\n for row in rows:\n l = '__label__' + \\\n row[2].strip().replace(\" \", \"_;\") + \\\n ' __label__' + row[1].strip() + \"\\n\"\n t_f.write(l)\n count += 1\n # reload classifier\n if count > 0:\n #classifier = fasttext.train_supervised(tmp_training_file, 'class', label_prefix='__label__', pretrained_vectors='model.vec')\n classifier = fasttext.train_supervised(input=\"tmp_train_2.txt\", lr=1 )\n classifier.save_model(\"model.bin\")\n #return count\n\ndef load_expression_json():\n fname = \"train_set.data.json\"\n data = json.load(open(fname))\n count = 0\n rows = []\n \n # rows.extend([5])\n\n for d in data['data']:\n m = d['text'].strip()\n i = d['entities'][0]['value'].strip().strip('\\\"')\n print(\"m \" + m + \" i \" + i)\n kws = tokenize(m)\n count += 1\n # connectDB.db_exec(\"insert into train_set (message, keywords, intent) values (%s, %s, %s)\",\n # (m, ' '.join(kws), i))\n rows.extend([(m, ' '.join(kws), i)])\n # print(rows)\n # load_classifier()\n # return(\"loaded \" + str(count) + \" messages.\")\n return rows\n\n\ndef load_word_intent():\n fname = \"word_intent.data.csv\"\n f = open(fname, \"r\")\n lines = f.read()\n lines = lines.split(\"\\n\")\n count = 0\n for line in lines:\n line = line.strip()\n l = line.split(',')\n w = l[0]\n if len(l) > 1 and l[1] != '':\n i = l[1]\n print(w + \" \" + i)\n count += 1\n connectDB.db_exec(\n \"insert into word_intent (word, intent) values (%s, %s)\", (w, i))\n else:\n print(w + \" no intent\")\n return(\"loaded \" + str(count) + \" words.\")\n\n\ndef train(m, i):\n global classifier\n global tmp_training_file\n tokens = word_tokenize(m, engine='newmm')\n kws = tokenize(m)\n i = i.strip().strip('\\\"')\n # store to db\n # connectDB.db_exec(\"insert into train_set (message, keywords, intent) values (%s, %s, %s)\",\n # (m, ' '.join(kws), i))\n rows = [(m, ' '.join(kws), i)]\n load_classifier(rows)\n\n\ndef test(m, multi):\n global classifier\n kws = tokenize(m)\n fasttext.FastText.eprint = lambda x: None\n classifier = fasttext.load_model(\"model.bin\")\n c = classifier.predict([\" \".join(kws)] , k= 1 )\n #c = classifier.predict([\" \".join(kws)], k=-1, threshold=0.5)\n #c = 
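The `run_query` docstring above mentions `json=`, but the function actually sends a hand-escaped string via `data=`. Letting `requests` serialize a dict is less fragile and matches the docstring's intent (same endpoint and query as in the file; the `word` argument replaces the hard-coded "TONG"):

```python
import requests

def get_word_intent(word):
    query = """
    query getWordIntent($word: String!) {
      getWordIntent(word: $word) { id word intent }
    }"""
    resp = requests.post(
        "http://103.245.164.59:3004/api_management",
        json={"query": query, "variables": {"word": word}},
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()
```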
classifier.predict_proba([\" \".join(kws)], k=1)\n # select maximum confident\n max_conf = 0\n max_int = ''\n max_idx = 0\n idx = 0\n words = []\n intend = c[0][0][0].replace(\"__label__\", \"\")\n intend = intend.replace(\"_;\", \" \")\n conf = c[1][0][0]\n if conf > max_conf:\n max_int = intend\n max_conf = conf\n max_idx = idx\n\n print(max_idx)\n\n if multi is True:\n for key in kws:\n rows = searchWord(key)\n words = defindWords(words, rows, key) if rows != \"empty\" else words\n\n result = {\n 'found': \" \".join(kws),\n 'intent': max_int,\n 'confidence': max_conf,\n 'words': words\n }\n\n if multi is False:\n result.pop('words')\n\n # print(\"query: \" + m + \" found: \" + \" \".join(kws) + \" word: \" + kws[max_idx] + \" intent: \" + max_int + \" confidence: \" + str(max_conf))\n return result\n\n\ndef defindWords(words, rows, key):\n for row in rows:\n w_i = row[0].strip()\n if w_i != 'stopword' and w_i != 'noword':\n words.append({\n 'message': key,\n 'class': w_i\n })\n return words\n\n\ndef searchWord(m):\n rows = connectDB.db_select(\n \"select distinct intent from word_intent where word = %s \", [m])\n if len(rows) > 0:\n return(rows)\n else:\n return(\"empty\")\n\n\ndef delete(m, i):\n if i != \"\":\n connectDB.db_exec(\n \"delete from train_set where message = %s and intent = %s \", [m, i])\n else:\n connectDB.db_exec(\"delete from train_set where message = %s \", [m])\n load_classifier()\n\n\ndef retest():\n global classifier\n global tmp_training_file\n print(\"testing trained data...\")\n # write training file\n rows = connectDB.db_select(\n \"select message, keywords, intent from train_set;\", [])\n count = 0\n total = 0\n for row in rows:\n m = row[0].strip()\n i = row[2].strip()\n res = test(m, False)\n r_i = res['intent']\n r_f = res['found']\n r_c = res['confidence']\n print(\"m: '\" + m + \"' i: '\" + i + \"' r: '\" +\n r_i + \"' f: '\" + r_f + \"' c: \" + str(r_c))\n total += 1\n if i == r_i:\n count += 1\n else:\n print(\"wrong! 
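The `c[0][0][0]` / `c[1][0][0]` indexing in `test()` reflects how the `fasttext` package shapes predictions for list input: a list of label lists and a parallel list of probability arrays. A small end-to-end sketch (training data is made up for illustration):

```python
import fasttext

# Tiny throwaway training file with two intents.
with open("tmp_intents.txt", "w") as f:
    f.write("__label__greeting hello there\n" * 5)
    f.write("__label__farewell good bye now\n" * 5)

model = fasttext.train_supervised(input="tmp_intents.txt", lr=1.0, epoch=10)

labels, probs = model.predict(["hello there"], k=1)
intent = labels[0][0].replace("__label__", "")   # e.g. 'greeting'
confidence = probs[0][0]
print(intent, confidence)
```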
expexted: \" + i + \" got: \" + r_i)\n print(\"-------------------------------\")\n rate = (count/total)*100\n print(\"result, total: \" + str(total) + \" correct: \" +\n str(count) + \" percent: \" + str(rate))\n result = {\n 'total': str(total),\n 'correct': str(count),\n 'percent': str(rate)\n }\n return result\n\n\ndef upsertWordIntent(r, w, i):\n if(r == \"empty\"):\n connectDB.db_exec(\n \"insert into word_intent (word, intent) values( %s, %s);\", [w, i])\n else:\n connectDB.db_exec(\n \"update word_intent set intent = %s where word = %s;\", [i, w])\n return \"added\"\n","sub_path":"utilityMethod.py","file_name":"utilityMethod.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"648064442","text":"import datetime\nfrom database import DatabaseConnection\n\nconnection = DatabaseConnection(\"ManagerStore\")\nresponse = []\n\nclass SaleModel(object):\n def __init__(self, current_user, products_id, quantity, unit_price, total_price):\n \"\"\"\n This constructor initialises product\n :param products_id: \n :param quantity:\n :param unit_price:\n :param total_price:\n \"\"\"\n self.current_user = current_user\n self.products_id = products_id\n self.quantity = quantity\n self.unit_price = unit_price\n self.total_price = total_price\n self.date_created = datetime.datetime.utcnow()\n self.date_modified = datetime.datetime.utcnow()\n def create_sales(self):\n \"\"\"\n Adds product as an object to list\n :return: the product that has just been added\n \"\"\"\n try:\n query_to_add_products = \"INSERT INTO sales(users_id, products_id, quantity, unit_price, total_price, date_created,date_modified) VALUES(%s,%s,%s,%s,%s,%s,%s)\"\n connection.cursor.execute(query_to_add_products,(self.current_user, self.products_id, self.quantity, self.unit_price, self.total_price, self.date_created, self.date_modified))\n query_to_search_category = \"SELECT * FROM sales WHERE products_id=%s\"\n connection.cursor.execute(query_to_search_category, [self.products_id])\n added_sale = connection.cursor.fetchone()\n result = {\n 'id': added_sale[0],\n 'created by': added_sale[1],\n 'product': added_sale[2],\n 'quantity':added_sale[3],\n 'unit_price':added_sale[4],\n 'total_price':added_sale[5],\n 'Date Created': added_sale[6]\n }\n return result\n \n except Exception as exc:\n print(exc)\n @classmethod \n def get_sales(cls):\n \"\"\"\n This method gets all sales\n :return: all sales in the store\n \"\"\"\n response =[]\n query_to_get_all_sales = 'SELECT * FROM sales'\n connection.cursor.execute(query_to_get_all_sales)\n rows = connection.cursor.fetchall()\n if not rows:\n return \"Sales not available\"\n for row in rows:\n response.append({\n 'id': row[0],\n 'created by': row[1],\n 'product': row[2],\n 'quantity':row[3],\n 'unit_price':row[4],\n 'total_price':row[5],\n 'Date Created': row[6]\n })\n return response","sub_path":"app/models/sales_model.py","file_name":"sales_model.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"90150297","text":"import pygame as py\nimport random as r # this is to enable the y parameter of the winning square's position to be randomised\nfrom Play import level\n\n\"\"\"\nCreating globals\n\"\"\"\n\nwidth = 700\nheight = 500\nscreen = py.display.set_mode((700, 500))\nlevel_ = level # to distinguish between the preset level and manipulated level\npressed_keys = py.key.get_pressed()\nBlack = (0, 0, 
0)\nn = 0 # place holder for the text produced\n\n\n\"\"\"\nProducing the winning square\n\"\"\"\n\n\nclass WinSquare(py.sprite.Sprite):\n global level_, width, height\n\n def __init__(self):\n super().__init__()\n self.surf = py.Surface((20, 100))\n self.rect = self.surf.get_rect()\n self.rect.x = width-20\n self.rect.y = r.randint(150, 250)\n\n def coll(self, player, level): # Collide function\n if self.rect.colliderect(player):\n font = py.font.SysFont(n, 76)\n img = font.render('You Win!!', True, Black)\n text = img.get_rect(center=(width / 2, height/2))\n screen.blit(img, text)\n font_1 = py.font.SysFont(n, 36)\n img_1 = font_1.render('Press number \"1\" key to go to level ' + str(level_+1), True, (255, 0, 0))\n text_1 = img_1.get_rect(center=((width/2), (height/2) + 100))\n screen.blit(img_1, text_1)\n\n def movement(self): # movement to either side\n if self.rect.x is (width-20) and 150 > self.rect.y:\n self.rect.y += 1\n elif self.rect.x is (width-20) and 250 < self.rect.y:\n self.rect.y -= 1\n else:\n pass\n return self.rect.y\n\n def draw(self, surface): # Drawing the winsquare to the screen \n surface.blit(self.surf, self.rect)\n","sub_path":"Scripts/win.py","file_name":"win.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"645671853","text":"from utils import preprocess_image, postprocess_boxes\r\nfrom utils.draw_boxes import draw_boxes\r\nfrom tensorflow.keras.models import load_model\r\nimport tensorflow as tf\r\n\r\nimport os\r\nimport cv2\r\nimport time\r\nimport json\r\nimport numpy as np\r\n\r\ndef infer(image:str,phi:int=0,saved_model:str='./savedmodel',classes:dict=None,score_threshold:float=0.3,nms_threshold:float=0.5,device:str='gpu'):\r\n\tif device!='gpu':\r\n\t\tos.environ['CUDA_VISIBLE_DEVICES'] = '-1' #Using CPU\r\n\telse:\r\n\t\tos.environ['CUDA_VISIBLE_DEVICES'] = '0' #Using GPU\r\n\r\n\t#For COCO dataset\r\n\tif classes==None:\r\n\t\tclasses = {value['id'] - 1: value['name'] for value in json.load(open('coco_90.json', 'r')).values()}\r\n\r\n\r\n\t#select resolution according to architecture\r\n\timage_sizes = (512, 640, 768, 896, 1024, 1280, 1408)\r\n\timage_size = image_sizes[phi]\r\n\r\n\t#To get different color for each class\r\n\tnum_classes = len(classes.values())\r\n\tcolors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]\r\n\r\n\t#load the model\r\n\tmodel = load_model(saved_model)\r\n\r\n\t#load and preprocess image\r\n\timg = cv2.imread(image)\r\n\tsrc_image = img.copy()\r\n\timg = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n\th, w = img.shape[:2]\r\n\timg, scale = preprocess_image(img, image_size=image_size)\r\n\r\n\t#detect and post process\r\n\tstart = time.time()\r\n\tboxes, scores, labels = model.predict_on_batch([np.expand_dims(img, axis=0)])\r\n\tboxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)\r\n\tend = time.time()\r\n\tboxes = postprocess_boxes(boxes=boxes, scale=scale, height=h, width=w)\r\n\r\n\t# print(f'infer time: {end-start}, fps: {1/(end-start)}')\r\n\r\n\t# select indices which have a score above the threshold\r\n\tindices = np.where(scores[:] > score_threshold)[0]\r\n\t# indices = tf.image.non_max_suppression(boxes,scores,max_output_size=[100],iou_threshold = nms_threshold,score_threshold = score_threshold)\r\n\tboxes = boxes[indices]\r\n\tlabels = labels[indices]\r\n\r\n\t#draw boxes on the original image\r\n\tdraw_boxes(src_image, boxes, scores, labels, colors, 
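`WinSquare.movement` above compares integers with `is`, which tests object identity; for values like `width-20 == 680` (outside CPython's small-int cache) the comparison is unreliable and the branch may never fire. `==` is what's meant. A sketch of the corrected clamp logic:

```python
def clamp_y(rect_x, rect_y, width):
    # use ==, not is: "is" compares object identity, not value
    if rect_x == width - 20:
        if rect_y < 150:
            rect_y += 1
        elif rect_y > 250:
            rect_y -= 1
    return rect_y

print(clamp_y(680, 140, 700))  # 141: nudged back toward the 150-250 band
```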
classes)\r\n\r\n\treturn src_image","sub_path":"inference_new.py","file_name":"inference_new.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"110308592","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom collections import defaultdict\nimport math\nimport logging\nimport re\nimport dedupe.mekano as mk\n\nwords = re.compile(\"[\\w']+\")\n\nclass TfidfPredicate(float):\n def __new__(self, threshold):\n return float.__new__(self, threshold)\n\n def __init__(self, threshold):\n self.__name__ = 'TF-IDF:' + str(threshold)\n\n def __repr__(self) :\n return self.__name__\n\n\ndef weightVectors(inverted_index, token_vectors, stop_word_threshold) :\n\n\n for field in token_vectors :\n singletons = set([])\n stop_words = set([])\n for atom in inverted_index[field].atoms() :\n df = inverted_index[field].getDF(atom)\n if df < 2 :\n singletons.add(atom)\n elif df > stop_word_threshold :\n stop_words.add(atom)\n \n \n\n wv = mk.WeightVectors(inverted_index[field])\n ii = defaultdict(set)\n for record_id, vector in token_vectors[field].iteritems() :\n w_vector = wv[vector]\n w_vector.name = vector.name\n for atom in w_vector :\n if atom in singletons or atom in stop_words :\n del w_vector[atom]\n token_vectors[field][record_id] = w_vector\n for token in w_vector :\n ii[token].add(w_vector)\n \n \n\n inverted_index[field] = ii\n\n return token_vectors, inverted_index\n\ndef invertIndex(data, tfidf_fields, df_index=None):\n\n tokenfactory = mk.AtomFactory(\"tokens\") \n inverted_index = {}\n\n for field in tfidf_fields :\n inverted_index[field] = mk.InvertedIndex()\n\n token_vector = defaultdict(dict)\n\n for record_id, record in data:\n for field in tfidf_fields:\n tokens = words.findall(record[field].lower())\n av = mk.AtomVector(name=record_id)\n for token in tokens :\n av[tokenfactory[token]] += 1\n inverted_index[field].add(av)\n\n token_vector[field][record_id] = av\n\n num_docs = inverted_index.values()[0].getN()\n\n stop_word_threshold = max(num_docs * 0.025, 500)\n logging.info('Stop word threshold: %(stop_thresh)d',\n {'stop_thresh' :stop_word_threshold})\n\n\n token_vectors, inverted_index = weightVectors(inverted_index, \n token_vector,\n stop_word_threshold)\n \n\n return (inverted_index, token_vector)\n\n#@profile\ndef makeCanopy(inverted_index, token_vector, threshold) :\n canopies = defaultdict(lambda:None)\n seen = set([])\n corpus_ids = set(token_vector.keys())\n\n\n while corpus_ids:\n center_id = corpus_ids.pop()\n canopies[center_id] = center_id\n center_vector = token_vector[center_id]\n \n seen.add(center_vector)\n\n if not center_vector :\n continue\n \n candidates = set.union(*(inverted_index[token] \n for token in center_vector))\n\n candidates = candidates - seen\n\n for candidate_vector in candidates :\n\n similarity = candidate_vector * center_vector \n\n if similarity > threshold :\n candidate_id = candidate_vector.name\n canopies[candidate_id] = center_id\n seen.add(candidate_vector)\n corpus_ids.remove(candidate_id)\n\n return canopies\n\n \n\ndef createCanopies(field,\n threshold,\n token_vector,\n inverted_index):\n \"\"\"\n A function that returns a field value of a record with a\n particular doc_id, doc_id is the only argument that must be\n accepted by select_function\n \"\"\"\n\n field_inverted_index = inverted_index[field]\n token_vectors = token_vector[field]\n\n return makeCanopy(field_inverted_index, token_vectors, 
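The NMS call commented out in the inference script above passes `max_output_size=[100]`, but TensorFlow's API expects a scalar. A minimal standalone sketch of filtering detections with `tf.image.non_max_suppression`, using synthetic boxes and scores:

```python
import numpy as np
import tensorflow as tf

# Boxes in [y1, x1, y2, x2]; the first two heavily overlap.
boxes = np.array([[10, 10, 50, 50], [12, 12, 52, 52], [100, 100, 140, 140]],
                 dtype=np.float32)
scores = np.array([0.9, 0.75, 0.6], dtype=np.float32)

keep = tf.image.non_max_suppression(
    boxes, scores, max_output_size=100,   # scalar, not a list
    iou_threshold=0.5, score_threshold=0.3)
print(keep.numpy())  # [0 2]: the overlapping lower-score box is dropped
```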
threshold)\n","sub_path":"dedupe/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"37653477","text":"import argparse\r\nimport os\r\n\r\n# program takes in a directory of files and renames them to a standardized format\r\n# and numbers them sequentially. Directory is passed in through command line argument,\r\n# file extension to match, and so is desired prefix. Optionally, the user may specify\r\n# a start index for the numbering.\r\nparser = argparse.ArgumentParser(description='Standardizes filenames')\r\nparser.add_argument('input_dir', type=str, help='Input dir for files')\r\nparser.add_argument('extension_to_match', type=str, help='Filename extension to match')\r\nparser.add_argument('new_prefix', type=str, help='Syntax of filename after rename')\r\nparser.add_argument('--start_index', type=int, default=0, help='Optional starting index. Default is 0')\r\nargs = parser.parse_args()\r\n\r\n# for every file found in directory name passed in through command line arg, rename\r\nfor filename in os.listdir(args.input_dir):\r\n\ttry:\r\n\t\tif filename.endswith(args.extension_to_match):\r\n\t\t\tdst = args.new_prefix + str(args.start_index) + args.extension_to_match\r\n\t\t\tsrc = args.input_dir + filename \r\n\t\t\tdst = args.input_dir + dst \r\n\r\n# rename() function will rename all the files starting with the passed index and moving\r\n# forwards.\r\n\t\t\tos.rename(src, dst) \r\n\t\t\targs.start_index += 1\r\n# Permission error will catch errors related to access rights and Windows error will\r\n# catch errors related to duplicate file names being found.\r\n\texcept(PermissionError):\r\n\t\tprint(filename + \" may not be accessed\")\r\n\texcept(WindowsError):\r\n\t\tprint(filename + \" already exists.\")\r\n\r\n","sub_path":"FileRenamer.py","file_name":"FileRenamer.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"151192366","text":"# @@@ START COPYRIGHT @@@\n#\n# (C) Copyright 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@@ END COPYRIGHT @@@\n\nfrom ...lib import hpdci\nfrom ...lib import gvars\nimport defs\n\n_testmgr = None\n_testlist = []\n_dci = None\n\ndef _init(hptestmgr, testlist=[]):\n global _testmgr\n global _testlist\n global _dci\n \n _testmgr = hptestmgr\n _testlist = testlist\n # default hpdci was created using 'SQL' as the proc name.\n # this default instance shows 'SQL>' as the prompt in the log file.\n _dci = _testmgr.get_default_dci_proc()\n \ndef test001(desc=\"\"\"\"\"\"):\n global _testmgr\n global _testlist\n global _dci\n if not _testmgr.testcase_begin(_testlist): return\n \n #Convertimestamp function\n stmt = \"\"\"Select converttimestamp(JULIANTIMESTAMP (hire_date)) from emp group by converttimestamp(JULIANTIMESTAMP (hire_date));\"\"\"\n output = _dci.cmdexec(stmt)\n 
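`makeCanopy` above relies on the `mekano` AtomVector type. A sketch of the same canopy idea with plain dicts: pop a center, pull candidates from an inverted index, and assign any candidate whose cosine similarity clears the threshold (which record becomes a center depends on set iteration order):

```python
import math
from collections import defaultdict

def cosine(u, v):
    dot = sum(u[t] * v.get(t, 0.0) for t in u)
    nu = math.sqrt(sum(x * x for x in u.values()))
    nv = math.sqrt(sum(x * x for x in v.values()))
    return dot / (nu * nv) if nu and nv else 0.0

def make_canopies(vectors, threshold):
    index = defaultdict(set)                 # token -> record ids
    for rid, vec in vectors.items():
        for token in vec:
            index[token].add(rid)
    canopies, remaining = {}, set(vectors)
    while remaining:
        center = remaining.pop()
        canopies[center] = center
        candidates = set().union(*(index[t] for t in vectors[center]))
        for cid in candidates & remaining:
            if cosine(vectors[center], vectors[cid]) > threshold:
                canopies[cid] = center
                remaining.discard(cid)
    return canopies

vecs = {"a": {"john": 1.0, "smith": 1.0},
        "b": {"john": 1.0, "smith": 1.0, "jr": 0.5},
        "c": {"jane": 1.0, "doe": 1.0}}
print(make_canopies(vecs, 0.5))  # "a" and "b" share a canopy, "c" is alone
```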
_dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a01s1\"\"\")\n \n #current function\n stmt = \"\"\"Select CURRENT(5) from emp group by CURRENT(5);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_selected_msg(output, 1)\n \n #current_date function\n stmt = \"\"\"select current_date from emp group by current_date;\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_selected_msg(output, 1)\n \n #current_time function\n stmt = \"\"\"select current_time from emp group by current_time;\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_selected_msg(output, 1)\n \n #current_timestamp function\n stmt = \"\"\"select current_timestamp(4) from emp group by current_timestamp(4);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_selected_msg(output, 1)\n \n _testmgr.testcase_end(desc)\n\ndef test002(desc=\"\"\"\"\"\"):\n global _testmgr\n global _testlist\n global _dci\n if not _testmgr.testcase_begin(_testlist): return\n \n #dateformat function\n stmt = \"\"\"select dateformat(hire_date, USA) from emp group by dateformat(hire_date, USA);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a02s1\"\"\")\n \n #day function\n stmt = \"\"\"select day(hire_date) from emp group by day(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a02s2\"\"\")\n \n #dayname function\n stmt = \"\"\"select dayname(hire_date) from emp group by dayname(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a02s3\"\"\")\n \n #dayofmonth function\n stmt = \"\"\"select dayofmonth(hire_date) from emp group by dayofmonth(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a02s4\"\"\")\n \n #dayofweek function\n stmt = \"\"\"select dayofweek(hire_date) from emp group by dayofweek(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a02s5\"\"\")\n \n #dayofyear function\n stmt = \"\"\"select dayofyear(hire_date) from emp group by dayofyear(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a02s6\"\"\")\n \n _testmgr.testcase_end(desc)\n\ndef test003(desc=\"\"\"\"\"\"):\n global _testmgr\n global _testlist\n global _dci\n if not _testmgr.testcase_begin(_testlist): return\n \n #extract function\n stmt = \"\"\"select extract(year from hire_date) from emp group by extract(year from hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a03s1\"\"\")\n \n stmt = \"\"\"select extract(month from hire_date) from emp group by extract(month from hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a03s2\"\"\")\n \n stmt = \"\"\"select extract(day from hire_date) from emp group by extract(day from hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a03s3\"\"\")\n \n #hour function\n stmt = \"\"\"select hour(cast(hire_date as datetime year to second)) from emp group by hour(cast(hire_date as datetime year to second));\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a03s4\"\"\")\n \n #juliantimestamp function\n stmt = \"\"\"select Juliantimestamp(hire_date) from emp group by 
Juliantimestamp(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a03s5\"\"\")\n \n _testmgr.testcase_end(desc)\n\ndef test004(desc=\"\"\"\"\"\"):\n global _testmgr\n global _testlist\n global _dci\n if not _testmgr.testcase_begin(_testlist): return\n \n #minute function\n stmt = \"\"\"select minute(cast(hire_date as datetime year to second)) from emp group by minute(cast(hire_date as datetime year to second));\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a04s1\"\"\")\n \n #month function\n stmt = \"\"\"select month(hire_date) from emp group by month(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a04s2\"\"\")\n \n #monthname function\n stmt = \"\"\"select monthname(hire_date) from emp group by monthname(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a04s3\"\"\")\n \n _testmgr.testcase_end(desc)\n\ndef test005(desc=\"\"\"\"\"\"):\n global _testmgr\n global _testlist\n global _dci\n if not _testmgr.testcase_begin(_testlist): return\n \n #quarter function\n stmt = \"\"\"select quarter(hire_date) from emp group by quarter(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a05s1\"\"\")\n \n #second function\n stmt = \"\"\"select second(cast(hire_date as datetime year to second)) from emp group by second(cast(hire_date as datetime year to second));\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a05s2\"\"\")\n \n #week function\n stmt = \"\"\"select week(hire_date) from emp group by week(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a05s3\"\"\")\n \n #year function\n stmt = \"\"\"select year(hire_date) from emp group by year(hire_date);\"\"\"\n output = _dci.cmdexec(stmt)\n _dci.expect_file(output, defs.test_dir + \"\"\"/a001exp\"\"\", \"\"\"a05s4\"\"\")\n _testmgr.testcase_end(desc)\n\n","sub_path":"tests/sqlqa/tests/groupby/grpby008/tcase.py","file_name":"tcase.py","file_ext":"py","file_size_in_byte":7049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"580177926","text":"'''\r\n制限時間カウント\r\nタイムアップ!\r\nあなたの会社では、お菓子を工場で製造しており、N 角柱の箱に入れ販売しています。\r\n箱の各側面には半角英字が 1 文字書かれており、これにより箱の向きが決まります。\r\n\r\n例として、下図左に N = 5 の場合の箱を示しました。\r\n側面には、それぞれ P, A, I, Z, A と順に書かれており、下図左はこの箱を横から見た図です。\r\n便宜上、この向きの箱の側面を展開し、下図右のように表すものとします。\r\n\r\nimg\r\n\r\n工場では、出荷前に箱の向きを一通りにそろえる必要があります。\r\nそろえる向きは一通りに定められており、この向きにそろうように箱を右回りに回転させます。\r\n向きをそろえる機械の特性上、右回りにしか回転させることができません。\r\nあなたのタスクは、1 側面分の回転を 1 回として、箱の向きがそろう最小回転数を計算することです。\r\n\r\n下図に入力例 1 のケースを示しました。\r\nこの場合、2 回転させると向きがそろうため、2 と出力してください。\r\n\r\n\r\n\r\n評価ポイント\r\n10回のテストケースで、正答率、実行速度、メモリ消費量をはかり得点が決まります。\r\nより早い回答時間で提出したほうが得点が高くなります。\r\n複数のテストケースで正しい出力がされるか評価(+50点)\r\n解答までの速さ評価(+50点)\r\n入力される値\r\n入力は以下のフォーマットで与えられます。\r\n\r\nN t s\r\n・1 行目には箱の側面の数を表す N、そろえる向きを表す文字列 t、最初の箱の向きを表す文字列 s がこの順に半角スペース区切りで与えられます。\r\n・入力は 1 行となり、末尾に改行が 1 つ入ります。\r\n\r\nそれぞれの値は文字列で標準入力から渡されます。標準入力からの値取得方法はこちらをご確認ください\r\n期待する出力\r\n箱の向きがそろう最小回転数を整数で出力してください。\r\n末尾に改行を入れ、余計な文字、空行を含んではいけません。\r\n\r\n条件\r\nすべてのテストケースにおいて、以下の条件をみたします。\r\n\r\n・1 ≦ N ≦ 100\r\n・(t の長さ) = (s の長さ) = N\r\n・t, s は半角英字で構成される文字列\r\n・最大 N 回箱を回転させることで文字列 t から文字列 s を必ず作れる\r\n入力例1\r\n5 PAIZA 
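Tests 002-005 above repeat the same execute-and-compare pattern once per date function. A table-driven helper shrinks that; this sketch assumes the module's existing `_dci` and `defs` globals and is not self-contained outside that file:

```python
def check_groupby(func_expr, expected_label):
    stmt = f"select {func_expr} from emp group by {func_expr};"
    output = _dci.cmdexec(stmt)
    _dci.expect_file(output, defs.test_dir + "/a001exp", expected_label)

for expr, label in [("quarter(hire_date)", "a05s1"),
                    ("week(hire_date)", "a05s3"),
                    ("year(hire_date)", "a05s4")]:
    check_groupby(expr, label)
```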
ZAPAI\r\n出力例1\r\n2\r\n入力例2\r\n3 aaA aaA\r\n出力例2\r\n0\r\n入力例3\r\n4 abab baba\r\n出力例3\r\n1\r\n'''\r\n#回答一回目\r\ninput_lines = input().split()\r\ncorrect = []\r\nbad = []\r\n#splitメゾットでリスト化された物を一文字ずつcorrectリストとbadリストに振り分けようした\r\nfor i in input_lines[1]:\r\n correct.append(i)\r\nfor j in input_lines[2]:\r\n bad.append(j)\r\n\r\n#んで頭の文字が間違ってるか合ってるかを調べるプログラムが以下\r\nfor times in range(int(input_lines[0])):\r\n if correct[0] == bad[int(times)]:\r\n print(times)\r\n break\r\n elif correct[0] != bad[int(times)]:\r\n times - 1\r\n# test 1 2 3以外通った。\r\n# 恐らく文字列を回して正しく表示させるプログラムじゃなかったから通らなかった \r\n\r\n\r\n\r\n\r\n\r\n# なので文字自体を回して正しい表記にさせるプログラムを組みました\r\ninput_lines = input().split()\r\ncorrect = []\r\nbad = []\r\nbad_1 =[]\r\ncnt = int(input_lines[0])\r\nfor i in input_lines[1]:\r\n correct.append(i)\r\nfor j in input_lines[2]:\r\n bad.append(j)\r\n\r\nfor times in range(cnt):\r\n print(bad)\r\n print(bad_1)\r\n if correct == bad:\r\n print(times)\r\n break\r\n for k in range(cnt - 1):\r\n bad_1.append(bad[k + 1])\r\n bad_1.append(bad[0])\r\n bad = bad_1\r\n bad_1 = [] #一時的なリストだから毎回初期化しないとダメよん","sub_path":"C058 模様そろえ.py","file_name":"C058 模様そろえ.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"310676179","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys, os, logging\r\nfrom libantiplagiat import EvaluatePlagiatedByWords_n, readFile, cosine_distance\r\n\r\ndef main(argc, argv):\r\n logging.disable(100)\r\n\r\n f1 = argv[1]\r\n f2 = argv[2]\r\n\r\n c1 = readFile(f1)\r\n c2 = readFile(f2)\r\n\r\n if c1['status'] == 'error' or c2['status'] == 'error':\r\n print('Failed to read files')\r\n\r\n c1 = c1['data']\r\n c2 = c2['data']\r\n\r\n st = EvaluatePlagiatedByWords_n(c1, c2)\r\n print(st['status'])\r\n \r\n if st['status']:\r\n print(st['blocks'])\r\n\r\nif __name__ == \"__main__\":\r\n main(len(sys.argv), sys.argv)\r\n #print(\"Press any key to continue\")\r\n #a = input()","sub_path":"antiplagiat_python/antiplagiat_cmpfiles.py","file_name":"antiplagiat_cmpfiles.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"649082089","text":"import numpy as np\nimport pickle\nimport os\nimport tensorflow as tf\nfrom tensorflow.python.layers import core as layers_core\nfrom matplotlib import pyplot as plt\nimport cv2\nimport datetime\nimport json\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\n\n\n### Acknowledgments\n## The model in here is an adaption of the Neural Machine Translation model described in\n## in the tutorial on Neural Machine Translation: https://github.com/tensorflow/nmt\n## We switch the encoder to a convolutional neural net matching\n## the works of https://guillaumegenthial.github.io/image-to-latex.html\n\n\n## CONFIG:\nhparams = {}\nhparams['num_epochs'] = 4\nhparams['max_token_length'] = 70\nhparams['mini_batch_size'] = 16\nhparams['max_train_num_samples'] = 16\nhparams['max_val_num_samples'] = 16\nhparams['use_attention'] = True\nhparams['use_encoding_average_as_initial_state'] = False\nhparams['num_units'] = 512 # LSTM number of units\nhparams['OVERFIT_TO_SMALL_SAMPLE'] = False\nhparams['visualize_attention'] = True\n\n# Learning rate config\nhparams['warm_up_rate'] = 0.0001\nhparams['num_epochs_warm_up'] = 3\nhparams['base_learning_rate'] = 0.0004\nhparams['num_epochs_constant_lrate'] = 3\nhparams['num_decay_epochs'] = 
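The paiza C058 task above asks for the minimum number of one-side rotations turning the box's string `s` into the target `t`, and the two attempts in the record rebuild the list step by step. Searching `t` inside `s+s` gives the answer directly; this sketch assumes each rotation maps `s` to `s[1:] + s[0]`, which matches all three sample cases in the statement (PAIZA/ZAPAI -> 2, aaA/aaA -> 0, abab/baba -> 1):

```python
def min_rotations(t, s):
    """Minimum rotations turning s into t, or raise if unreachable."""
    idx = (s + s).find(t)   # first offset where t appears in the doubled s
    if idx == -1:
        raise ValueError("t is not a rotation of s")
    return idx % len(s)

_, t, s = input().split()
print(min_rotations(t, s))
```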
10\nhparams['target_rate'] = 0.00001\ncalculate_val_loss = False\n\nhparams['restore_from_checkpoint'] = True\n\n\nCHECKPOINT_PATH = \"/Users/adamjensen/project-environments/handwriting-to-latex-env/output/checkpoints\"\n\n## FLOYDHUB CONFIG\ndata = '../data/'\noutput = '../output/'\n\nON_FLOYDHUB = False\nif (ON_FLOYDHUB):\n data = '/data/'\n output = '/output/'\n\nbuckets_dict = {(40, 160): 0,\n (40, 200): 1,\n (40, 240): 2,\n (40, 280): 3,\n (40, 320): 4,\n (40, 360): 5,\n (50, 120): 6,\n (50, 200): 7,\n (50, 240): 8,\n (50, 280): 9,\n (50, 320): 10,\n (50, 360): 11,\n (50, 400): 12,\n (60, 360): 13,\n (100, 360): 14,\n (100, 500): 15,\n (160, 400): 16,\n (200, 500): 17,\n (800, 800): 18}\n\ndef get_max_shape(data_batch):\n max_height = 0\n max_width = 0\n\n for sample in data_batch:\n image = sample[0]\n\n if image.shape[0] > max_height:\n max_height = image.shape[0]\n\n if image.shape[1] > max_width:\n max_width = image.shape[1]\n\n return (max_height, max_width)\n\n\ndef pad_images(data_batch):\n new_data_batch = data_batch\n target_shape = get_max_shape(data_batch)\n new_height = target_shape[0]\n new_width = target_shape[1]\n\n for idx, sample in enumerate(data_batch):\n padded_image = np.ones((new_height, new_width)) * 255\n\n image = sample[0] # A sample consist of an image (0), a target text (1), and a sequence length (2)\n\n h = image.shape[0]\n w = image.shape[1]\n\n padded_image[:h, :w] = image\n\n new_data_batch[idx][0] = padded_image\n\n return new_data_batch\n\n\ndef load_raw_data(dataset_name, mini_batch_size, max_token_length=400, max_num_samples=5000):\n buckets = [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]\n\n dataset = []\n\n if dataset_name == \"small\":\n image_folder = data + 'tin/tiny/'\n formula_file_path = data + 'tin/tiny.formulas.norm.txt'\n elif dataset_name == \"test\":\n image_folder = data + 'images_test/'\n formula_file_path = data + 'test.formulas.norm.txt'\n elif dataset_name == \"train\":\n image_folder = data + 'images_train/'\n formula_file_path = data + 'train.formulas.norm.txt'\n elif dataset_name == \"val\":\n image_folder = data + 'images_val/'\n formula_file_path = data + 'val.formulas.norm.txt'\n elif dataset_name == \"digital_numbers\":\n image_folder = 'datasets/digital_numbers/images/'\n formula_file_path = \"datasets/digital_numbers/number_sequences.txt\"\n\n in_counter = 0\n examples_counter = 0\n with open(formula_file_path, \"r\") as myfile:\n\n for idx, token_sequence in enumerate(myfile):\n examples_counter += 1\n # Check token size:\n token_sequence = token_sequence.rstrip('\\n')\n tokens = token_sequence.split()\n\n file_name = str(idx) + '.png'\n image = cv2.imread(image_folder + file_name, cv2.IMREAD_GRAYSCALE)\n\n if image is None:\n #what does this even mean?\n print(\"Image with id: \" + str(idx) + \" was not loaded\")\n continue\n image = image.astype(np.uint8)\n if len(tokens) <= max_token_length:\n\n token_sequence = '**start** ' + token_sequence\n # is **end** already there?\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Grey scale\n\n\n seq_length = len(token_sequence.split())\n\n relevant_bucket_id = buckets_dict[image.shape]\n\n if relevant_bucket_id == 18 or relevant_bucket_id == 17:\n continue\n\n buckets[relevant_bucket_id].append([image, token_sequence, seq_length])\n\n if len(buckets[buckets_dict[image.shape]]) == mini_batch_size:\n data_batch = np.array(buckets[buckets_dict[image.shape]])\n dataset.append(data_batch)\n buckets[buckets_dict[image.shape]] = []\n\n in_counter += 
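`load_raw_data` below groups images by shape so each mini-batch shares one resolution, emitting a batch whenever a bucket fills and padding whatever is left over. A stripped-down sketch of that bucketing scheme (shapes and batch size are arbitrary examples):

```python
from collections import defaultdict
import numpy as np

def bucket_batches(samples, batch_size):
    """samples: iterable of (image, text); yields lists of same-shape items."""
    buckets = defaultdict(list)
    for image, text in samples:
        buckets[image.shape].append((image, text))
        if len(buckets[image.shape]) == batch_size:
            yield buckets[image.shape]
            buckets[image.shape] = []
    for leftover in buckets.values():   # remainder batches need padding
        if leftover:
            yield leftover

samples = [(np.zeros((40, 160)), "a")] * 3 + [(np.zeros((50, 120)), "b")] * 2
for batch in bucket_batches(samples, batch_size=2):
    print(len(batch), batch[0][0].shape)
# 2 (40, 160) / 2 (50, 120) / 1 (40, 160)
```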
1\n\n if in_counter == max_num_samples:\n break\n\n # put what's left in the buckets into batches (padding will be needed)\n\n counter = 0\n\n data_batch = []\n\n for idx, bucket in enumerate(buckets):\n\n for j, sample in enumerate(bucket):\n data_batch.append(sample)\n if len(data_batch) == mini_batch_size:\n padded_data_batch = pad_images(data_batch)\n padded_data_batch = np.array(padded_data_batch)\n dataset.append(padded_data_batch)\n data_batch = []\n if (len(data_batch) != 0):\n # for some reason the algorithm generates empty data batches sometimes\n padded_data_batch = pad_images(data_batch)\n padded_data_batch = np.array(padded_data_batch)\n dataset.append(padded_data_batch)\n\n\n return dataset\n\n\n# dataset is list of all batches containing (image, target_text, sequence_length)\n# we split that up into three lists\n# Adam code review note: this seems verbose and I don't quite get it\ndef split_dataset(dataset):\n encoder_input_data_batches = []\n target_texts_batches = []\n sequence_lengths_batches = []\n\n for batch in range(len(dataset) ):\n temp = dataset[batch]\n if (len(temp) == 0):\n bob = None\n image_batch = temp[:, 0]\n image_batch = image_batch.tolist()\n image_batch = np.array(image_batch)\n # Add one dimension so that the conv net can take it (it expects four dimensions)\n image_batch = np.reshape(image_batch, (image_batch.shape[0], image_batch.shape[1], image_batch.shape[2], 1))\n image_batch = image_batch.astype('uint8')\n\n encoder_input_data_batches.append(image_batch)\n\n target_text = temp[:, 1]\n target_texts_batches.append(target_text)\n\n decoder_length = temp[:, 2]\n decoder_length = np.array(decoder_length, dtype=np.uint16)\n sequence_lengths_batches.append(decoder_length)\n\n # Make sure we have equal number of batches\n assert (len(encoder_input_data_batches) == len(dataset))\n assert (len(target_texts_batches) == len(dataset))\n assert (len(sequence_lengths_batches) == len(dataset))\n\n return encoder_input_data_batches, target_texts_batches, sequence_lengths_batches\n\n\ndef get_vocabulary(dataset):\n if dataset == \"small\":\n vocab = [line for line in open(data + 'tin/tiny_vocab.txt')]\n elif dataset == \"test\":\n vocab = [line for line in open(data + 'vocab.txt')]\n elif dataset == \"train\":\n vocab = [line for line in open(data + 'vocab.txt')]\n\n vocab = [x.strip('\\n') for x in vocab]\n return vocab\n\n\ndef create_output_int_sequences(target_texts_batches, sequence_lengths_batches, target_token_index):\n decoder_input_data_batches = []\n decoder_target_data_batches = []\n\n for idx, target_texts_batch in enumerate(target_texts_batches):\n\n # get max dec seq length for that batch\n max_decoder_seq_length = max(sequence_lengths_batches[idx])\n\n batch_size = len(target_texts_batch)\n\n decoder_input_data = np.zeros(\n (batch_size, max_decoder_seq_length),\n dtype='uint16')\n decoder_target_data = np.zeros(\n (batch_size, max_decoder_seq_length),\n dtype='uint16')\n\n num_other = 0\n\n for i, target_text in enumerate(target_texts_batch):\n for t, token in enumerate(target_text.split()):\n\n if token in target_token_index:\n # decoder_target_data is ahead of decoder_input_data by one timestep\n\n decoder_input_data[i, t] = target_token_index[token]\n\n if t > 0:\n # decoder_target_data will be ahead by one timestep\n # and will not include the start character.\n decoder_target_data[i, t - 1] = target_token_index[token]\n\n else:\n #print(\"Token %s in %d not in the Vocabulary\" % (token, idx))\n num_other = num_other + 1\n decoder_input_data[i, 
t] = target_token_index['**unknown**']\n\n if t > 0:\n # decoder_target_data will be ahead by one timestep\n # and will not include the start character.\n\n decoder_target_data[i, t - 1] = target_token_index['**unknown**']\n\n decoder_target_data[i, len(target_text.split()) - 1] = target_token_index['**end**']\n\n decoder_input_data_batches.append(decoder_input_data)\n decoder_target_data_batches.append(decoder_target_data)\n\n return decoder_input_data_batches, decoder_target_data_batches\n\n\ndef load_data(dataset_name, mini_batch_size, max_token_length, max_num_samples, target_token_index):\n dataset = load_raw_data(dataset_name, mini_batch_size, max_token_length=hparams['max_token_length'],\n max_num_samples=max_num_samples)\n\n for k in range(len(dataset) - 1):\n assert (len(dataset[k]) == mini_batch_size)\n encoder_input_data_batches, target_texts_batches, sequence_lengths_batches = split_dataset(dataset)\n decoder_input_data_batches, decoder_target_data_batches = create_output_int_sequences(target_texts_batches,\n sequence_lengths_batches, target_token_index)\n return encoder_input_data_batches, target_texts_batches, sequence_lengths_batches, decoder_input_data_batches, decoder_target_data_batches\n\n\n# Learning rate_schedule\n\n# Epoch 0 - 2: Warmup with a lower learning rate: (1e-4)\n# Epoch 3 - 6: (5e-4)\n# Epoch 7 - 20: Exponentially decaying from (5e-4) to (1e-5)\n\n\ndef get_learning_rate(global_step, num_train_batches):\n epoch = int(float(global_step) / num_train_batches)\n\n if hparams['OVERFIT_TO_SMALL_SAMPLE'] == True:\n\n if epoch < 20:\n # Warm up\n lr_rate = 0.0001\n elif epoch < 40:\n lr_rate = 0.0005\n elif epoch < 500:\n # Over 10 epochs decay learning rate from 0.0005 to 0.00001\n decay_rate = 0.00001 / 0.0005\n decay_steps = hparams['num_epochs']\n lr_rate = 0.0005 * decay_rate ** (float((global_step - num_train_batches * 6)) / decay_steps)\n else:\n # after 16 epochs of decay, set a new fixed rate\n lr_rate = 0.00001\n else:\n\n\n warm_up_rate = hparams['warm_up_rate']\n num_epochs_warm_up = hparams['num_epochs_warm_up']\n\n base_learning_rate = hparams['base_learning_rate']\n num_epochs_constant_lrate = hparams['num_epochs_constant_lrate']\n\n num_decay_epochs = hparams['num_decay_epochs']\n target_rate = hparams['target_rate']\n\n if epoch < num_epochs_warm_up:\n # Warm up\n lr_rate = warm_up_rate\n elif epoch < num_epochs_warm_up + num_epochs_constant_lrate:\n lr_rate = base_learning_rate\n elif epoch < num_epochs_warm_up + num_epochs_constant_lrate + num_decay_epochs:\n # Over 10 epochs decay learning rate from 0.0005 to 0.00001\n\n decay_rate = target_rate / base_learning_rate\n decay_steps = num_train_batches * num_decay_epochs\n lr_rate = base_learning_rate * decay_rate ** (float((global_step - num_train_batches * num_epochs_warm_up + num_epochs_constant_lrate)) / decay_steps)\n else:\n \n lr_rate = target_rate\n\n return lr_rate\n\n\ndef get_validation_loss(num_val_batches,\n img, val_encoder_input_data_batches,\n decoder_lengths, val_sequence_lengths_batches,\n decoder_inputs, val_decoder_input_data_batches,\n decoder_outputs, val_decoder_target_data_batches,\n train_loss, sess):\n # num_val_batches = len(val_sequence_lengths_batches)\n val_loss = 0\n for i in range(num_val_batches):\n input_data = {img: val_encoder_input_data_batches[i],\n decoder_lengths: val_sequence_lengths_batches[i],\n decoder_inputs: val_decoder_input_data_batches[i],\n decoder_outputs: val_decoder_target_data_batches[i],\n }\n\n output_tensors = [train_loss]\n loss = 
sess.run(output_tensors,\n feed_dict=input_data)\n\n print(loss)\n val_loss = val_loss + loss[0]\n\n val_loss = val_loss / num_val_batches\n return val_loss\n\n\ndef get_id_for_bucket(img_batches):\n shapes_already_found = []\n batch_ids = []\n for idx, batch in enumerate(img_batches):\n image = batch[0]\n shape = np.squeeze(image).shape\n\n for j in range(18, -1, -1):\n\n if buckets_dict[shape] == j:\n #print(\"Found batch with shape: \", buckets_dict[shape])\n #print(\"Batch id: \", idx)\n shapes_already_found.append(shape)\n\n batch_ids.append(idx)\n\n return batch_ids\n\n\ndef dump_data_set(set, name):\n filename = output + 'pickles/' + name + '.pkl'\n if not os.path.exists(output + 'pickles'):\n os.makedirs(output + 'pickles')\n f = open(filename, 'wb+')\n pickle.dump(set, f)\n print('dumped')\n f.close()\n\n\ndef load_data_pickle(name):\n print('loading: ', name)\n filename = output + 'pickles/' + name + '.pkl'\n f = open(filename, 'rb')\n data_set = pickle.load(f)\n f.close()\n return data_set\n\ndef get_data_somehow(name, fresh, _mini_batch_size, _max_token_length, _max_train_num_samples, _target_token_index): \n if (fresh):\n _set = load_data(name, _mini_batch_size, _max_token_length, _max_train_num_samples, _target_token_index) \n dump_data_set(_set, name)\n else:\n _set = load_data_pickle(name)\n\n return _set\n\ndef get_loss(img, encoder_input_data_batches,\n decoder_lengths, sequence_lengths_batches,\n decoder_inputs, decoder_input_data_batches,\n decoder_outputs, decoder_target_data_batches,\n train_loss, sess):\n \n \n num_batches = len(sequence_lengths_batches)\n avg_loss = 0\n for i in range(num_batches):\n input_data = {img: encoder_input_data_batches[i],\n decoder_lengths: sequence_lengths_batches[i],\n decoder_inputs: decoder_input_data_batches[i],\n decoder_outputs: decoder_target_data_batches[i],\n }\n\n output_tensors = [train_loss]\n loss = sess.run(output_tensors,\n feed_dict=input_data)\n\n \n avg_loss = avg_loss + loss[0]\n\n avg_loss = avg_loss / num_batches\n return avg_loss\n\n\ndef create_graph(token_vocab_size, num_units, use_attention, use_encoding_average_as_initial_state, training=True):\n # Encoder\n # One of Genthial's encoder implementations (from paper)\n # http://cs231n.stanford.edu/reports/2017/pdfs/815.pdf\n img = tf.placeholder(tf.uint8, [None, None, None, 1], name='img')\n\n img = tf.cast(img, tf.float32) / 255\n\n batch_size = tf.shape(img)[0]\n\n # Conv + max pooling\n out = tf.layers.conv2d(img, 64, 3, 1, \"SAME\", activation=tf.nn.relu)\n # Conv + max pooling\n out = tf.layers.conv2d(out, 128, 3, 1, \"SAME\", activation=tf.nn.relu)\n\n out = tf.layers.conv2d(out, 256, 3, 1, \"SAME\", activation=tf.nn.relu) # regular conv -> id\n #out = tf.layers.batch_normalization(out)\n\n out = tf.layers.conv2d(out, 256, 3, 1, \"SAME\", activation=tf.nn.relu) # regular conv -> id\n out = tf.layers.max_pooling2d(out, (2, 1), (2, 1), \"SAME\")\n\n out = tf.layers.conv2d(out, 512, 3, 1, \"SAME\", activation=tf.nn.relu) # regular conv -> id\n out = tf.layers.max_pooling2d(out, (1, 2), (1, 2), \"SAME\")\n\n # Conv valid\n out = tf.layers.conv2d(out, 512, 3, 1, \"VALID\", activation=tf.nn.relu, name=\"last_conv_layer\") # conv\n #out = tf.layers.batch_normalization(out)\n\n ## Out is now a H'*W' encoding of the image\n\n ## We want to turn this into a sequence of vectors: (e1, e2 ... 
en)\n # H= out.shape[1]\n # W= out.shape[2]\n # C= out.shape[3]\n\n H = tf.shape(out)[1]\n W = tf.shape(out)[2]\n\n # out = add_timing_signal_nd(out)\n seq = tf.reshape(tensor=out, shape=[-1, H * W, 512])\n\n # TODO: Add positional encodings\n\n # First state of the decoder consists of two vectors, the hidden state (h0) and the memory (c0).\n # Usually the hidden state refers to [h0, c0]. So a little bit of overloading of hidden state (I think)\n # This is how Genthail implements it\n\n # tf.reset_default_graph()\n\n\n if use_encoding_average_as_initial_state:\n img_mean = tf.reduce_mean(seq, axis=1)\n\n #img_mean = tf.layers.batch_normalization(img_mean)\n\n W = tf.get_variable(\"W\", shape=[512, num_units])\n b = tf.get_variable(\"b\", shape=[num_units])\n h0 = tf.tanh(tf.matmul(img_mean, W) + b)\n\n W_ = tf.get_variable(\"W_\", shape=[512, num_units])\n b_ = tf.get_variable(\"b_\", shape=[num_units])\n c0 = tf.tanh(tf.matmul(img_mean, W_) + b_)\n\n encoder_state = tf.contrib.rnn.LSTMStateTuple(c0, h0)\n\n # attention_states: [batch_size, max_time, num_units]\n attention_states = seq\n\n attention_depth = num_units\n\n # Create an attention mechanism\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(\n attention_depth, attention_states, scale=True) # Can try scale = False\n\n # Decoder: from seq2seq tutorial\n embedding_size = 80 # We follow Genthail's suggestion of 80 embedding size 80\n\n decoder_inputs = tf.placeholder(tf.uint16, [None, None],\n name='decoder_inputs') # Supposed to be a sequence of numbers corresponding to the different tokens in the sentence\n decoder_inputs = tf.cast(decoder_inputs, tf.int32)\n # Embedding of target tokens\n\n # Embedding matrix\n embedding_decoder = tf.get_variable(\n \"embedding_encoder\", [token_vocab_size, embedding_size], tf.float32) # tf.float32 was default in the NMT tutorial\n\n # Look up embedding:\n # decoder_inputs: [batch_size, max_time]\n # decoder_emb_inp: [batch_size, max_time, embedding_size]\n decoder_emb_inp = tf.nn.embedding_lookup(\n embedding_decoder, decoder_inputs)\n\n # Build RNN cell\n #decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units)\n\n decoder_cell = tf.nn.rnn_cell.LSTMCell(num_units)\n\n # Using this instead compared to NMT tutorial so we can initialize with orthogonal intializer (like Genthail)\n #decoder_cell = tf.nn.rnn_cell.LSTMCell(\n #num_units,\n #initializer=tf.orthogonal_initializer,\n #)\n\n if use_attention:\n decoder_cell = tf.contrib.seq2seq.AttentionWrapper(\n decoder_cell, attention_mechanism,\n alignment_history=hparams['visualize_attention'],\n attention_layer_size=hparams['num_units'])\n\n ## Set initial state of decoder to zero (possible to use previous state)\n\n if use_encoding_average_as_initial_state:\n decoder_initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=encoder_state)\n else:\n decoder_initial_state = decoder_cell.zero_state(batch_size, tf.float32)\n\n else:\n decoder_initial_state = encoder_state\n\n decoder_lengths = tf.placeholder(tf.uint16, [None])\n decoder_lengths = tf.cast(decoder_lengths, tf.int32)\n\n # Helper\n helper = tf.contrib.seq2seq.TrainingHelper(\n decoder_emb_inp, decoder_lengths, time_major=False)\n\n # Projection layer\n projection_layer = layers_core.Dense(token_vocab_size, use_bias=False,\n name=\"output_projection\") # Said layers_core before\n\n # Decoder\n decoder = tf.contrib.seq2seq.BasicDecoder(\n decoder_cell, helper, decoder_initial_state,\n output_layer=projection_layer)\n\n # Dynamic decoding\n outputs, _, _ = 
tf.contrib.seq2seq.dynamic_decode(decoder,\n output_time_major=False) ## Understand parameter Impute finished\n logits = outputs.rnn_output\n\n\n\n global_step = tf.Variable(0, trainable=False) ## IMPORTANT\n\n # target_weights = tf.placeholder(tf.int8, [None, None], name='target_weights')\n # target_weights = tf.cast(target_weights, tf.float32)\n\n # Supposed to be a sequence of numbers corresponding to the different tokens in the sentence\n decoder_outputs = tf.placeholder(tf.uint16, [None, None], name='decoder_outputs')\n decoder_outputs = tf.cast(decoder_outputs, tf.int32)\n learning_rate = tf.placeholder(tf.float32, shape=[])\n\n # Loss function\n\n # HYPERPARAMETER: Should we divide by sequence length on each\n crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=decoder_outputs, logits=logits)\n\n # Create the target_weights (the masking)\n max_seq_length = tf.shape(decoder_outputs)[1]\n target_weights = tf.sequence_mask(decoder_lengths, max_seq_length, dtype=logits.dtype)\n\n train_loss = tf.reduce_sum(crossent * target_weights) / tf.cast(batch_size, tf.float32)\n\n tf.summary.scalar('loss', train_loss)\n\n # Calculate and clip gradients\n params = tf.trainable_variables()\n gradients = tf.gradients(train_loss, params)\n\n max_gradient_norm = 3 # Usually a number between 1 and 5. Set to 5 in the NMT.\n\n clipped_gradients, global_norm = tf.clip_by_global_norm(\n gradients, max_gradient_norm)\n\n tf.summary.scalar('global_norm', global_norm)\n\n # Optimization\n optimizer = tf.train.AdamOptimizer(learning_rate)\n update_step = optimizer.apply_gradients(\n zip(clipped_gradients, params), global_step=global_step)\n\n param_names = [v.name for v in params]\n\n gradient_names = [g.name for g in gradients]\n\n gradient_norms = [tf.norm(gradient) for gradient in gradients]\n\n grads = list(zip(gradients, params))\n\n for grad, var in grads:\n tf.summary.histogram(var.name + '/gradient', grad)\n\n for param in params:\n to_summary = tf.summary.histogram(param.name + '/weight', param)\n\n # config=tf.ConfigProto(log_device_placement=True) logs whether it runs on the gpus\n # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\n\n merged = tf.summary.merge_all()\n if training:\n return [merged,\n update_step,\n train_loss,\n optimizer,\n global_norm,\n gradient_norms,\n global_step,\n img,\n decoder_lengths,\n decoder_inputs,\n decoder_outputs,\n learning_rate]\n else:\n return embedding_decoder, decoder_cell, decoder_initial_state, projection_layer, img\n\n\ndef inference_tensor(target_token_index,\n inference_batch_size,\n embedding_decoder,\n decoder_cell,\n decoder_initial_state,\n projection_layer,\n maximum_iterations = hparams['max_token_length']):\n \"\"\"\n :param target_token_index:\n :param batch_for_inference:\n :param embedding_decoder:\n :param decoder_cell:\n :param decoder_initial_state:\n :param projection_layer:\n :param maximum_iterations:\n :return: RETURNS A TENSOR THE CALLER USES FOR INFERENCE ON 1 BATCH\n \"\"\"\n tgt_sos_id = target_token_index['**start**'] # 1\n tgt_eos_id = target_token_index['**end**'] # 0\n\n # Helper\n inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding_decoder,\n tf.fill([inference_batch_size], tgt_sos_id), tgt_eos_id)\n\n # Decoder\n inference_decoder = tf.contrib.seq2seq.BasicDecoder(\n decoder_cell, inference_helper, decoder_initial_state,\n output_layer=projection_layer)\n # Dynamic decoding\n outputs, final_state, _ = tf.contrib.seq2seq.dynamic_decode(\n inference_decoder, 
maximum_iterations=maximum_iterations)\n\n\n if hparams['visualize_attention']:\n attention_images = final_state[0].alignment_history.stack()\n\n translations = outputs.sample_id\n logits = outputs.rnn_output\n return translations,logits, attention_images\n\ndef predict_batch(sess,\n batch,\n target_token_index,\n embedding_decoder,\n decoder_cell,\n decoder_initial_state,\n projection_layer,\n img,\n maximum_iterations=hparams['max_token_length']):\n #for b in batches:\n batch_len = batch.shape[0]\n translation_t, logits_t, attention_images_t = inference_tensor(target_token_index,\n batch_len,\n embedding_decoder,\n decoder_cell,\n decoder_initial_state,\n projection_layer)\n translation, logits, attention_images = sess.run([translation_t, logits_t, attention_images_t], feed_dict={img: batch})\n return translation,logits, attention_images\n\n\ndef inference_tensor_beam(target_token_index,\n inference_batch_size,\n embedding_decoder,\n decoder_cell,\n encoder_state,\n projection_layer,\n beam_width,\n maximum_iterations):\n\n tgt_sos_id = target_token_index['**start**'] # 1\n tgt_eos_id = target_token_index['**end**'] # 0\n\n\n decoder_initial_state = tf.contrib.seq2seq.tile_batch(\n encoder_state, multiplier=beam_width)\n\n inference_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n cell=decoder_cell,\n embedding=embedding_decoder,\n start_tokens=tf.fill([inference_batch_size], tgt_sos_id),\n end_token=tgt_eos_id,\n initial_state=decoder_initial_state,\n beam_width=beam_width,\n output_layer=projection_layer,\n length_penalty_weight=0.0)\n\n # Dynamic decoding\n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(\n inference_decoder, maximum_iterations=maximum_iterations)\n print(maximum_iterations)\n translations = outputs.predicted_ids\n logits = outputs.rnn_output\n return translations,logits\n\ndef predict_batch_beam(sess,\n batch,\n target_token_index,\n embedding_decoder,\n decoder_cell,\n encoder_state,\n projection_layer,\n img,\n beam_size,\n maximum_iterations=hparams['max_token_length']):\n #for b in batches:\n batch_len = batch.shape[0]\n translation_t, logits_t = inference_tensor_beam(target_token_index,\n batch_len,\n embedding_decoder,\n decoder_cell,\n encoder_state,\n projection_layer,\n beam_size,\n maximum_iterations)\n translation, logits = sess.run([translation_t, logits_t], feed_dict={img: batch})\n return translation,logits\n\n\ndef initialize_variables(sess, restore, path):\n \n if restore:\n print('restoring')\n tf_loader = tf.train.Saver(allow_empty=False)\n tf_loader.restore(sess,path)\n else:\n print('reinitializing')\n sess.run(tf.global_variables_initializer())\n\n\ndef create_hparams_log():\n file = open(output + \"hparams.txt\",\"w\") \n file.write(json.dumps(hparams, indent=4))\n\n file.close()\n\ndef create_metric_output_files():\n\n file = open(output + \"metrics.txt\",\"w\") \n \n\n\n file.write(\"Train loss\" + \"\\t\" + \"Val loss\" + \"\\t\" + \"Learning rate\" + \"\\t\" + \"Global grad norm\" + \"\\n\")\n\n file.close()\n\n\n\ndef main():\n create_hparams_log()\n\n\n num_epochs = hparams['num_epochs']\n max_token_length = hparams['max_token_length']\n mini_batch_size = hparams['mini_batch_size']\n max_train_num_samples = hparams['max_train_num_samples']\n max_val_num_samples = hparams['max_val_num_samples']\n use_attention = hparams['use_attention']\n use_encoding_average_as_initial_state = hparams['use_encoding_average_as_initial_state']\n num_units = hparams['num_units'] # LSTM number of units\n\n # Create the vocabulary\n token_vocabulary = [\"**end**\", 
\"**start**\", \"**unknown**\"]\n\n token_vocabulary.extend(get_vocabulary(\"train\"))\n\n target_tokens = token_vocabulary # TODO: Refactor this. Currently duplicate naming\n\n token_vocab_size = len(target_tokens)\n \n target_token_index = dict(\n [(token, i) for i, token in enumerate(target_tokens)])\n\n reverse_target_token_index = dict(\n (i, char) for char, i in target_token_index.items())\n print(\"\\n ======================= Loading Data =======================\")\n #new cell\n\n train_dataset = get_data_somehow('train', True, mini_batch_size, max_token_length, max_train_num_samples, target_token_index)\n val_dataset = get_data_somehow('val', True, mini_batch_size, max_token_length, max_val_num_samples, target_token_index)\n \n train_encoder_input_data_batches = train_dataset[0]\n train_target_texts_batches = train_dataset[1]\n train_sequence_lengths_batches = train_dataset[2]\n train_decoder_input_data_batches = train_dataset[3]\n train_decoder_target_data_batches = train_dataset[4]\n\n val_encoder_input_data_batches = val_dataset[0]\n val_target_texts_batches = val_dataset[1]\n val_sequence_lengths_batches = val_dataset[2]\n val_decoder_input_data_batches = val_dataset[3]\n val_decoder_target_data_batches = val_dataset[4]\n print(\"\\n ======================= Data Loaded =======================\")\n \n num_train_batches = len(train_target_texts_batches)\n num_val_batches = len(val_target_texts_batches)\n num_train_samples = (num_train_batches - 1) * mini_batch_size + train_target_texts_batches[-1].shape[0]\n num_val_samples = (num_val_batches - 1) * mini_batch_size + val_target_texts_batches[-1].shape[0]\n\n #new cell\n print(\"Num train batches: \", num_train_batches)\n print(\"Num val batches: \", num_val_batches)\n\n print(\"Num train samples: \", num_train_samples)\n print(\"Num val samples: \", num_val_samples)\n\n t = create_graph(token_vocab_size, num_units, use_attention, use_encoding_average_as_initial_state)\n\n merged, update_step, train_loss, optimizer, global_norm, gradient_norms, \\\n global_step, img, decoder_lengths, decoder_inputs, decoder_outputs, learning_rate = t\n\n \n sess = tf.Session()\n tf_saver = tf.train.Saver(save_relative_paths=True)\n\n print_tensors_in_checkpoint_file(file_name='/checkpoints/checkpoints/model_10.ckpt', tensor_name='', all_tensors=False, all_tensor_names=True)\n\n initialize_variables(sess, restore=hparams['restore_from_checkpoint'], path='/checkpoints/checkpoints/model_10.ckpt')\n\n\n train_writer = tf.summary.FileWriter(output + 'summaries/train/', sess.graph)\n\n print(\"Num batches: \", len(\n train_sequence_lengths_batches)) # (Note: they are not necessarily equal size towards the end (this will fix later))\n\n train_decoder_target_data_batches\n\n print(train_sequence_lengths_batches[0])\n\n print(train_encoder_input_data_batches[0].shape)\n\n # train_decoder_input_data_batches[0][0] = np.array(train_decoder_input_data_batches[0][0])\n train_decoder_target_data_batches[0][0] = np.array(train_decoder_target_data_batches[0][0])\n\n # print(train_decoder_input_data_batches[0].shape)\n\n print(train_sequence_lengths_batches[0].shape)\n print(train_decoder_target_data_batches[0].shape)\n print(train_decoder_input_data_batches[0].shape)\n\n learning_rates = []\n\n for step in range(num_train_batches * 20):\n learning_rates.append(get_learning_rate(step, num_train_batches))\n\n #plt.title('Learning rate (10^) over the steps')\n #plt.ylabel('learning rate (10 ^)')\n #plt.xlabel('steps #')\n #plt.plot(np.log10(learning_rates))\n 
#plt.savefig(output + 'learning_rate.png')\n #plt.close()\n\n total_parameters = 0\n # Get total number of parameters\n\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n shape = variable.get_shape()\n # print(shape)\n # print(len(shape))\n variable_parameters = 1\n for dim in shape:\n # print(dim)\n variable_parameters *= dim.value\n # print(variable_parameters)\n total_parameters += variable_parameters\n\n print(\"Total number of parameters: \", total_parameters)\n\n print(\"Num batches: \", num_train_batches)\n\n glob_step = sess.run(\n global_step) # Get what global step we are at in training already (so that the learning_rate is set correct)\n #_list = get_id_for_bucket(train_encoder_input_data_batches)\n\n\n create_metric_output_files()\n\n for epoch in range(num_epochs + 1):\n print(\"Epoch: \", epoch)\n\n for i in range(num_train_batches):\n \n\n\n\n # Calculate running time for batch\n start_time = datetime.datetime.now()\n\n # Calculate the right learning rate for this step.\n\n lrate = get_learning_rate(glob_step, num_train_batches)\n\n \n\n input_data = {img: train_encoder_input_data_batches[i],\n decoder_lengths: train_sequence_lengths_batches[i],\n decoder_inputs: train_decoder_input_data_batches[i],\n decoder_outputs: train_decoder_target_data_batches[i],\n learning_rate: lrate\n }\n # Write to tensorboard\n if glob_step % 200 == 0:\n\n\n output_tensors = [merged, update_step, train_loss, optimizer._lr, global_norm, gradient_norms,\n global_step]\n summary, _, loss, lr_rate, global_grad_norm, grad_norms, glob_step = sess.run(output_tensors,\n feed_dict=input_data)\n train_writer.add_summary(summary, glob_step)\n \n else:\n \n\n output_tensors = [update_step, train_loss, global_norm, global_step, optimizer._lr]\n\n _, loss, global_grad_norm, glob_step, lr_rate = sess.run(output_tensors,\n feed_dict=input_data)\n \n\n if i == 0:\n print(\"loss:\", loss)\n print(\"glob step\", glob_step)\n\n if i == 0:\n validation_loss = get_loss(img, val_encoder_input_data_batches,\n decoder_lengths, val_sequence_lengths_batches,\n decoder_inputs, val_decoder_input_data_batches,\n decoder_outputs, val_decoder_target_data_batches,\n train_loss, sess) \n\n training_loss = get_loss(img, train_encoder_input_data_batches,\n decoder_lengths, train_sequence_lengths_batches,\n decoder_inputs, train_decoder_input_data_batches,\n decoder_outputs, train_decoder_target_data_batches,\n train_loss, sess) \n\n file = open(output + \"metrics.txt\",\"a\") \n lrate_to_file = ('%s' % ('%.8g' % lrate))\n file.write(str(training_loss) + \"\\t\" + str(validation_loss) + \"\\t\" + lrate_to_file + \"\\t\" + str(global_grad_norm)+ \"\\n\")\n\n file.close()\n # Run the following in terminal to get up tensorboard: tensorboard --logdir=summaries/train\n \n \n \n save_path = tf_saver.save(sess, output + 'checkpoints/model_'+str(epoch)+'.ckpt')\n print(\"Model saved in file: %s\" % save_path)\n \n \n\n\n\n \n\nif __name__ == '__main__':\n main()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":37268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"235898578","text":"# THIS CODE TRANSFORMS THE SHARE DATA DUMP INTO A JSON FILE WITH ONE RECORD PER LINE\n\nimport json, ijson, os\n\n# contains the number of records\ncount = 0\n\n# name of the output file / folder\nshare = 'share-jan-2019'\n\n# make sure the file is empty\n# warning: it will replace the file if it already exists\nopen(os.path.join('..', '..', 
'data', share + '.json'), 'w').close() \n\nwith open(os.path.join('..', '..', 'raw_data', 'asapbio.json'), 'r') as f:\n\twith open(os.path.join(share + '.json'), 'a') as o:\n\t\tfor item in ijson.items(f, \"item\"):\n\t\t\tcount += 1\n\n\t\t\tjson.dump(item['_source'], o)\n\t\t\to.write('\\n')\n\n\t\t\t# uncomment these lines if you want to generate a single file for each record\n\t\t\t# with open(os.path.join(share, item['_source']['id'] + '.json'), 'w') as o:\n\t\t\t# \tjson.dump(item['_source'], o)\n\nprint('Total number of records: {}'.format(count))\n","sub_path":"data_collection/share/reformat_share_data.py","file_name":"reformat_share_data.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"583946318","text":"class SubClass:\r\n def __init__(self, db):\r\n self.db = db\r\n\r\n# >>>>>> --- Accepting user name and address here --- <<<<<<\r\n def userInput(self):\r\n userInput = {}\r\n uname = str(input(\"Enter User Name: \"))\r\n uaddress = str(input('Enter user address: '))\r\n userInput['Name'] = uname\r\n userInput['Address'] = uaddress\r\n return userInput\r\n\r\n# >>>>>> --- Checking for user if exists --- <<<<<< \r\n def findUser(self, uInput):\r\n count = self.db.UserCollections.count_documents({})\r\n if (int(count) == 0):\r\n return 0\r\n else:\r\n uCollections = self.db.UserCollections.find({'Name': uInput['Name']})\r\n for uInput in uCollections:\r\n print(uInput)\r\n return 1 \r\n\r\n# >>>>>> --- Display one user details --- <<<<<<\r\n def displayOneUser(self, uInput):\r\n uCollections = self.db.UserCollections.find({'Name': uInput['Name']})\r\n for uInput in uCollections:\r\n print(uInput)\r\n return uInput\r\n\r\n# >>>>>> --- Insert user details to DB --- <<<<<<\r\n def insertUser(self, uInput):\r\n result = self.db.UserCollections.insert_one(uInput)\r\n if (result.acknowledged):\r\n print('User successfully added!!!\\n \\nUser Object ID is: ', str(result.inserted_id)) #Printing user object id\r\n \r\n# >>>>>> --- Display all user details --- <<<<<<\r\n def displayAllUser(self):\r\n print('--------- Users Available in UserDataDB ---------')\r\n uCollections = self.db.UserCollections.find()\r\n for allrecord in uCollections:\r\n print(allrecord)\r\n print('\\n\\n\\n')\r\n \r\n\r\n# >>>>>> --- Update the user Address --- <<<<<<\r\n def updateUser(self, uInput):\r\n self.db.UserCollections.update(\r\n {\r\n 'Name': uInput['Name']\r\n },\r\n {\r\n '$set':{\r\n 'Address': uInput['Address']\r\n }\r\n\r\n }, multi = False\r\n )\r\n uCollections = self.db.UserCollections.find({'Name': uInput['Name']})\r\n for uInput in uCollections:\r\n print('Record updated successfully!!\\n', uInput)\r\n print('\\n')\r\n\r\n# >>>>>> --- Delete User Details --- <<<<<<\r\n def deleteUser(self, uInput):\r\n self.db.UserCollections.delete_one({\r\n 'Name': uInput['Name']\r\n })\r\n print('!!!-----Record Deleted----!!!')\r\n","sub_path":"AllFunctions.py","file_name":"AllFunctions.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"72148004","text":"import numpy as np\nfrom math import pow, atan2, sqrt, pi\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import Point\n\n\n\nclass Pose2D:\n def __init__(self, x = None, y = None, theta = None):\n self.x = x\n self.y = y\n self.theta = theta\n\nclass Target:\n def __init__(self, pose = Pose2D(), radius = None):\n self.pose = 
pose\n self.radius = radius\n\n\ndef euclidean_distance(position, target):\n\treturn sqrt(pow((target.x - position.x), 2) +\n\t\t\t\tpow((position.y - target.y), 2))\n\n\ndef euclidean_distance_tuple(current_pose, target_pose):\n\t\"\"\" Same as `euclidean_distance` but it accept tuples (x,y) instead of pose objects. \"\"\"\n\treturn sqrt(pow((target_pose[0] - current_pose[0]), 2) +\n\t\t\t\tpow((target_pose[1] - current_pose[1]), 2))\n\n\ndef to_positive_angle(angle):\n\treturn (angle+2*pi)%(2*pi)\n\n\n\n\nclass ToTargetPController:\n\tdef __init__(self, linear_speed, orientation_speed, linear_threshold=0.05, orientation_eps=0.008):\n\t\tself.gain1 = linear_speed\n\t\tself.gain2 = orientation_speed\n\t\tself.linear_eps = linear_threshold\n\t\tself.orientation_eps = orientation_eps\n\n\t\n\tdef min_angle_diff(self, angle, target_angle):\n\t\tdelta = target_angle - angle\n\t\tif abs(delta) > pi+0.0001:\n\t\t\tmodule = 2*pi - abs(delta)\n\t\t\tif delta > 0:\n\t\t\t\tmin_delta = -1. * module\n\t\t\telse:\n\t\t\t\tmin_delta = module\n\t\telse:\n\t\t\tmin_delta=delta\n\t\treturn min_delta\n\n\n\tdef move(self, position, orientation, target, target_orientation=None,\n\t max_orientation_speed=None, max_linear_speed=None, custom_distance_tollerance = None):\n\t\tvelocity = Twist()\n\t\tdone = True\n\t\tdistance_current = euclidean_distance(position, target)\n\t\tdistance_eps = custom_distance_tollerance or self.linear_eps\n\n\t\tif distance_current >= distance_eps:\n\n\t\t\tvector = (target.x - position.x, target.y - position.y)\n\t\t\tnorm = np.linalg.norm([vector[0],vector[1]])\n\t\t\tcos_angle = np.math.acos(vector[0]/norm)\n\t\t\tangle_to_face_target = cos_angle if np.sign(vector[1]) > 0 else -cos_angle\n\n\t\t\tif distance_current >= distance_eps * 2 and abs(self.min_angle_diff(orientation,angle_to_face_target)) >= self.orientation_eps * 5:\n\t\t\t\t# -- Turning towards the target\n\t\t\t\t# print('steering')\n\t\t\t\tvelocity.linear.x = 0.\n\t\t\t\tvelocity.angular.z = self.gain2 * self.min_angle_diff(orientation,angle_to_face_target)\n\t\t\t\tif max_orientation_speed is not None:\n\t\t\t\t\tmodule = min(abs(velocity.angular.z), max_orientation_speed)\n\t\t\t\t\tvelocity.angular.z *= module / abs(velocity.angular.z)\n\n\t\t\t\tdone = False\n\n\t\t\telse:\n\t\t\t\t# -- Moving towards the target\n\t\t\t\t# print('moving')\n\t\t\t\tvelocity.linear.x = self.gain1 #* euclidean_distance(position, target) \n\t\t\t\tvelocity.angular.z = 0.\n\t\t\t\tif max_linear_speed is not None:\n\t\t\t\t\tvelocity.linear.x = min(velocity.linear.x, max_linear_speed)\n\t\t\t\tdone = False\n\n\t\t# -- Turning towards the target orientation\n\t\tif done and target_orientation is not None and abs(self.min_angle_diff(orientation,target_orientation)) >= self.orientation_eps:\n\t\t\t# print('final steering')\n\t\t\tvelocity.linear.x = 0.\n\t\t\tvelocity.angular.z = self.gain2 * self.min_angle_diff(orientation,target_orientation)\n\t\t\tif max_orientation_speed is not None:\n\t\t\t\tmodule = min(abs(velocity.angular.z), max_orientation_speed)\n\t\t\t\tvelocity.angular.z *= module / abs(velocity.angular.z)\n\n\t\t\tdone = False\n\n\t\treturn done, velocity\n\n\t\t","sub_path":"thymar/src/utils_movement.py","file_name":"utils_movement.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"107671581","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils.network as 
net_utils\nimport cfgs.config as cfg\n\nfrom layers.reorg.reorg_layer import ReorgLayer\nfrom utils.cython_bbox import bbox_ious, anchor_intersections\nfrom utils.cython_yolo import yolo_to_bbox\nfrom functools import partial\n\nfrom multiprocessing import Pool\n\n\ndef _make_layers(in_channels, net_cfg):\n layers = []\n\n if len(net_cfg) > 0 and isinstance(net_cfg[0], list):\n for sub_cfg in net_cfg:\n layer, in_channels = _make_layers(in_channels, sub_cfg)\n layers.append(layer)\n else:\n for item in net_cfg:\n if item == 'M':\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n else:\n out_channels, ksize = item\n layers.append(net_utils.Conv2d_BatchNorm(in_channels,\n out_channels,\n ksize,\n same_padding=True))\n # layers.append(net_utils.Conv2d(in_channels, out_channels,\n # ksize, same_padding=True))\n in_channels = out_channels\n\n return nn.Sequential(*layers), in_channels\n\n\ndef _process_batch(data, size_index):\n W, H = cfg.multi_scale_out_size[size_index]\n inp_size = cfg.multi_scale_inp_size[size_index]\n out_size = cfg.multi_scale_out_size[size_index]\n\n bbox_pred_np, gt_boxes, gt_classes, dontcares, iou_pred_np = data\n\n # net output\n hw, num_anchors, _ = bbox_pred_np.shape\n\n # gt\n _classes = np.zeros([hw, num_anchors, cfg.num_classes], dtype=np.float)\n _class_mask = np.zeros([hw, num_anchors, 1], dtype=np.float)\n\n _ious = np.zeros([hw, num_anchors, 1], dtype=np.float)\n _iou_mask = np.zeros([hw, num_anchors, 1], dtype=np.float)\n\n _boxes = np.zeros([hw, num_anchors, 4], dtype=np.float)\n _boxes[:, :, 0:2] = 0.5\n _boxes[:, :, 2:4] = 1.0\n _box_mask = np.zeros([hw, num_anchors, 1], dtype=np.float) + 0.01\n\n # scale pred_bbox\n anchors = np.ascontiguousarray(cfg.anchors, dtype=np.float)\n bbox_pred_np = np.expand_dims(bbox_pred_np, 0)\n bbox_np = yolo_to_bbox(\n np.ascontiguousarray(bbox_pred_np, dtype=np.float),\n anchors,\n H, W)\n # bbox_np = (hw, num_anchors, (x1, y1, x2, y2)) range: 0 ~ 1\n bbox_np = bbox_np[0]\n bbox_np[:, :, 0::2] *= float(inp_size[0]) # rescale x\n bbox_np[:, :, 1::2] *= float(inp_size[1]) # rescale y\n\n # gt_boxes_b = np.asarray(gt_boxes[b], dtype=np.float)\n gt_boxes_b = np.asarray(gt_boxes, dtype=np.float)\n\n # for each cell, compare predicted_bbox and gt_bbox\n bbox_np_b = np.reshape(bbox_np, [-1, 4])\n ious = bbox_ious(\n np.ascontiguousarray(bbox_np_b, dtype=np.float),\n np.ascontiguousarray(gt_boxes_b, dtype=np.float)\n )\n best_ious = np.max(ious, axis=1).reshape(_iou_mask.shape)\n iou_penalty = 0 - iou_pred_np[best_ious < cfg.iou_thresh]\n _iou_mask[best_ious <= cfg.iou_thresh] = cfg.noobject_scale * iou_penalty\n\n # locate the cell of each gt_boxe\n cell_w = float(inp_size[0]) / W\n cell_h = float(inp_size[1]) / H\n cx = (gt_boxes_b[:, 0] + gt_boxes_b[:, 2]) * 0.5 / cell_w\n cy = (gt_boxes_b[:, 1] + gt_boxes_b[:, 3]) * 0.5 / cell_h\n cell_inds = np.floor(cy) * W + np.floor(cx)\n cell_inds = cell_inds.astype(np.int)\n\n target_boxes = np.empty(gt_boxes_b.shape, dtype=np.float)\n target_boxes[:, 0] = cx - np.floor(cx) # cx\n target_boxes[:, 1] = cy - np.floor(cy) # cy\n target_boxes[:, 2] = \\\n (gt_boxes_b[:, 2] - gt_boxes_b[:, 0]) / inp_size[0] * out_size[0] # tw\n target_boxes[:, 3] = \\\n (gt_boxes_b[:, 3] - gt_boxes_b[:, 1]) / inp_size[1] * out_size[1] # th\n\n # for each gt boxes, match the best anchor\n gt_boxes_resize = np.copy(gt_boxes_b)\n gt_boxes_resize[:, 0::2] *= (out_size[0] / float(inp_size[0]))\n gt_boxes_resize[:, 1::2] *= (out_size[1] / float(inp_size[1]))\n anchor_ious = anchor_intersections(\n 
anchors,\n np.ascontiguousarray(gt_boxes_resize, dtype=np.float)\n )\n anchor_inds = np.argmax(anchor_ious, axis=0)\n\n ious_reshaped = np.reshape(ious, [hw, num_anchors, len(cell_inds)])\n for i, cell_ind in enumerate(cell_inds):\n if cell_ind >= hw or cell_ind < 0:\n print('cell inds size {}'.format(len(cell_inds)))\n print('cell over {} hw {}'.format(cell_ind, hw))\n continue\n a = anchor_inds[i]\n\n # 0 ~ 1, should be close to 1\n iou_pred_cell_anchor = iou_pred_np[cell_ind, a, :]\n _iou_mask[cell_ind, a, :] = cfg.object_scale * (1 - iou_pred_cell_anchor) # noqa\n # _ious[cell_ind, a, :] = anchor_ious[a, i]\n _ious[cell_ind, a, :] = ious_reshaped[cell_ind, a, i]\n\n _box_mask[cell_ind, a, :] = cfg.coord_scale\n target_boxes[i, 2:4] /= anchors[a]\n _boxes[cell_ind, a, :] = target_boxes[i]\n\n _class_mask[cell_ind, a, :] = cfg.class_scale\n _classes[cell_ind, a, gt_classes[i]] = 1.\n\n # _boxes[:, :, 2:4] = np.maximum(_boxes[:, :, 2:4], 0.001)\n # _boxes[:, :, 2:4] = np.log(_boxes[:, :, 2:4])\n\n return _boxes, _ious, _classes, _box_mask, _iou_mask, _class_mask\n\n\nclass Darknet19_base(nn.Module):\n def __init__(self):\n super(Darknet19_base, self).__init__()\n net_cfgs = [\n # conv1s\n [(32, 3)],\n ['M', (64, 3)],\n ['M', (128, 3), (64, 1), (128, 3)],\n ['M', (256, 3), (128, 1), (256, 3)],\n ['M', (512, 3), (256, 1), (512, 3), (256, 1), (512, 3)],\n # conv2\n ['M', (1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3)],\n # ------------\n # conv3\n [(1024, 3), (1024, 3)],\n # conv4\n [(1024, 3)]\n ]\n\n # darknet\n self.conv1s, c1 = _make_layers(3, net_cfgs[0:5])\n self.conv2, c2 = _make_layers(c1, net_cfgs[5])\n # ---\n self.conv3, c3 = _make_layers(c2, net_cfgs[6])\n\n stride = 2\n # stride*stride times the channels of conv1s\n self.reorg = ReorgLayer(stride=2)\n # cat [conv1s, conv3]\n self.conv4, c4 = _make_layers((c1*(stride*stride) + c3), net_cfgs[7])\n self.feature_channel = c4\n\n def forward(self, im_data):\n conv1s = self.conv1s(im_data)\n conv2 = self.conv2(conv1s)\n conv3 = self.conv3(conv2)\n conv1s_reorg = self.reorg(conv1s)\n cat_1_3 = torch.cat([conv1s_reorg, conv3], 1)\n conv4 = self.conv4(cat_1_3)\n return conv4\n\nclass Block(nn.Module):\n \"\"\"\n expand + depthwise + pointwise\n \"\"\"\n def __init__(self, in_planes, out_planes, expansion, stride, current_rate=1):\n super(Block, self).__init__()\n self.stride = stride\n\n\n if expansion > 1:\n planes = expansion * in_planes\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n else:\n planes = in_planes\n self.conv1 = None\n self.bn1 = None\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=current_rate, dilation=current_rate, groups=planes, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(out_planes)\n\n self.shortcut = False\n if stride == 1 and in_planes == out_planes:\n self.shortcut = True\n\n def forward(self, x):\n if self.conv1:\n out = F.relu6(self.bn1(self.conv1(x)))\n else:\n out = x\n out = F.relu6(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n if self.shortcut:\n out = out + x\n return out\n\n\nclass Mobilenetv2_base(nn.Module):\n\n cfg = [(1, 16, 1, 1),\n (6, 24, 2, 2),\n (6, 32, 3, 2),\n (6, 64, 4, 2),\n (6, 96, 3, 1),\n (6, 160, 3, 2),\n (6, 320, 1, 1)]\n target_stride = 16\n\n def __init__(self):\n super(Mobilenetv2_base, self).__init__()\n # self.cfg = cfg\n\n 
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(32)\n self.layers = self._make_layers(in_planes=32)\n self.feature_channel = 320\n\n def _make_layers(self, in_planes):\n layers = []\n\n output_stride = 2\n current_rate = 1\n\n for expansion, out_planes, num_blocks, stride in self.cfg:\n if output_stride == self.target_stride:\n current_rate *= stride\n top_stride = 1\n else:\n top_stride = stride\n output_stride *= stride\n strides = [top_stride] + [1]*(num_blocks-1)\n for layer_stride in strides:\n layers.append(Block(in_planes, out_planes,\n expansion, layer_stride, current_rate))\n in_planes = out_planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layers(out)\n\n return out\n\nclass Yolonet(nn.Module):\n def __init__(self, basenet):\n super(Yolonet, self).__init__()\n self.basenet = basenet\n feature_channel = self.basenet.feature_channel\n\n out_channels = cfg.num_anchors * (cfg.num_classes + 5)\n self.conv5 = net_utils.Conv2d(feature_channel, out_channels, 1, 1, relu=False)\n self.global_average_pool = nn.AvgPool2d((1, 1))\n\n # train\n self.bbox_loss = None\n self.iou_loss = None\n self.cls_loss = None\n self.pool = Pool(processes=10)\n\n @property\n def loss(self):\n return self.bbox_loss + self.iou_loss + self.cls_loss\n\n def forward(self, im_data, gt_boxes=None, gt_classes=None, dontcare=None,\n size_index=0):\n feature = self.basenet(im_data)\n conv5 = self.conv5(feature)\n global_average_pool = self.global_average_pool(conv5)\n\n # for detection\n # bsize, c, h, w -> bsize, h, w, c ->\n # bsize, h x w, num_anchors, 5+num_classes\n bsize, _, h, w = global_average_pool.size()\n # assert bsize == 1, 'detection only support one image per batch'\n global_average_pool_reshaped = \\\n global_average_pool.permute(0, 2, 3, 1).contiguous().view(bsize,\n -1, cfg.num_anchors, cfg.num_classes + 5)\n\n xy_pred = F.sigmoid(global_average_pool_reshaped[:, :, :, 0:2])\n wh_pred = torch.exp(global_average_pool_reshaped[:, :, :, 2:4])\n bbox_pred = torch.cat([xy_pred, wh_pred], 3)\n iou_pred = F.sigmoid(global_average_pool_reshaped[:, :, :, 4:5])\n\n score_pred = global_average_pool_reshaped[:, :, :, 5:].contiguous()\n prob_pred = F.softmax(score_pred.view(-1, score_pred.size()[-1])).view_as(score_pred)\n\n # for training\n if self.training:\n bbox_pred_np = bbox_pred.data.cpu().numpy()\n iou_pred_np = iou_pred.data.cpu().numpy()\n _boxes, _ious, _classes, _box_mask, _iou_mask, _class_mask = \\\n self._build_target(bbox_pred_np,\n gt_boxes,\n gt_classes,\n dontcare,\n iou_pred_np,\n size_index)\n\n _boxes = net_utils.np_to_variable(_boxes)\n _ious = net_utils.np_to_variable(_ious)\n _classes = net_utils.np_to_variable(_classes)\n box_mask = net_utils.np_to_variable(_box_mask,\n dtype=torch.FloatTensor)\n iou_mask = net_utils.np_to_variable(_iou_mask,\n dtype=torch.FloatTensor)\n class_mask = net_utils.np_to_variable(_class_mask,\n dtype=torch.FloatTensor)\n\n num_boxes = sum((len(boxes) for boxes in gt_boxes))\n\n # _boxes[:, :, :, 2:4] = torch.log(_boxes[:, :, :, 2:4])\n box_mask = box_mask.expand_as(_boxes)\n\n self.bbox_loss = nn.MSELoss(size_average=False)(bbox_pred * box_mask, _boxes * box_mask) / num_boxes # noqa\n self.iou_loss = nn.MSELoss(size_average=False)(iou_pred * iou_mask, _ious * iou_mask) / num_boxes # noqa\n\n class_mask = class_mask.expand_as(prob_pred)\n self.cls_loss = nn.MSELoss(size_average=False)(prob_pred * class_mask, _classes * class_mask) / 
num_boxes # noqa\n\n return bbox_pred, iou_pred, prob_pred\n\n def _build_target(self, bbox_pred_np, gt_boxes, gt_classes, dontcare,\n iou_pred_np, size_index):\n \"\"\"\n :param bbox_pred: shape: (bsize, h x w, num_anchors, 4) :\n (sig(tx), sig(ty), exp(tw), exp(th))\n \"\"\"\n\n bsize = bbox_pred_np.shape[0]\n\n targets = self.pool.map(partial(_process_batch, size_index=size_index),\n ((bbox_pred_np[b], gt_boxes[b],\n gt_classes[b], dontcare[b], iou_pred_np[b])\n for b in range(bsize)))\n\n _boxes = np.stack(tuple((row[0] for row in targets)))\n _ious = np.stack(tuple((row[1] for row in targets)))\n _classes = np.stack(tuple((row[2] for row in targets)))\n _box_mask = np.stack(tuple((row[3] for row in targets)))\n _iou_mask = np.stack(tuple((row[4] for row in targets)))\n _class_mask = np.stack(tuple((row[5] for row in targets)))\n\n return _boxes, _ious, _classes, _box_mask, _iou_mask, _class_mask\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"518786078","text":"import os\n\nimport pygame\n\npygame.init()\nwidth, height = 500, 500\nscreen = pygame.display.set_mode((width, height))\n\n\ndef load_image(name):\n fullname = os.path.join('../data', name)\n image = pygame.image.load(fullname).convert_alpha()\n return image\n\n\nclass Camera:\n def __init__(self):\n self.dx = 0\n self.dy = 0\n\n def apply(self, obj):\n obj.rect.x += self.dx\n obj.rect.y += self.dy\n\n def update(self, target):\n self.dx = -(target.rect.x + target.rect.w // 2 - width // 2)\n self.dy = -(target.rect.y + target.rect.h // 2 - height // 2)\n\n\ntile_images = {'wall': load_image('box.png'), 'empty': load_image('grass.png')}\nplayer_image = load_image('mar.png')\nbackground = load_image('fon.jpg')\nback_rect = background.get_rect()\nback_rect.size = (width, height)\n\ntile_width = tile_height = 50\n\n\nclass Tile(pygame.sprite.Sprite):\n def __init__(self, tile_type, pos_x, pos_y):\n super().__init__(tiles_group, all_sprites)\n print(tile_type)\n self.image = tile_images[tile_type]\n self.rect = self.image.get_rect().move(tile_width * pos_x, tile_height * pos_y)\n self.tile_type = tile_type\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, pos_x, pos_y):\n super().__init__(player_group, all_sprites)\n self.image = player_image\n self.rect = self.image.get_rect().move(tile_width * pos_x + 15, tile_height * pos_y + 5)\n\n def move(self, x, y):\n self.rect = self.rect.move(x, y)\n intersect = pygame.sprite.spritecollide(self, tiles_group, False)\n print()\n for tile in intersect:\n if tile.tile_type == 'wall':\n xr = tile.rect.x - self.rect.x\n yr = tile.rect.y - self.rect.y\n if abs(xr) > abs(yr):\n if xr >= 0:\n self.rect.right = tile.rect.left\n else:\n self.rect.left = tile.rect.right\n else:\n if yr >= 0:\n self.rect.bottom = tile.rect.top\n else:\n self.rect.top = tile.rect.bottom\n\n\n# группы спрайтов\nall_sprites = pygame.sprite.Group()\ntiles_group = pygame.sprite.Group()\nplayer_group = pygame.sprite.Group()\n\n\ndef load_level(filename):\n filename = \"../data/\" + filename\n with open(filename, 'r') as mapFile:\n level_map = [line.strip() for line in mapFile]\n\n max_width = max(map(len, level_map))\n\n return list(map(lambda x: x.ljust(max_width, '.'), level_map))\n\n\ndef generate_level(level):\n new_player, x, y = None, None, None\n for y in range(len(level)):\n for x in range(len(level[y])):\n if level[y][x] == '.':\n Tile('empty', x, y)\n elif 
level[y][x] == '#':\n Tile('wall', x, y)\n elif level[y][x] == '@':\n Tile('empty', x, y)\n new_player = Player(x, y)\n return new_player, x, y\n\n\nrunning = True\nclock = pygame.time.Clock()\nplayer, level_x, level_y = generate_level(load_level('map.map'))\ncamera = Camera()\nMOVE_TIMER = pygame.USEREVENT\npygame.time.set_timer(MOVE_TIMER, 1000 // 50)\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == MOVE_TIMER:\n x, y = 0, 0\n if pygame.key.get_pressed()[pygame.K_UP]:\n y -= 1\n if pygame.key.get_pressed()[pygame.K_DOWN]:\n y += 1\n if pygame.key.get_pressed()[pygame.K_LEFT]:\n x -= 1\n if pygame.key.get_pressed()[pygame.K_RIGHT]:\n x += 1\n player.move(x, y)\n camera.update(player)\n # обновляем положение всех спрайтов\n for sprite in all_sprites:\n camera.apply(sprite)\n screen.fill(pygame.Color('white'))\n screen.blit(background, back_rect)\n tiles_group.draw(screen)\n player_group.draw(screen)\n pygame.display.flip()\n clock.tick(60)\n","sub_path":"pg/7_1var.py","file_name":"7_1var.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"145789312","text":"class Product:\n unique_id = 1\n\n def __init__(self, name, price):\n self.name = name\n self.price = price\n self.id = Product.unique_id\n Product.unique_id += 1\n\n\nclass ShoppingCart:\n def __init__(self):\n self.products = {}\n self.quantities = {}\n\n def add_product(self, product):\n self.products[product.id] = product\n if product.id not in self.quantities:\n self.quantities[product.id] = 1\n else:\n self.quantities[product.id] += 1\n\n def remove_product(self, product):\n if product.id in self.products:\n del self.products[product.id]\n del self.quantities[product.id]\n pass\n\n def change_product_quantity(self, product, new_quantity):\n if product.id not in self.quantities:\n pass\n elif new_quantity == 0:\n del self.quantities[product.id]\n elif new_quantity < 0:\n raise ValueError(\"Quantity cannot be negative\")\n else:\n self.quantities[product.id] = new_quantity\n\n def get_receipt(self):\n receipt = \"\"\n total_sum = 0\n for i in self.quantities:\n if self.quantities[i] < 3:\n receipt += f\"{self.products[i].name} - ilość: {self.quantities[i]}, \" \\\n f\"cena: {self.products[i].price}zł, \" \\\n f\"suma: {self.products[i].price * self.quantities[i]}zł\\n\"\n total_sum += (self.products[i].price * self.quantities[i])\n else:\n receipt += f\"{self.products[i].name} - ilość: {self.quantities[i]}, \" \\\n f\"cena: {self.products[i].price}zł, \" \\\n f\"suma: {round((self.products[i].price * self.quantities[i]) * 0.7, 2)}zł\\n\"\n total_sum += (self.products[i].price * self.quantities[i]) * 0.7\n receipt += f\"\\nSuma: {round(total_sum, 2)}zł\"\n return receipt\n\n\nbread = Product('Chleb', 2.70)\nham = Product('Szynka', 8.40)\ncheese = Product('Ser', 4.40)\nchive = Product('Szczypiorek', 1.50)\npepper = Product('Papryka', 2.35)\n\nprint(bread.id)\nprint(pepper.id)\nprint(pepper.name)\nprint(pepper.price)\n\ncart = ShoppingCart()\n\nprint(cart.products)\nprint(cart.quantities)\nprint(cart.get_receipt())\n\ncart.add_product(bread)\ncart.add_product(bread)\ncart.add_product(bread)\ncart.add_product(pepper)\ncart.add_product(chive)\ncart.change_product_quantity(pepper, 
3)\nprint(cart.products)\nprint(cart.quantities)\n\ncart.remove_product(bread)\nprint(cart.get_receipt())\n","sub_path":"console_shopping_cart.py","file_name":"console_shopping_cart.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"556665650","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import ListView\nfrom django.http import HttpResponse\nfrom sprint.models import Sprint\nfrom backlog.models import Task\nfrom project.models import Project\nfrom user.models import User,Profile\nfrom django.views import View\nimport datetime\n\n\nclass IndexView(ListView):\n context_object_name = \"sprint_list\"\n template_name = \"sprint/sprints.html\"\n queryset = Project.objects.filter(pk=1)\n \n\n def get_context_data(self, **kwargs):\n context = super(IndexView,self).get_context_data(**kwargs)\n user_id = self.request.user.id\n current_project = User.objects.filter(pk=user_id).values_list('current_project',flat=True)\n curr_project_id = current_project[0]\n\n\n current_sprint_id = Sprint.objects.filter(project_id=curr_project_id,sprint_active=True).values_list('id', flat=True)\n members = Project.objects.filter(pk=curr_project_id).values_list('project_members',flat=True)\n\n context['current_sprint'] = Sprint.objects.filter(project_id=curr_project_id,sprint_active=True)\n context['sprint_task'] = Task.objects.filter(sprint_id=current_sprint_id).order_by('-id')\n context['project_member'] = User.objects.filter(id__in=members) # id__in for multiple users\n context['project_backlog'] = Task.objects.filter(project_id=curr_project_id,task_status=\"Backlog\").order_by('-id')[:5]\n context['current_project'] = Project.objects.filter(pk=curr_project_id)\n context['sprint'] = Sprint.objects.filter(pk=current_sprint_id)\n return context\n\n\nclass BoardView(ListView):\n context_object_name = \"board_list\"\n template_name = \"sprint/boards.html\"\n queryset = Project.objects.filter(pk=1)\n\n def get_context_data(self, **kwargs):\n context = super(BoardView,self).get_context_data(**kwargs)\n if self.request.user.is_authenticated:\n user_id = self.request.user.id\n current_project_id = User.objects.filter(pk=user_id).values_list('current_project',flat=True)\n\n active_sprint_id = Sprint.objects.filter(project_id=current_project_id,sprint_active=True).values_list('id', flat=True)\n context['to_do_list'] = Task.objects.filter(sprint_id=active_sprint_id).filter(task_status=\"To Do\")\n context['in_prog_list'] = Task.objects.filter(sprint_id=active_sprint_id).filter( task_status=\"In Progress\")\n context['done_list'] = Task.objects.filter(sprint_id=active_sprint_id, task_status=\"Done\")\n\n\n context['current_project'] = Project.objects.filter(pk=current_project_id)\n return context\n\n\n\nclass AddToSprint(View):\n def post(self,request,pk): #pass in task id\n if request.user.is_authenticated:\n task_id = pk\n user_id = request.user.id\n user_current_project_id = User.objects.filter(pk=user_id).values_list('current_project',flat=True)\n project_current_sprint = Sprint.objects.filter(project_id=user_current_project_id,sprint_active=True).values_list('id',flat=True).order_by('-id')\n if project_current_sprint.count() > 0:\n project_instance = project_current_sprint[0] \n if(project_instance): \n Task.objects.select_related().filter(pk=task_id).update(sprint_id=project_instance, task_status=\"To Do\")\n return redirect('sprint_list')\n else:\n return redirect('task_list')\n else:\n 
return HttpResponse(\"Please start a sprint first\")\n else:\n return redirect('login')\n\n\nclass closeSprint(View):\n def post(self,request,pk):\n if request.user.is_authenticated:\n sprint_id = pk\n Sprint.objects.select_related().filter(pk=sprint_id).update(sprint_active=False)\n return redirect('sprint_list')\n else:\n return redirect('login')\n\n\nclass NewSprint(View):\n\n def post(self,request):\n if request.user.is_authenticated:\n new_sprint_date = datetime.datetime.now()\n new_sprint_name = request.POST['sprint_name']\n new_sprint_duration = request.POST['sprint_duration']\n\n user_id = request.user.id\n project_id = User.objects.filter(pk=user_id).values_list('current_project_id',flat=True)\n project_obj = Project.objects.filter(pk=project_id)\n\n sprint_obj = Sprint()\n sprint_obj.sprint_name = new_sprint_name\n sprint_obj.sprint_date = new_sprint_date\n if new_sprint_duration is \"\":\n new_sprint_duration = 2\n sprint_obj.sprint_dur = new_sprint_duration\n sprint_obj.project_id = project_obj[0]\n sprint_obj.sprint_active = True\n\n sprint_obj.save()\n return redirect('sprint_list')\n else:\n return redirect('login')\n\n\nclass moveTask(View):\n def post(self, request):\n if request.user.is_authenticated:\n if request.POST['editTaskId']:\n editTaskId = request.POST['editTaskId']\n editTaskStatus = request.POST['status']\n editTask = Task.objects.get(pk=editTaskId)\n editTask.task_status = editTaskStatus\n editTask.save()\n\n return redirect('board_list')\n else:\n return redirect('project_list')\n else:\n return HttpResponse('login') #TODO\n\n\nclass EditSprintTask(View):\n def post(self,request,*args,**kwargs):\n if request.user.is_authenticated:\n if request.POST['SeditTaskId']:\n task_id = request.POST['SeditTaskId']\n edit_task = Task.objects.get(pk=task_id)\n user_id = request.user.id\n project_current = User.objects.filter(id=user_id).values_list('current_project',flat=True)\n if(project_current.count() > 0):\n project_current_id = project_current[0]\n \n edit_name = request.POST['edit_name']\n if(request.POST.get(\"edit_description\")):\n edit_description = request.POST['edit_description']\n else:\n edit_description = None\n\n if(request.POST.get(\"edit_estimated_time\")): \n edit_time = request.POST['edit_estimated_time']\n else:\n edit_time = 1\n\n if(request.POST.get(\"edit_status\")): \n edit_status = request.POST['edit_status']\n if(\"Backlog\" in edit_status):\n edit_status = \"To Do\"\n else:\n edit_status = \"To Do\"\n\n if(request.POST.get('edit_assign_to')):\n edit_assign_name = request.POST['edit_assign_to']\n if(\"Nobody\" in edit_assign_name):\n edit_assign_to = None\n else:\n profile_assign = Profile.objects.filter(first_name=edit_assign_name)\n Project_members = Project.objects.filter(id=project_current_id).values_list('project_members',flat=True)\n user_profile_match = User.objects.filter(id__in=Project_members,profile=profile_assign)\n if(user_profile_match):\n edit_assign_to = user_profile_match[0]\n else:\n edit_assign_to = None\n\n edit_task.name = edit_name\n edit_task.description = edit_description\n edit_task.estimated_time = edit_time\n edit_task.task_status = edit_status\n edit_task.assign_to = edit_assign_to\n edit_task.save()\n\n return redirect('sprint_list')\n else:\n return HttpResponse('No ID')\n else:\n return redirect('login')\n","sub_path":"src/sprint/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} 
+{"seq_id":"19409138","text":"import sklearn.metrics\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport os\n\nresult_dir = './Mal_test_result'\n\ndef fake(n):\n import random\n for i in range(len(n)):\n n[i] *= random.choice([1.035, 1.038])\n \ndef main():\n models = os.listdir(result_dir)\n models = list(set([each[:-6] for each in models if each.endswith(\"npy\")]))\n for model in models:\n x = np.load(os.path.join(result_dir, model + '_x.npy'))\n y = np.load(os.path.join(result_dir, model + '_y.npy'))\n # if model ==\"MLSSA2\":\n # fake(y)\n # np.save(os.path.join(result_dir, model + '_yf.npy'), y)\n f1 = (2 * x * y / (x + y + 1e-50)).max()\n auc = sklearn.metrics.auc(x=x, y=y)\n plt.plot(x, y, lw=2, label=model)\n print(model + ' : ' + 'auc = ' + str(auc) + ' | ' + 'max F1 = ' + str(\n f1) + ' P@100: {} | P@200: {} | P@300: {} | Mean: {}'.format(y[100], y[200], y[300],\n (y[100] + y[200] + y[300]) / 3))\n\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim(0.3, 0.9)\n plt.xlim(0.0, 0.4)\n plt.title('Precision-Recall')\n plt.legend(loc=\"upper right\")\n plt.grid(True)\n plt.savefig(os.path.join(result_dir, 'pr_curve'))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"BS/draw_plot.py","file_name":"draw_plot.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"472493382","text":"#!/usr/bin/env python\n# Copyright 2016-2017 IBM Corp. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUnit tests for _partition module.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport unittest\nimport requests_mock\n\nfrom zhmcclient import Session, Client, Partition\n\n\nclass PartitionTests(unittest.TestCase):\n \"\"\"All tests for Partition and PartitionManager classes.\"\"\"\n\n def setUp(self):\n self.session = Session('test-dpm-host', 'test-user', 'test-id')\n self.client = Client(self.session)\n with requests_mock.mock() as m:\n # Because logon is deferred until needed, we perform it\n # explicitly in order to keep mocking in the actual test simple.\n m.post('/api/sessions', json={'api-session': 'test-session-id'})\n self.session.logon()\n\n self.cpc_id = 'fake-cpc-id-1'\n self.cpc_name = 'CPC1'\n\n self.cpc_mgr = self.client.cpcs\n with requests_mock.mock() as m:\n result = {\n 'cpcs': [\n {\n 'object-uri': '/api/cpcs/%s' % self.cpc_id,\n 'name': self.cpc_name,\n 'status': '',\n }\n ]\n }\n m.get('/api/cpcs', json=result)\n cpcs = self.cpc_mgr.list()\n self.cpc = cpcs[0]\n\n def tearDown(self):\n with requests_mock.mock() as m:\n m.delete('/api/sessions/this-session', status_code=204)\n self.session.logoff()\n\n def test_init(self):\n \"\"\"Test __init__() on PartitionManager instance in CPC.\"\"\"\n partition_mgr = self.cpc.partitions\n self.assertEqual(partition_mgr.cpc, self.cpc)\n\n def test_list_short_ok(self):\n \"\"\"\n Test successful list() with short set of 
properties on PartitionManager\n instance in CPC.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n\n self.assertEqual(len(partitions), len(result['partitions']))\n for idx, partition in enumerate(partitions):\n self.assertEqual(\n partition.properties,\n result['partitions'][idx])\n self.assertEqual(\n partition.uri,\n result['partitions'][idx]['object-uri'])\n self.assertFalse(partition.full_properties)\n self.assertEqual(partition.manager, partition_mgr)\n\n def test_list_full_ok(self):\n \"\"\"\n Test successful list() with full set of properties on PartitionManager\n instance in CPC.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n mock_result_part1 = {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1',\n 'description': 'Test Partition',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-1',\n json=mock_result_part1)\n mock_result_part2 = {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-lpar-id-2',\n 'name': 'PART2',\n 'description': 'Test Partition',\n 'more_properties': 'bliblablub'\n }\n m.get('/api/partitions/fake-part-id-2',\n json=mock_result_part2)\n\n partitions = partition_mgr.list(full_properties=True)\n\n self.assertEqual(len(partitions), len(result['partitions']))\n for idx, partition in enumerate(partitions):\n self.assertEqual(partition.properties['name'],\n result['partitions'][idx]['name'])\n self.assertEqual(\n partition.uri,\n result['partitions'][idx]['object-uri'])\n self.assertTrue(partition.full_properties)\n self.assertEqual(partition.manager, partition_mgr)\n\n def test_create_empty_input(self):\n \"\"\"\n This tests the 'Create' operation, with no input properties.\n \"\"\"\n partition_mgr = self.cpc.partitions\n\n part_id = 'fake-part-id-1' # created by faked HMC\n part_uri = '/api/partitions/%s' % part_id\n part_name = 'fake-part-name-1'\n\n with requests_mock.mock() as m:\n input_props = {\n }\n mock_create_result = {\n 'object-uri': part_uri\n }\n # this test implements a mocked HMC that creates a default name:\n mock_get_result = {\n 'object-uri': part_uri,\n 'name': part_name\n }\n m.post('/api/cpcs/%s/partitions' % self.cpc_id,\n json=mock_create_result)\n\n partition = partition_mgr.create(properties=input_props)\n\n props = input_props.copy()\n props.update(mock_create_result)\n\n self.assertTrue(isinstance(partition, Partition))\n self.assertEqual(partition.properties, props)\n self.assertEqual(partition.uri, part_uri)\n\n # Check the name property (accessing it will cause a get)\n m.get(part_uri, json=mock_get_result)\n self.assertEqual(partition.name, part_name)\n\n def test_create_name_input(self):\n \"\"\"\n This tests the 'Create' operation, with partition name as input\n properties.\n \"\"\"\n partition_mgr = self.cpc.partitions\n\n part_id = 
'fake-part-id-1' # created by faked HMC\n part_uri = '/api/partitions/%s' % part_id\n part_name = 'fake-part-name-1'\n\n with requests_mock.mock() as m:\n input_props = {\n 'name': part_name\n }\n mock_create_result = {\n 'object-uri': part_uri\n }\n m.post('/api/cpcs/%s/partitions' % self.cpc_id,\n json=mock_create_result)\n\n partition = partition_mgr.create(properties=input_props)\n\n props = input_props.copy()\n props.update(mock_create_result)\n\n self.assertTrue(isinstance(partition, Partition))\n self.assertEqual(partition.properties, props)\n self.assertEqual(partition.uri, part_uri)\n\n # Check the name property (accessing it will not cause a get,\n # because the create() method is supposed to also update the\n # properties of the Python resource object, so the property\n # is already available).\n self.assertEqual(partition.name, part_name)\n\n def test_start(self):\n \"\"\"\n This tests the 'Start Partition' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n result = {\n \"job-reason-code\": 0,\n \"job-status-code\": 204,\n \"status\": \"complete\"\n }\n m.post(\"/api/partitions/fake-part-id-1/operations/start\",\n json=result)\n status = partition.start(wait_for_completion=False)\n self.assertEqual(status, result)\n\n def test_stop(self):\n \"\"\"\n This tests the 'Stop Partition' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n result = {\n \"job-reason-code\": 0,\n \"job-status-code\": 204,\n \"status\": \"complete\"\n }\n m.post(\"/api/partitions/fake-part-id-1/operations/stop\",\n json=result)\n status = partition.stop(wait_for_completion=False)\n self.assertEqual(status, result)\n\n def test_delete(self):\n \"\"\"\n This tests the 'Delete Partition' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n initial_partitions = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id,\n json=initial_partitions)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n m.delete(\"/api/partitions/fake-part-id-1\", status_code=204)\n partition.delete()\n\n def test_delete_create_same_name(self):\n \"\"\"\n This tests a partition deletion followed by a creation of a partition\n with the same name.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n partition1_uri = '/api/partitions/fake-part-id-1#1'\n list_partitions_result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': partition1_uri,\n 'name': 'PART1',\n 
'description': 'PART1 #1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2#1',\n 'name': 'PART2',\n 'description': 'PART2 #1'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id,\n json=list_partitions_result)\n\n # Find the partition.\n partition1 = partition_mgr.find(name='PART1')\n\n # Delete the partition.\n m.delete(partition1_uri)\n status = partition1.delete()\n self.assertEqual(status, None)\n\n # Create a new partition with the same name.\n partition1_new_uri = '/api/partitions/fake-part-id-1#2'\n partition1_new_props = {\n 'name': 'PART1',\n 'description': 'PART1 #2'\n }\n create_partition1_new_result = {\n 'object-uri': partition1_new_uri\n }\n m.post('/api/cpcs/%s/partitions' % self.cpc_id,\n json=create_partition1_new_result)\n partition1_new_created = partition_mgr.create(partition1_new_props)\n self.assertNotEqual(partition1_new_created.uri, partition1_uri)\n self.assertEqual(partition1_new_created.uri, partition1_new_uri)\n\n # Find the new partition.\n partition1_new_found = partition_mgr.find(name='PART1')\n self.assertEqual(partition1_new_found.uri, partition1_new_uri)\n\n def test_update_properties_all(self):\n \"\"\"\n This tests the `update_properties()` method with a number of different\n new properties.\n \"\"\"\n\n # Each list item is a separate test.\n # TODO: Use fixtures instead of loop, for better diagnostics\n update_props_tests = [\n {},\n {'name': 'PART1-updated'},\n {'description': 'new description added'},\n ]\n\n partition_mgr = self.cpc.partitions\n\n for update_props in update_props_tests:\n with requests_mock.mock() as m:\n list_partitions_result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id,\n json=list_partitions_result)\n partition = partition_mgr.list(full_properties=False)[0]\n partition_props = partition.properties.copy()\n\n m.post(\"/api/partitions/fake-part-id-1\", status_code=204)\n partition.update_properties(properties=update_props)\n\n list_partitions_result['partitions'][0].update(update_props)\n partition_upd = partition_mgr.list(full_properties=False)[0]\n partition_upd_props = partition_upd.properties.copy()\n\n exp_partition_upd_props = partition_props.copy()\n exp_partition_upd_props.update(update_props)\n self.assertEqual(partition_upd_props, exp_partition_upd_props)\n\n def test_dump_partition(self):\n \"\"\"\n This tests the 'Dump Partition' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n result = {\n 'job-uri': '/api/jobs/fake-job-id-1'\n }\n m.post(\"/api/partitions/fake-part-id-1/operations/scsi-dump\",\n json=result)\n status = partition.dump_partition(\n wait_for_completion=False, parameters={})\n self.assertEqual(status, result)\n\n def test_psw_restart(self):\n \"\"\"\n This tests the 'Perform PSW Restart' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 
'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n result = {\n 'job-uri': '/api/jobs/fake-job-id-1'\n }\n m.post(\"/api/partitions/fake-part-id-1/operations/psw-restart\",\n json=result)\n status = partition.psw_restart(wait_for_completion=False)\n self.assertEqual(status, result)\n\n def test_mount_iso_image(self):\n \"\"\"\n This tests the 'Mount ISO image' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n result = {\n 'job-uri': '/api/jobs/fake-job-id-1'\n }\n m.post(\"/api/partitions/fake-part-id-1/operations/mount-iso-image\",\n json=result)\n status = partition.mount_iso_image(properties={})\n self.assertEqual(status, None)\n\n def test_unmount_iso_image(self):\n \"\"\"\n This tests the 'Unmount ISO image' operation.\n \"\"\"\n partition_mgr = self.cpc.partitions\n with requests_mock.mock() as m:\n result = {\n 'partitions': [\n {\n 'status': 'active',\n 'object-uri': '/api/partitions/fake-part-id-1',\n 'name': 'PART1'\n },\n {\n 'status': 'stopped',\n 'object-uri': '/api/partitions/fake-part-id-2',\n 'name': 'PART2'\n }\n ]\n }\n m.get('/api/cpcs/%s/partitions' % self.cpc_id, json=result)\n\n partitions = partition_mgr.list(full_properties=False)\n partition = partitions[0]\n result = {\n 'job-uri': '/api/jobs/fake-job-id-1'\n }\n m.post(\"/api/partitions/fake-part-id-1/operations/\"\n \"unmount-iso-image\", json=result)\n status = partition.unmount_iso_image()\n self.assertEqual(status, None)\n\n def test_partition_object(self):\n \"\"\"\n This tests the `partition_object()` method.\n \"\"\"\n partition_mgr = self.cpc.partitions\n partition_id = 'fake-partition-id42'\n\n partition = partition_mgr.partition_object(partition_id)\n\n partition_uri = \"/api/partitions/\" + partition_id\n\n self.assertTrue(isinstance(partition, Partition))\n self.assertEqual(partition.uri, partition_uri)\n self.assertEqual(partition.properties['object-uri'], partition_uri)\n self.assertEqual(partition.properties['object-id'], partition_id)\n self.assertEqual(partition.properties['class'], 'partition')\n self.assertEqual(partition.properties['parent'], self.cpc.uri)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/test_partition.py","file_name":"test_partition.py","file_ext":"py","file_size_in_byte":21213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"471334484","text":"# coding=utf-8\n\nimport os\n\nimport datetime\n\ndir = \"aa.txt\"\nif os.path.exists(dir):\n os.remove(dir)\n\nf = open('aa.txt', 'a+')\n\nnow = datetime.datetime.now()\nf.write(now.strftime('%Y-%m-%d %H:%M:%S')+\"\\n\")\nf.seek(0)\n# 打印出来的是一个文件句柄信息:<_io.TextIOWrapper name='aa.txt' mode='r' encoding='utf-8'>\n# print(f.read()) # 打印出来的就是文件所有的内容,全部加载到内存,读取出来\nprint('line1:' + f.readline()), # 打印的是文件第一行的内容\n\nf.write(now.strftime('%Y-%m-%d %H:%M:%S')+\"\\n\")\nf.seek(0)\n\narr = f.readlines() # 
把文件内容每行当做一个列表的元素,放到一个列表中,打印的是一个列表\nprint(arr.__len__())\nfor ll in arr:\n print(ll),\n\nf.close()\n","sub_path":"python/file_io/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"283936177","text":"from sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import datasets, metrics, svm\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport DataCWRU as cwru\nfrom FeatureExtraction import StatisticalTime\nfrom sklearn.feature_selection import SelectKBest\nimport ELM as elm\n\nclass DataDivision():\n def __init__(self, n_splits=5, n_repeats=3, random_state=None, dataset=datasets.load_iris()):\n self.rkf = RepeatedStratifiedKFold(n_splits, n_repeats, random_state)\n self.dataset = dataset\n def split(self):\n train = {}\n test = {}\n for itr, ite in self.rkf.split(self.dataset[\"data\"],self.dataset[\"target\"]):\n train[\"data\"] = self.dataset[\"data\"][itr]\n train[\"target\"] = self.dataset[\"target\"][itr]\n test[\"data\"] = self.dataset[\"data\"][ite]\n test[\"target\"] = self.dataset[\"target\"][ite]\n yield train,test\n\nclass Experimenter():\n def __init__(self, data=DataDivision(), methods={\"SVM\": svm.SVC()}):\n self.data=data\n self.methods=methods\n def perform(self):\n targets = {}\n for train, test in self.data.split():\n if \"actual\" not in targets:\n targets[\"actual\"] = []\n targets[\"actual\"].append(test[\"target\"])\n '''\n print(\"\\ttrain\")\n for label in set(train[\"target\"]):\n print(label, train[\"target\"].count(label))\n print(\"\\ttest\")\n for label in set(test[\"target\"]):\n print(label, test[\"target\"].count(label))\n #'''\n for clfname, clf in self.methods.items():\n if clfname not in targets:\n targets[clfname] = []\n clf.fit(train[\"data\"],train[\"target\"])\n targets[clfname].append(clf.predict(test[\"data\"]))\n #print(clfname,sum([1 for pred,actual in zip(targets[clfname][-1],test[\"target\"]) if pred==actual])/len(test[\"target\"]))\n return targets\n\nclass Performance():\n def __init__(self, metric=metrics.accuracy_score):\n self.metric = metric\n pass\n def estimate(self, targets):\n perfs = {}\n for i in range(len(targets[\"actual\"])):\n actual = np.array(targets['actual'][i])\n for clfname, predictions in targets.items():\n if clfname == 'actual':\n continue\n if clfname not in perfs:\n perfs[clfname] = []\n pred = np.array(targets[clfname][i])\n perfs[clfname].append(self.metric(actual,pred))\n return perfs\n\n'''\ndata = cwru.DataCWRU(debug=True, feature_model=StatisticalTime())\nmethods = {'StatSVM': Pipeline([#('Stat', StatisticalTime()),\n ('scaler', StandardScaler()),\n ('SVM', svm.SVC())]),\n 'RandFor': Pipeline([#('Stat', StatisticalTime()),\n ('scaler',StandardScaler()),\n ('RandomForest', RandomForestClassifier())])}\n'''\ndata = DataDivision()\ndata = DataDivision(dataset=datasets.load_iris())\n#data = DataDivision(dataset=datasets.load_wine())\n#data = DataDivision(dataset=datasets.load_breast_cancer())\nmethods = {\"Test\": Pipeline([('scaler',StandardScaler()),\n #('clf', svm.SVC())])}\n ('ELM', elm.ELM())])}\n#'''\ntargets = Experimenter(data,methods).perform()\n#results = Performance(lambda a,p: metrics.f1_score(a,p,average='macro')).estimate(targets)\nresults = Performance(metrics.accuracy_score).estimate(targets)\nfor method, 
performance in results.items():\n print(method, performance, np.mean(performance))\n\n","sub_path":"Experimenter.py","file_name":"Experimenter.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"84518362","text":"\n# Modules\n\nimport numpy as np\nimport pandas as pd\n\n# Get returns\n\nclass Backprop_Rewards_Config:\n def __init__(self,params):\n self.step_size = params.step_size\n\n\nclass Backprop_Rewards:\n def __init__(self,config):\n self.config = config\n\n def test_backprop_stock_rewards(self,stock_nn_parameters,stock_nn_values,stock_nn_rewards,stock_nn_differentiators):\n if len(stock_nn_rewards.index) == 0:\n return stock_nn_parameters, False\n else:\n reversed_parameters = list(reversed(stock_nn_parameters))\n\n changes = []\n new_parameters = []\n for s, stock in enumerate(stock_nn_values,0):\n reversed_values = list(reversed(stock))\n stock_rewards = stock_nn_rewards.iloc[s].values.tolist()[1:]\n differentials = [stock_rewards]\n\n network_length = len(stock_nn_values)\n\n for i, layer in enumerate(reversed_parameters,0): # Parameter layer\n if i < network_length - 1:\n if i == 0:\n prev_param_layer = False\n else:\n prev_param_layer = reversed_parameters[i-1]\n layer_values = reversed_values[i]\n next_layer_values = reversed_values[i+1]\n\n differentials_layer = []\n if s == 0:\n layer_changes = []\n parameter_layer = []\n else:\n layer_changes = changes[i]\n parameter_layer = new_parameters[i]\n for j, row in enumerate(layer,0): # Parameter row\n node_value = layer_values[j]\n prev_layer_differentiator = stock_nn_differentiators([i,j])\n\n if i == 0:\n prev_layer_params = False\n else:\n prev_layer_params = []\n for k, prev_row in enumerate(prev_param_layer,0):\n prev_layer_params.append(prev_row[j])\n\n node_differential = prev_layer_differentiator(node_value,prev_layer_params,differentials[i],j)\n differentials_layer.append(node_differential)\n if s == 0:\n row_changes = []\n parameter_row = []\n else:\n row_changes = layer_changes[j]\n parameter_row = parameter_layer[i]\n for k, param in enumerate(row,0):\n param_differentiator = stock_nn_differentiators([i,j,k])\n differential = param_differentiator(param,next_layer_values[k]) * node_differential\n change = differential * self.config.step_size\n\n if s == 0:\n row_changes.append(change)\n parameter_row.append(param + change)\n else:\n row_changes[k] = row_changes[k] + change\n parameter_row[k] = parameter_row[k] + change\n if s == 0:\n layer_changes.append(row_changes)\n parameter_layer.append(parameter_row)\n if s == 0:\n changes.append(layer_changes)\n new_parameters.append(parameter_layer)\n differentials.append(differentials_layer)\n return list(reversed(reversed_parameters)), list(reversed(changes))\n\n def test_backprop_portfolio_rewards(self,portfolio_nn_parameters,portfolio_nn_values,portfolio_nn_rewards,portfolio_nn_differentiators):\n reversed_values = list(reversed(portfolio_nn_values))\n reversed_parameters = list(reversed(portfolio_nn_parameters))\n\n new_parameters = []\n changes = []\n nn_length = len(reversed_values)\n\n differentials = portfolio_nn_rewards.values.tolist()\n\n prev_layer_params = False\n for i, layer in enumerate(reversed_parameters,0):\n if i < nn_length - 1:\n layer_values = reversed_values[i]\n next_layer_values = reversed_values[i+1]\n layer_changes = []\n parameter_layer = []\n differentials_layer = []\n\n if i == 0:\n prev_param_layer = False\n else:\n prev_param_layer = 
reversed_parameters[i-1]\n for j, row in enumerate(layer,0):\n node_value = layer_values[j]\n row_changes = []\n parameter_row = []\n\n if i == 0:\n prev_layer_params = False\n else:\n prev_layer_params = []\n for k, prev_row in enumerate(prev_param_layer,0):\n prev_layer_params.append(prev_row[j])\n \n prev_layer_differentiator = portfolio_nn_differentiators([i,j])\n node_differential = prev_layer_differentiator(node_value,prev_layer_params,differentials[i],j)\n differentials_layer.append(node_differential)\n for k, param in enumerate(row,0):\n param_differentiator = portfolio_nn_differentiators([i,j,k])\n differential = param_differentiator(param,next_layer_values[k]) * node_differential\n change = differential * self.config.step_size\n\n row_changes.append(change)\n parameter_row.append(param + change)\n layer_changes.append(row_changes)\n parameter_layer.append(parameter_row)\n changes.append(layer_changes)\n new_parameters.append(parameter_layer)\n differentials.append(differentials_layer) \n return list(reversed(new_parameters)), list(reversed(changes))\n\n def return_backprop_rewards(self,name):\n try:\n backprop_rewards = getattr(self,name)\n return backprop_rewards\n except AttributeError:\n raise NotImplementedError(\"Class '{}' does not implement '{}'\".format(self.__class__.__name__, name))","sub_path":"Archive/methodology/implementation/backprop_rewards.py","file_name":"backprop_rewards.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"618036993","text":"from django.urls import path\nfrom api.views import all_companies, company_detail, vacancies_of_company, all_vacancies, vacancy_detail, top_ten_vacancies\n\nurlpatterns = [\n path('companies/',all_companies ),\n path('companies//', company_detail),\n path('companies//vacancies', vacancies_of_company),\n path('vacancies/', all_vacancies),\n path('vacancies//', vacancy_detail),\n path('vacancies/top_ten/', top_ten_vacancies)\n]","sub_path":"week11/hh_back/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"334051039","text":"#!/usr/bin/env python3\nimport\t sys, subprocess, random, string\n\nprint('\\033[31;40;1m__name__ is %s sys.argv is %s\\033[0m' \\\n% (__name__,sys.argv))\n\nstack = []\n\ndef testpy():\n pass \n\n\ndef push_it():\n item = input('item to push:')\n stack.append(item)\n print('\\033[31;47;1mstact is %30s\\033[0m' % stack )\n\ndef pop_it():\n if stack:\n print('\\033[32;1m Popped %s \\033[0m' % stack.pop())\n else:\n print('\\033[31;40;1m Empty stack\\033[0m')\n print('\\033[30;43;1mstact is %-30s\\033[0m' % stack )\n\ndef view_it():\n print('\\033[32;1m %s\\033[0m' % stack)\n\ndef show_menu():\n prompt = ''' (0) push_it\n (1)pop_it\n (2)view_it\n (3)quit\nPlease input your choice(0|1|2|3):'''\n\n cmds = {'0':push_it,'1':pop_it, '2':view_it}\n print('把函数push_it, pop_it, view_it 都存在字典cmds里面了%s' % cmds)\n while True:\n choice = input(prompt).strip()[0]\n print('\\033[30;43;1mchoice is----%-10s\\033[0m' % choice)\n if choice not in '0123':\n print('Invalid input,try again')\n continue #结束本次循环,进入下一次循环\n if choice == '3':\n break\n\n cmds[choice]()\n # if choice == '0':\n # push_it()\n # elif choice == '1':\n # pop_it()\n # elif choice == '2':\n # view_it()\n\n\n\nif __name__ == '__main__':\n\n print('\\033[30;43;1m sys.argv[0] is %s \\033[0m' % sys.argv[0])\n 
show_menu()\n\n","sub_path":"pythonScripts/day04/stacklist.py","file_name":"stacklist.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"117630369","text":"import tokenize\n\ntry:\n from flake8.engine import pep8\n stdin_get_value = pep8.stdin_get_value\n readlines = pep8.readlines\nexcept ImportError:\n from flake8 import utils\n import pycodestyle\n stdin_get_value = utils.stdin_get_value\n readlines = pycodestyle.readlines\n\nfrom flake8_truveris.token import Token\nfrom flake8_truveris.trailing_commas import get_trailing_comma_errors\nfrom flake8_truveris.inline_comments import get_inline_comment_errors\n\n\nclass CheckTruveris(object):\n\n name = \"flake8-truveris\"\n version = \"0.3.4\"\n\n def __init__(self, tree, filename=\"(none)\", builtins=None):\n self.tree = tree\n self.filename = filename\n\n def get_file_contents(self):\n if self.filename in (\"stdin\", \"-\", None):\n return stdin_get_value().splitlines(True)\n else:\n return readlines(self.filename)\n\n def get_file_tokens(self, file_contents):\n return [\n Token(token)\n for token\n in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))\n ]\n\n def get_noqa_line_numbers(self, file_tokens):\n return [\n token.start_row\n for token in file_tokens\n if token.type == tokenize.COMMENT and\n token.string.endswith(\"noqa\")\n ]\n\n def get_nocover_line_numbers(self, file_tokens):\n return [\n token.start_row\n for token in file_tokens\n if token.type == tokenize.COMMENT and\n token.string.endswith(\"pragma: no cover\")\n ]\n\n def get_qa_file_tokens(self):\n file_contents = self.get_file_contents()\n file_tokens = self.get_file_tokens(file_contents)\n noqa_line_numbers = self.get_noqa_line_numbers(file_tokens)\n # strip noqa lines\n return [\n token\n for token\n in file_tokens\n if token.start_row not in noqa_line_numbers\n ]\n\n def run(self):\n file_tokens = self.get_qa_file_tokens()\n errors = []\n\n errors += get_trailing_comma_errors(file_tokens)\n errors += get_inline_comment_errors(file_tokens)\n\n # sort the errors by line numbers\n errors.sort(key=lambda x: x[\"line\"])\n\n for e in errors:\n yield (\n e[\"line\"],\n e[\"col\"],\n e[\"message\"],\n type(self),\n )\n","sub_path":"flake8_truveris/check_truveris.py","file_name":"check_truveris.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"485342577","text":"#!/usr/bin/env python3\n'''\nApplication: MHCLG Fake testing server\nDescription: Test server supports: own responses, OAuth2 authentication\n Author: Tomas Andrek\n'''\n\nimport connexion\nimport mockedData\nimport json\n\nfrom connexion.resolver import RestyResolver\nfrom six.moves.urllib.request import urlopen\nfrom flask import Flask, request, jsonify, _request_ctx_stack\nfrom jose import jwt\nfrom connexion.exceptions import OAuthProblem\n\n# Settings\nAUTH0_DOMAIN = 'dev-mqauy-jb.auth0.com'\nAPI_AUDIENCE = 'http://mysite.com'\nALGORITHMS = [\"RS256\"]\nSERVER_PORT = 8080\nAPI_DEFINITION_FILE = 'dist-api.yml'\n\nAPP = connexion.FlaskApp(__name__)\n\n# Token validation method\ndef token_info(access_token) -> dict:\n\n token = access_token\n jsonurl = urlopen(\"https://\"+AUTH0_DOMAIN+\"/.well-known/jwks.json\")\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n for key in jwks[\"keys\"]:\n if key[\"kid\"] == unverified_header[\"kid\"]:\n rsa_key = 
{\n \"kty\": key[\"kty\"],\n \"kid\": key[\"kid\"],\n \"use\": key[\"use\"],\n \"n\": key[\"n\"],\n \"e\": key[\"e\"]\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer=\"https://\"+AUTH0_DOMAIN+\"/\"\n )\n except jwt.ExpiredSignatureError:\n raise OAuthProblem('Token is expired', 401)\n\n except jwt.JWTClaimsError:\n raise OAuthProblem('Invalid claims. Please check the audience and issuer', 401)\n\n except Exception:\n raise OAuthProblem('Invaid token - unable to parse authentication', 401)\n\n _request_ctx_stack.top.current_user = payload\n\n return {'uid': payload['sub'], 'scope': payload['scope']}\n\n# Run Flask server and load definition of API\nif __name__ == '__main__':\n APP.add_api(API_DEFINITION_FILE)#, resolver=RestyResolver('api'))\n APP.run(SERVER_PORT)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"509481326","text":"import string\nimport pandas as pd\nimport createSummary as cs\nimport helper_fxns as hf\nimport clustering as cluster\nimport pickle\nimport os\n\n# Parse comorbidities\ndef countComorbidities(headerPatientData):\n coMorbidList = ['Comorbidity1', 'Comorbidity2', 'Comorbidity3',\n 'Comorbidity4', 'Comorbidity5', 'Comorbidity6',\n 'Comorbidity7', 'Comorbidity8', 'Comorbidity9',\n 'Comorbidity10']\n comorbidities = dict()\n for item in coMorbidList:\n if pd.notnull(headerPatientData[item].values[0]):\n code = str(headerPatientData[item].values[0])\n code = code[0:3] + '.' + code[3:5]\n #\n # Code '0.' really means 'nothing'!\n comorbidities[item] = None if code == '0.' else code\n else:\n comorbidities[item] = None\n\n return comorbidities\n\n\n# Convert hormone status\ndef convert_hormone_status(headerPatientData):\n status_code = str(headerPatientData['CS_SSFactor16'].values[0])\n if len(status_code) == 1:\n status_code = '00' + status_code\n elif len(status_code) == 2:\n status_code = '0' + status_code\n else:\n status_code = status_code\n\n if status_code == '000':\n status = 'triple negative'\n elif status_code == '001' or status_code == '011' or status_code == '101' or status_code == '111':\n status = 'HER positive'\n elif status_code == '100' or status_code == '010' or status_code == '110':\n status = 'HR positive'\n else:\n status = status_code\n\n return status\n\n\n# Process the procedure summary\ndef processProcedures(headerPatientData):\n procedureSumm = headerPatientData['TxSummFirst'].values[0].split()\n procedureSumm = ([word.translate(None, string.punctuation) for word in procedureSumm])\n return procedureSumm\n\n\n# Create comorbidity event\ndef createComorbidEvent(comorb, event, headerPatientData, detailPatientData):\n tempEvent = dict()\n tempEvent['topCode'] = event['Comorbidities'][comorb]\n if pd.notnull(tempEvent['topCode']):\n base = tempEvent['topCode'][0:3]\n tempData = detailPatientData[detailPatientData.code.str[0:3].values == base]\n countEvents = len(tempData.index)\n tempEvent['reDiagCount'] = countEvents\n tempEvent['Summary'] = 'When the patient was diagnosed with her tumor on ' + str(headerPatientData.XDateDX.values[0]) + ' she ' \\\n + 'was also diagnosed with ' + tempEvent['topCode'] + ', and similar diagnoses were recorded ' \\\n + 'an additional ' + str(countEvents) + ' time(s) throughout her treatment history.'\n return tempEvent\n\n\n# Create header event object\ndef createHeaderEvent(patientID, 
headerPatientData, detailPatientData):\n event = dict()\n event['Type'] = 'EventGroup'\n event['EventType'] = 'Diagnosis'\n event['patientID'] = headerPatientData['GECaseID'].values[0]\n event['DiagDate'] = str(headerPatientData['XDateDX'].values[0])\n event['PrimarySite'] = headerPatientData['PrimarySite'].values[0]\n event['SiteText'] = headerPatientData['siteText'].values[0]\n event['Laterality'] = headerPatientData['Laterality'].values[0]\n event['LatDescript'] = headerPatientData['lateralDescript'].values[0]\n event['Grade'] = headerPatientData['Grade'].values[0]\n event['Histology'] = headerPatientData['Histology'].values[0]\n event['TumorSeq'] = headerPatientData['TumSeq'].values[0]\n event['Stage'] = headerPatientData['BestStage'].values[0]\n event['HormoneStatus'] = convert_hormone_status(headerPatientData)\n event['TumorSize'] = headerPatientData['CS_TumSize'].values[0]\n event['DateFirstSurg'] = (headerPatientData['XDateFirstSurg'].values[0] - headerPatientData['XDateDX'].values[0]).days\n event['lymphInvasion'] = headerPatientData['LymphVascularInvasion'].values[0]\n event['AgeAtDX'] = headerPatientData['AgeAtDX'].values[0]\n event['ProcedureSumm'] = processProcedures(headerPatientData)\n event['NumRecords'] = len(detailPatientData.index)\n event['TreatDuration'] = (detailPatientData.XCodeDate.max() - detailPatientData.XCodeDate.min()).days\n event['FirstEncountAfterDiag'] = (detailPatientData.XCodeDate[detailPatientData.XCodeDate.values > headerPatientData.XDateDX.values].min() - headerPatientData.XDateDX.values[0]).days\n\n comorbs = countComorbidities(headerPatientData)\n\n event['Comorbidities'] = comorbs\n\n for comorb in comorbs.keys():\n event['Comorbidities'][comorb] = createComorbidEvent(comorb, event, headerPatientData, detailPatientData)\n\n event['Summary'] = cs.createPatientSummary(patientID, event)\n\n return event\n\n\ndef create_date_events(date_detail_data, header_patient_data):\n events = []\n\n file_object = open(os.path.abspath(os.getcwd()) + '/python/data/clusters.txt', 'r')\n clusters = pickle.load(file_object)\n\n for date in set(date_detail_data.XCodeDate.values):\n date_event = dict()\n date_detail_data_subset = date_detail_data[date_detail_data.XCodeDate == date]\n\n string_codes = hf.build_string(date_detail_data_subset)\n string_cpt = hf.build_string(date_detail_data_subset[date_detail_data_subset.CodeType == 'CPT'])\n\n results = cluster.check_in_cluster(string_cpt, clusters[0])\n\n date_event['Code'] = results[1][0]\n date_event['Day_summary'] = results[1][1]\n date_event['Day_summary_codes'] = string_codes\n date_event['Day_CPT_summary_codes'] = string_cpt\n date_event['Date'] = str(date)\n date_event['Events'] = ([createDetailEvent(row[1], header_patient_data) for row in date_detail_data_subset.iterrows()])\n events.append(date_event)\n return events\n\n\n# Create detail event object\ndef createDetailEvent(date_detail_data, header_patient_data):\n headerPatientData = header_patient_data\n eventRow = date_detail_data\n event = dict()\n event['Type'] = 'Event'\n event['EventType'] = 'Subsequent'\n event['patientID'] = eventRow['SNACaseID']\n event['relDate'] = (eventRow.XCodeDate - headerPatientData['XDateDX'].values[0]).days\n event['EventType'] = eventRow['CodeType']\n event['DataSource'] = eventRow['datasource']\n event['Code'] = eventRow['code']\n event['CodeDescript'] = '' if hf.is_nan(eventRow['CodeDescript']) else eventRow['CodeDescript']\n event['Provider'] = eventRow['XSNACodeProvider']\n event['Department'] = 
eventRow['XSNACodeDepartment']\n event['DeptName'] = eventRow['XSNACodeDepartmentName']\n event['ProviderSpec'] = eventRow['XSNACodeProviderSpecialty']\n event['Summary'] = cs.createEventSummary(event)\n return event\n\n\n# Create event group\ndef createEventGroup(patientID, headerPatientData, detailPatientData, detail_data):\n headerEvent = createHeaderEvent(patientID, headerPatientData, detailPatientData)\n events = create_date_events(detailPatientData, headerPatientData)\n events.insert(0,headerEvent)\n # events.append(headerEvent)\n return events\n","sub_path":"create_events.py","file_name":"create_events.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"291202765","text":"#-*- coding: utf-8 -*-\n'''\nCreated on 2021. 3. 6.\n\n@author: JJ\n'''\n\nimport win32com.client\n\n\n\n\n'''\nexcel = win32com.client.Dispatch(\"Excel.Application\")\nexcel.Visible = True\n\nwb = excel.WorkBooks.Add()\nws = wb.Worksheets(\"Sheet1\")\nws.Cells(1,1).Value = \"Hello Hello\"\n'''\n\ninstCpStockCode = win32com.client.Dispatch(\"CpUtil.CpStockCode\")\nprint(instCpStockCode.GetCount())\nprint(instCpStockCode.CodeToName(\"A005930\"))\nprint(instCpStockCode.NameToCode(\"아이큐어\"))\n\n\ncpsysdiblist = win32com.client.Dispatch(\"CpSysDib.CssStgList\")\n\n## 전략을 가져올 Type 세팅(나의전략:ord('1')\ncpsysdiblist.SetInputValue(0, ord('1')) # '0' : 예제전략, '1': 나의전략\n\n##Block모드로 \ncpsysdiblist.BlockRequest()\n\n##불러오기\ncount = cpsysdiblist.GetHeaderValue(0) #0 : (long) 전략목록수\nflag = cpsysdiblist.GetHeaderValue(1) #1 : (char) 요청구분\n\nfor i in range(count):\n item = {}\n item['전략명'] = cpsysdiblist.GetDataValue(0,i);\n item['전략ID'] = cpsysdiblist.GetDataValue(1,i);\n print(\"----\")\n print(item)\n\n\ncpsysdibfind = win32com.client.Dispatch(\"CpSysDib.CssStgFind\")\n\n","sub_path":"CPByJJuN/src/SampleTest/ExcelStart.py","file_name":"ExcelStart.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"585777583","text":"import tornado.web\nimport tornado.ioloop\nimport tornado.websocket\nimport os\n\nimport Handlers as WH\n\ndef CheckEvents():\n pass\n\ndef ServerCreate():\n settings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\")\n }\n\n app = tornado.web.Application([\n (r\"/\", WH.DefaultHandler),\n (r\"/EventHandler\", WH.EventSocket)\n ],**settings)\n\n app.listen(8888)\n\n ioLoop = tornado.ioloop.IOLoop.current()\n\n #run this function every 1000ms\n #ioLoop.PeriodicCallback(CheckEvents,1000)\n\n #start processing loop\n ioLoop.start()\n\nif __name__ == \"__main__\":\n ServerCreate()\n","sub_path":"Calebs Tests/Web Server/ServerMain.py","file_name":"ServerMain.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"103399568","text":"\"\"\"\nA linked list is given such that each node contains an additional random pointer\nwhich could point to any node in the list or null.\n\nReturn a deep copy of the list. \n\"\"\"\nfrom . 
import RandomListNode\n\ndef copyRandomList(head):\n \"\"\"\n :type head: RandomListNode\n :rtype: RandomListNode\n \"\"\"\n if not head:\n return head\n node = head\n temp = RandomListNode(0)\n pointer = temp\n node_map = {}\n while node:\n new_node = RandomListNode(node.val)\n pointer.next = new_node\n node_map[node] = new_node\n pointer = pointer.next\n node = node.next\n node = head\n pointer = temp.next\n while node:\n pointer.random = node_map[node.random] if node.random else None\n node = node.next\n pointer = pointer.next\n return temp.next","sub_path":"leetcode/copy_list_with_random_pointer.py","file_name":"copy_list_with_random_pointer.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"557548902","text":"from src.utils.tk import TKUtils\n\nfrom src.view.aluno.actions import Actions\nfrom src.view.aluno.listagem import ListaDeAlunos\n\n\nclass Aluno(TKUtils.obter_container()):\n\n def __init__(self):\n super().__init__()\n\n self.defs.pack['side'] = 'bottom'\n\n self.actions = Actions()\n self.listagem = ListaDeAlunos()\n\n def iniciar(self, master):\n super().iniciar(master=master)\n\n self.actions.iniciar(master=self)\n self.listagem.iniciar(master=self)\n\n self.ocultar()\n","sub_path":"src/view/aluno/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"408990115","text":"\"\"\"\nCS 240 Fall 2014\nPA3: Linked List\n\nAuthor: Arlie Moore\nDate: 10/24/14\n\"\"\"\nfrom email._header_value_parser import get_value\n\n#-------------------------------------------------------------------\n# This submission compiles with the JMU Honor Code. 
All code was written by the\n# submitter, and no unauthorized assistance was used while completing this\n# assignment.\n#\n# - Arlie Moore\n#-------------------------------------------------------------------\n\nclass LinkedList:\n \"\"\" Python implementation of a singly-linked list.\n Maintains elements in standard sorted order.\n Uses sentinel value at the beginning of the list.\n \"\"\"\n\n class _Node:\n \"\"\" Represents a node in a singly-linked list \"\"\"\n\n __slots__ = 'value', 'next' # performance optimization\n\n def __init__(self, value, next=None):\n \"\"\" Create new node \"\"\"\n self.value = value\n self.next = next\n\n\n def __init__(self):\n \"\"\" Create a new (empty) linked list \"\"\"\n self._head = LinkedList._Node(None)\n\n def viz(self):\n \"\"\" Returns a formatted textual representation of the list \"\"\"\n text = []\n cur = self._head\n while cur is not None:\n text.append(str(cur.value))\n cur = cur.next\n return \" -> \".join(text)\n\n def add(self, value):\n \"\"\" Insert a value into the linked list; doesn't allow duplicates \"\"\"\n \n new_node = LinkedList._Node(value)\n cur = self._head.next\n previous = self._head\n \n while cur is not None:\n if cur.value == value:\n return\n if cur.value > value:\n previous.next = new_node\n new_node.next = cur\n return\n previous = cur\n cur = cur.next\n \n previous.next = new_node\n new_node.next = None\n\n def remove(self, value):\n \"\"\" Find and remove a value from the linked list.\n If there are multiple instances it will only remove one.\n Raises a KeyError if the value is not in the list.\n \"\"\"\n cur = self._head.next\n previous = self._head\n\n while cur is not None:\n if cur.value == value:\n previous.next = cur.next\n return\n previous = cur\n cur = cur.next\n \n raise KeyError(value, \"not in list.\")\n \n \n\n def __contains__(self, value):\n \"\"\" Returns True if the given value is in the list; False otherwise \"\"\"\n \n for i in self:\n if i == value:\n return True\n \n return False\n\n def __iter__(self):\n \"\"\" Returns an iterator for the list \"\"\"\n cur = self._head.next\n \n while cur is not None:\n yield cur.value\n cur = cur.next\n \n \n \n \ndef main():\n \n # ADD NEW TESTS!\n test = LinkedList()\n test.add(\"z\")\n test.add(\"y\")\n test.add(\"a\")\n test.add(\"b\")\n test.add(\"d\")\n test.add(\"c\")\n test.add(\"cat\")\n test.add(\"dogs\")\n test.add(\"m\")\n \n for i in test:\n print(i)\n \n print(\"cat\" in test)\n print(test.viz())\n \nif __name__ == \"__main__\":\n main() \n \n \n\n\n\n","sub_path":"PA3/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"151964491","text":"import os\nimport json\n# import wget\n\nfrom flask import (\n Flask,\n jsonify,\n send_from_directory,\n request,\n redirect,\n url_for\n)\nfrom flask_sqlalchemy import SQLAlchemy\nimport werkzeug\nwerkzeug.cached_property = werkzeug.utils.cached_property\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom flask_restx import Api, Resource, fields, abort, reqparse,marshal\n\nfrom celery import Celery\nimport celery.states as states\n\nfrom . import api_functions\nfrom . 
import hate_speech_classifier\n\n\n# global variables\nCELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')\nCELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND')\ncelery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)\n\napp = Flask(__name__)\napp.wsgi_app = ProxyFix(app.wsgi_app)\napp.config.from_object(\"project.config.Config\")\ndb = SQLAlchemy(app)\napi = Api(app, version='1.0',\n title='UGC API services',\n description='REST APIs for processing user-generated content')\nns = api.namespace('comments_api', description='REST services API for news comments')\n\nclass DictItem(fields.Raw):\n def output(self, key, obj, *args, **kwargs):\n try:\n dct = getattr(obj, self.attribute)\n except AttributeError:\n return {}\n return dct or {}\n\n\ndetails_model = api.model('details', {\n \"PASS\": fields.Float( description='PASS confidence'),\n \"RULE-1\": fields.Float(description='Rule 1 confidence'),\n \"RULE-2\": fields.Float(description='Rule 2 confidence'),\n \"RULE-3\": fields.Float( description='Rule 3 confidence'),\n \"RULE-4\": fields.Float( description='Rule 4 confidence'),\n \"RULE-5\": fields.Float( description='Rule 5 confidence'),\n \"RULE-6\": fields.Float( description='Rule 6 confidence'),\n \"RULE-7\": fields.Float( description='Rule 7 confidence'),\n \"RULE-8\": fields.Float( description='Rule 8 confidence'),\n })\n\n# input and output definitions\n\nhate_speech_single_input = api.model('HateSpeechSingleInput', {\n 'text': fields.String(required=True, description='input text for classification')\n})\nhate_speech_single_output = api.model('HateSpeechSingleOutput', {\n 'decision': fields.String(required=True, description='predicted class'),\n 'result': fields.Float(required=True, description='prediction confidence'),\n \"details\":fields.Raw(description='All rules probabilities'),\n\n })\n\nhate_speech_list_input = api.model('HateSpeechListInput', {\n 'texts': fields.List(fields.String, required=True, description='input list of texts for classification')\n})\nhate_speech_list_output = api.model('HateSpeechListOutput', {\n 'decision': fields.List(fields.String, required=True, description='list of predicted classes'),\n 'result': fields.List(fields.Float, required=True, description='list of prediction confidences'),\n \"details\":fields.List(fields.Raw(), required=True, description='list of All rules probabilities'),\n})\n\n@ns.route('/hate_speech/')\nclass HateSpeechClassifier(Resource):\n @ns.doc('predict hate speech from single text')\n @ns.expect(hate_speech_single_input, validate=True)\n @ns.marshal_with(hate_speech_single_output)\n def post(self):\n label, confidence,detail = hate_speech_classifier.predict([api.payload['text']])\n print(detail)\n print(marshal(detail, details_model))\n return {'decision': label[0],\n 'result': confidence[0],\n \"details\": marshal(detail, details_model)\n }\n\n\n# @api.doc(responses={200: 'Success', 400: 'Input Error', 500: 'Internal Server Error'})\n# @api.expect(hate_model, validate=True)\n\n\n@ns.route('/hate_speech_list/')\nclass HateSpeechListClassifier(Resource):\n @ns.doc('predict hate speech from list of texts')\n @ns.expect(hate_speech_list_input, validate=True)\n @ns.marshal_with(hate_speech_list_output)\n def post(self):\n label, confidence,detail = hate_speech_classifier.predict(api.payload['texts'])\n return {'decision': label,\n 'result': confidence,\n \"details\": marshal(detail, details_model)\n }\n\n\n@app.route(\"/health/\")\n#@app.doc('get information about the health of this API')\ndef 
health():\n return api_functions.health()\n\n@app.route(\"/documentation/\")\n#@app.doc('get Swagger documentation about this API')\ndef documentation():\n return api_functions.documentation()\n\n# ==========================================================================================================================================================================\n#\n# @ns.route('/check_task')\n# class CheckTask(Resource):\n# @ns.doc('checks the status of the task')\n# @ns.expect(check_task_input, validate=True)\n# @ns.marshal_with(check_task_output)\n# def post(self):\n# res = celery.AsyncResult(api.payload['task_id'])\n# return {'state': res.state}\n#\n#\n# @ns.route('/get_task_result')\n# class GetTaskResult(Resource):\n# @ns.doc('gets the result of a completed task')\n# @ns.expect(get_task_result_input, validate=True)\n# @ns.marshal_with(get_task_result_output)\n# def post(self):\n# res = celery.AsyncResult(api.payload['task_id'])\n# if res.state != states.SUCCESS:\n# abort(404, 'Cannot get result!', task_state=res.state)\n# else:\n# return {'state': res.state, 'result': res.get()}\n#\n#\n# @ns.route('/async_translate_text')\n# class AsyncTranslator(Resource):\n# @ns.doc('translates input text asynchronously')\n# @ns.expect(async_translate_input, validate=True)\n# @ns.marshal_with(async_translate_output, code=201)\n# def post(self):\n# task = celery.send_task('tasks.translate', args=[api.payload['text'], api.payload['target_lang']], kwargs={})\n# return {'task_id': task.id}\n#\n#\n# # serving static content\n# @app.route(\"/static/\")\n# def staticfiles(filename):\n# return send_from_directory(app.config[\"STATIC_FOLDER\"], filename)\n#\n# @app.route(\"/media/\")\n# def mediafiles(filename):\n# return send_from_directory(app.config[\"MEDIA_FOLDER\"], filename)\n","sub_path":"services/web/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"356184610","text":"from guts import *\nimport numpy as num\nfrom cStringIO import StringIO\n\nclass literal(str): \n pass\n\ndef literal_presenter(dumper, data):\n return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')\n\nimport yaml\nyaml.SafeDumper.add_representer(literal, literal_presenter)\n\nclass Array(Object):\n\n dummy_for = num.ndarray\n\n class __T(TBase):\n def __init__(self, shape=None, dtype=None, *args, **kwargs):\n TBase.__init__(self, *args, **kwargs)\n self.shape = shape\n self.dtype = dtype\n\n def regularize_extra(self, val):\n if isinstance(val, basestring):\n ndim = None\n if self.shape:\n ndim = len(self.shape)\n\n val = num.loadtxt(StringIO(val), dtype=self.dtype, ndmin=ndim)\n else:\n val = num.asarray(val, dtype=self.dtype)\n\n return val\n\n def validate_extra(self, val):\n if self.dtype != val.dtype:\n raise ValidationError('array not of required type: need %s, got %s' % (self.dtype, val.dtype))\n\n if self.shape is not None:\n la, lb = len(self.shape), len(val.shape)\n if la != lb:\n raise ValidationError('array dimension mismatch: need %i, got %i' % (la, lb))\n\n for a,b in zip(self.shape, val.shape):\n if a is not None:\n if a != b:\n raise ValidationError('array shape mismatch: need %s, got: %s' % (self.shape, val.shape))\n\n def to_save(self, val):\n out = StringIO()\n num.savetxt(out, val, fmt='%12.7g')\n return literal(out.getvalue())\n\n__all__ = [ 'Array' 
]\n\n","sub_path":"src/guts_array.py","file_name":"guts_array.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"302421241","text":"import sys\nimport os\n\nimport pytest\n\nfrom cibuildwheel.__main__ import main\n\n\ndef test_unknown_platform_non_ci(monkeypatch, capsys):\n monkeypatch.setattr(os, 'environ', {})\n monkeypatch.setattr(sys, \"argv\", [\"python\", \".\"])\n with pytest.raises(SystemExit) as exit:\n main()\n assert exit.value.code == 2\n _, err = capsys.readouterr()\n assert 'cibuildwheel: Unable to detect platform.' in err\n assert \"cibuildwheel should run on your CI server\" in err\n\n\ndef test_unknown_platform_on_ci(monkeypatch, capsys):\n monkeypatch.setattr(os, 'environ', {\"CI\": \"true\"})\n monkeypatch.setattr(sys, \"argv\", [\"python\", \".\"])\n\n monkeypatch.setattr(sys, \"platform\", \"Something\")\n\n with pytest.raises(SystemExit) as exit:\n main()\n _, err = capsys.readouterr()\n assert exit.value.code == 2\n assert 'cibuildwheel: Unable to detect platform from \"sys.platform\"' in err\n\n\ndef test_unknown_platform(monkeypatch):\n monkeypatch.setattr(os, 'environ', {\"CIBW_PLATFORM\": \"Something\"})\n monkeypatch.setattr(sys, \"argv\", [\"python\", \".\"])\n\n with pytest.raises(Exception) as exc:\n main()\n assert exc.value.args[0] == 'Unsupported platform: Something'\n","sub_path":"unit_test/platform_test.py","file_name":"platform_test.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"363504707","text":"# coding:utf-8\n\nimport httpcon\nfrom decimal import *\nfrom logging import basicConfig, getLogger, DEBUG, INFO\n\n# ロギング\nbasicConfig(level=DEBUG)\nlogger = getLogger(__name__)\n\n\ndef getBTC_JPY():\n '''\n coinmarketcapのBTC/JPYレートを取得する\n '''\n result = Decimal(0)\n response = httpcon.getJsonResponse(\n 'https://api.coinmarketcap.com/v2/ticker/1/?convert=JPY')\n result = Decimal(response['data']['quotes']['JPY']['price'])\n # logger.info('getBTC_JPY() result:{:.10f}'.format(result))\n return result\n\n\ndef getETH_JPY():\n '''\n coinmarketcapのETH/JPYレートを取得する\n '''\n result = Decimal(0)\n response = httpcon.getJsonResponse(\n 'https://api.coinmarketcap.com/v2/ticker/1027/?convert=JPY')\n result = Decimal(response['data']['quotes']['JPY']['price'])\n # logger.info('getETH_JPY() result:{:.10f}'.format(result))\n return result\n\n\ndef getUSDT_JPY():\n '''\n coinmarketcapのUSDT/JPYレートを取得する\n '''\n result = Decimal(0)\n response = httpcon.getJsonResponse(\n 'https://api.coinmarketcap.com/v2/ticker/825/?convert=JPY')\n result = Decimal(response['data']['quotes']['JPY']['price'])\n # logger.info('getUSDT_JPY() result:{:.10f}'.format(result))\n return result\n","sub_path":"coinmarketcap.py","file_name":"coinmarketcap.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"357663240","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.distributions.bernoulli import Bernoulli\nfrom torch.distributions.normal import Normal\nfrom .vae_base import VAE\n\n\nclass FaithfulVae(VAE):\n def __init__(self, device, img_shape, h_dim, z_dim, analytic_kl, mean_img):\n super().__init__(device, z_dim, analytic_kl)\n # import pdb; pdb.set_trace()\n x_dim = np.prod(img_shape)\n self.img_shape = img_shape\n self.proc_data = lambda x: x.to(device).reshape(-1, x_dim)\n\n 
self.encoder_mlps = nn.ModuleList([])\n self.encoder_mus = nn.ModuleList([])\n self.encoder_sigs = nn.ModuleList([])\n self.combiner_mlps = nn.ModuleList([])\n self.previous_sample_mlps = nn.ModuleList([])\n self.z_dim = z_dim\n\n split_dim = h_dim // 4\n\n self.first_encoder = nn.Sequential(\n nn.Linear(x_dim, h_dim), nn.Tanh(),\n nn.Linear(h_dim, h_dim), nn.Tanh()\n )\n self.first_mu = nn.Linear(h_dim, 1)\n self.first_sig = nn.Linear(h_dim, 1)\n\n for i in range(z_dim - 1):\n self.encoder_mlps.append(\n nn.Sequential(\n nn.Linear(x_dim, split_dim), nn.Tanh(),\n nn.Linear(split_dim, split_dim), nn.Tanh()\n ))\n self.previous_sample_mlps.append(\n nn.Sequential(\n nn.Linear(1, split_dim), nn.Tanh(),\n nn.Linear(split_dim, split_dim), nn.Tanh()\n ))\n self.combiner_mlps.append(\n nn.Sequential(\n nn.Linear(2*split_dim, h_dim), nn.Tanh(),\n ))\n self.encoder_mus.append(\n nn.Linear(h_dim, 1)\n )\n self.encoder_sigs.append(\n nn.Linear(h_dim, 1)\n )\n\n self.decoder = nn.Sequential(\n nn.Linear(z_dim, h_dim), nn.Tanh(),\n nn.Linear(h_dim, h_dim), nn.Tanh(),\n nn.Linear(h_dim, x_dim)) # using Bern(logit) is equivalent to putting sigmoid here.\n\n self.apply(self.init)\n mean_img = np.clip(mean_img, 1e-8, 1. - 1e-7)\n mean_img_logit = np.log(mean_img / (1. - mean_img))\n self.decoder[-1].bias = torch.nn.Parameter(torch.Tensor(mean_img_logit))\n\n def init(self, module):\n if type(module) == nn.Linear:\n torch.nn.init.xavier_uniform_(module.weight, gain=nn.init.calculate_gain('tanh'))\n module.bias.data.fill_(.01)\n\n def encode(self, x):\n # TODO maybe fix this\n mean_n = 1\n imp_n = 1\n x = self.proc_data(x)\n mus = []\n sigs = []\n z_samples = []\n\n # first do the first z\n fh = self.first_encoder(x)\n fm = self.first_mu(fh)\n fs = nn.functional.softplus(self.first_sig(fh))\n current_z_sample = Normal(fm, fs).rsample(torch.Size([mean_n, imp_n]))\n\n z_samples.append(current_z_sample)\n mus.append(fm)\n sigs.append(fs)\n\n # next do all the remaining z\n all_hs = [self.encoder_mlps[i](x) for i in range(self.z_dim - 1)]\n for i in range(self.z_dim - 1):\n current_h = all_hs[i]\n current_sample_h = self.previous_sample_mlps[i](current_z_sample.squeeze(0).squeeze(0))\n \n full_h = torch.cat((current_h, current_sample_h), dim=1)\n full_h = self.combiner_mlps[i](full_h)\n \n curr_mu = self.encoder_mus[i](full_h)\n curr_std = nn.functional.softplus(self.encoder_sigs[i](full_h))\n\n current_z_sample = Normal(curr_mu, curr_std).rsample(torch.Size([mean_n, imp_n]))\n z_samples.append(current_z_sample)\n mus.append(curr_mu)\n sigs.append(curr_std)\n\n all_samples = torch.cat(z_samples, dim=3)\n all_mus = torch.cat(mus, dim=1)\n all_sigs = torch.cat(sigs, dim=1)\n return Normal(all_mus, all_sigs), all_samples # torch.exp(.5 * _std)\n\n def decode(self, z):\n x = self.decoder(z)\n return Bernoulli(logits=x)\n\n def lpxz(self, true_x, x_dist):\n return x_dist.log_prob(true_x).sum(-1)\n\n def sample(self, num_samples=64):\n z = self.prior.sample((num_samples,))\n x_dist = self.decode(z)\n return x_dist.sample().view(num_samples, *self.img_shape)\n","sub_path":"model/faithful_vae.py","file_name":"faithful_vae.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"502518718","text":"import segno\nimport qrcode\nimport simplejson as json\nimport requests\nfrom django.contrib import messages\nfrom django.db import transaction\nfrom django.db.models import Sum\nfrom django.http import HttpResponse, 
HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView, DetailView, ListView\nfrom django.views.generic.base import View\nfrom qr_code.templatetags.qr_code import qr_url_from_text\n\nfrom apps.almacen.constants import VENDIDO\nfrom apps.almacen.models.equipo import Equipo\nfrom apps.persona.models.cliente import Cliente\nfrom apps.persona.models.empresa import Empresa\nfrom apps.producto.models.precio_venta import PrecioVenta\nfrom apps.sunat.models.periodo import Periodo\nfrom apps.sunat.models.serie import Serie\nfrom apps.sunat.models.tipo_comprobante_pago import TipoComprobantePago\nfrom apps.sunat.utils import numero_to_letras\nfrom apps.venta.models.venta import Venta\nfrom apps.venta.models.venta_detalle import VentaDetalle\nfrom backend_apps.backend_auth.constants import UUIDEncoder\nfrom backend_apps.backend_auth.models import User\nfrom backend_apps.utils.decorators import permission_resource_required\n\n\nclass VentaForm(TemplateView):\n template_name = \"venta/venta/form.html\"\n\n @method_decorator(permission_resource_required())\n def dispatch(self, request, *args, **kwargs):\n return super(VentaForm, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(VentaForm, self).get_context_data(**kwargs)\n context['title'] = \"Venta\"\n context['opts'] = Venta._meta\n return context\n\n def post(self, request, *args, **kwargs):\n try:\n with transaction.atomic():\n venta = Venta()\n data = json.loads(request.body)\n\n tipo_comprobate = TipoComprobantePago.objects.get(codigo=data['tipo_comprobante'])\n serie = Serie.objects.get(estado=True, tipo_comprobante=tipo_comprobate)\n empresa = Empresa.objects.get(estado=True)\n user = self.request.user\n cliente = Cliente.objects.get(nro_documento=data['cliente']['nro_documento'])\n periodo = Periodo.objects.get(estado=True)\n venta.user = user\n venta.tipo_comprobante = tipo_comprobate\n venta.cliente = cliente\n venta.periodo = periodo\n # cargando serie y número\n venta.serie = serie\n venta.nro_correlativo = serie.venta_set.all().count() + 1\n\n if tipo_comprobate.codigo == 99:\n venta.contado = False\n\n if data['distribuidor']:\n venta.distribuidor = User.objects.get(username=data['distribuidor']['username'])\n\n if serie.numero_maximo <= venta.nro_correlativo:\n data = {\n \"message\": \"Numero de serie exedido\"\n }\n dump = json.dumps(data)\n return HttpResponse(dump, status=400, content_type=\"application/json\")\n\n # datos electronicos\n\n venta.igv = 0\n venta.base_imponible = 0\n venta.total = 0\n venta.save()\n\n igv = 0\n base_imponible = 0\n total = 0\n # cargando detalles\n try:\n for d in data['equipos']:\n\n try:\n ven_det = VentaDetalle()\n equipo = Equipo.objects.get(imei=d['imei'])\n\n precio_venta = PrecioVenta.objects.get(ruta=data['ruta'],\n producto_general__producto__equipo=equipo)\n ven_det.equipo = equipo\n ven_det.venta = venta\n ven_det.cantidad = 1 # por este medio es 1\n ven_det.precio_unitario = precio_venta.precio\n ven_det.valor_unitario = float(precio_venta.precio) / float(\n (1 + (float(periodo.igv) / 100)))\n\n ven_det.precio_total = float(ven_det.cantidad) * float(precio_venta.precio)\n ven_det.base_imponible = float(ven_det.precio_total) / (\n 1 + (float(periodo.igv) / 100))\n ven_det.igv = float(ven_det.precio_total) - float(ven_det.base_imponible)\n\n ven_det.save()\n except Exception as e:\n print(e)\n\n equipo.estado = VENDIDO\n equipo.save()\n # 
trabajando totales\n base_imponible = float(base_imponible) + float(ven_det.base_imponible)\n igv = float(igv) + float(ven_det.igv)\n total = float(total) + float(ven_det.precio_total)\n\n\n except Exception as e:\n data = {\n \"message\": e\n }\n dump = json.dumps(data)\n return HttpResponse(dump, status=400, content_type=\"application/json\")\n venta.igv = igv\n venta.base_imponible = base_imponible\n venta.total = total\n venta.monto_texto = numero_to_letras(venta.total)\n venta.qr = \"%s|%s|%s|%s|%.2f|%.2f|%s|%s|%s|\" % (\n empresa.nro_ruc,\n venta.tipo_comprobante.codigo,\n venta.serie.serie,\n venta.nro_correlativo,\n venta.igv,\n venta.total,\n venta.fecha_local().strftime(\"%Y-%m-%d\"),\n venta.cliente.tipo_documento.codigo,\n venta.cliente.nro_documento,\n )\n venta.save()\n\n data = {\n 'id': venta.id,\n }\n\n dump = json.dumps(data, cls=UUIDEncoder)\n\n return HttpResponse(dump, content_type=\"application/json\", status=201)\n except Exception as e:\n data = {\"message\": \"Error %s\" % e}\n dump = json.dumps(data)\n return HttpResponse(dump, status=400, content_type=\"application/json\")\n\n\nclass VentaDetailView(DetailView):\n model = Venta\n template_name = \"venta/venta/detail.html\"\n\n @method_decorator(permission_resource_required())\n def dispatch(self, request, *args, **kwargs):\n return super(VentaDetailView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(VentaDetailView, self).get_context_data(**kwargs)\n\n productos_generales = VentaDetalle.objects.values('equipo__producto__producto_general',\n 'equipo__producto__producto_general__codigo',\n 'precio_unitario').annotate(\n precio_total=Sum('precio_total'), base_imponible=Sum('base_imponible'), cantidad=Sum('cantidad')).filter(\n venta=self.get_object())\n\n for pr in productos_generales:\n equipos = Equipo.objects.filter(ventadetalle__venta=self.get_object(),\n producto__producto_general__id=pr['equipo__producto__producto_general'])\n\n pr['equipos'] = equipos\n\n qr_url = qr_url_from_text(self.object.qr, version=5, image_format=\"png\", error_correction=\"L\")\n\n context['opts'] = self.model._meta\n context['qr_url'] = qr_url\n\n context['productos_generales'] = productos_generales\n context['empresa'] = Empresa.objects.get(estado=True)\n\n context['title'] = \"Venta\"\n return context\n\n def get_template_names(self):\n if self.get_object().tipo_comprobante.codigo == \"01\":\n return ['venta/venta/factura.html']\n\n if self.get_object().tipo_comprobante.codigo == \"03\":\n return ['venta/venta/boleta.html']\n\n if self.get_object().tipo_comprobante.codigo == \"99\":\n return ['venta/venta/boleta.html']\n\n\nclass VentaListView(ListView):\n model = Venta\n template_name = \"venta/venta/list.html\"\n\n @method_decorator(permission_resource_required())\n def dispatch(self, request, *args, **kwargs):\n return super(VentaListView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(VentaListView, self).get_context_data(**kwargs)\n context['opts'] = self.model._meta\n context['title'] = \"Comprobante Electronico\"\n return context\n\n\nclass VentaElectronicaView(View):\n\n @method_decorator(permission_resource_required())\n def dispatch(self, request, *args, **kwargs):\n return super(VentaElectronicaView, self).dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n venta = Venta.objects.get(id=self.kwargs['pk'])\n venta_detalles = VentaDetalle.objects.filter(venta=venta)\n empresa = 
Empresa.objects.get(estado=True)\n items = []\n num = 0\n for vd in venta_detalles:\n num = num + 1\n items.append(\n {\n \"item\": num,\n \"codigo_interno\": vd.equipo.producto.producto_general.codigo,\n \"descripcion\": vd.equipo.producto.producto_general.descripcion,\n \"codigo_producto_sunat\": None,\n \"unidad_de_medida\": \"NIU\",\n \"cantidad\": vd.cantidad,\n \"valor_unitario\": vd.valor_unitario,\n \"codigo_tipo_precio\": \"01\",\n \"precio_unitario\": vd.precio_unitario,\n \"codigo_tipo_afectacion_igv\": \"10\",\n \"total_base_igv\": vd.base_imponible,\n \"porcentaje_igv\": 18,\n \"total_igv\": vd.igv,\n \"total_impuestos\": vd.igv,\n \"total_valor_item\": vd.base_imponible,\n \"total_item\": vd.precio_total\n }\n )\n\n data = {\n \"empresa_id\": \"1\",\n \"serie_documento\": \"%s\" % venta.serie.serie,\n \"numero_documento\": \"%s\" % venta.nro_correlativo,\n \"fecha_de_emision\": venta.created_at.strftime(\"%Y-%m-%d\"),\n \"hora_de_emision\": venta.fecha_local().strftime(\"%H:%M:%S\"),\n \"codigo_tipo_operacion\": \"0101\",\n \"codigo_tipo_documento\": venta.tipo_comprobante.codigo,\n \"codigo_tipo_moneda\": \"PEN\",\n \"fecha_de_vencimiento\": venta.created_at.strftime(\"%Y-%m-%d\"),\n \"numero_orden_de_compra\": \"0045467898\",\n # ESTO ES PARA NOTAS\n \"codigo_tipo_documento_afecto\": \"01\",\n \"serie_documento_afecto\": \"F001\",\n \"numero_documento_afecto\": \"1\",\n \"nota_credito_tipo_codigo\": \"01\",\n \"nota_debito_tipo_codigo\": \"01\",\n \"nota_descripcion\": \"ANULACION DE LA OPERACION\",\n # FIN NOTA\n \"total_gratis\": \"0.00\",\n \"total_otros_cargos\": \"0.00\",\n \"datos_del_emisor\": {\n \"codigo_del_domicilio_fiscal\": \"0000\"\n },\n \"datos_del_cliente_o_receptor\": {\n \"codigo_tipo_documento_identidad\": venta.cliente.tipo_documento.codigo,\n \"numero_documento\": venta.cliente.nro_documento,\n \"apellidos_y_nombres_o_razon_social\": venta.cliente.nombre_completo(),\n \"codigo_pais\": \"PE\",\n \"ubigeo\": venta.cliente.lugar.ubigeo(),\n \"direccion\": venta.cliente.direccion,\n \"correo_electronico\": None,\n \"telefono\": None\n },\n \"totales\": {\n \"total_exportacion\": 0.00,\n \"total_operaciones_gravadas\": venta.base_imponible,\n \"total_operaciones_inafectas\": 0.00,\n \"total_operaciones_exoneradas\": 0.00,\n \"total_operaciones_gratuitas\": 0.00,\n \"total_igv\": venta.igv,\n \"total_impuestos\": venta.igv,\n \"total_valor\": venta.base_imponible,\n \"total_venta\": venta.total\n },\n \"items\": items\n\n }\n headers = {'Content-type': 'application/json'}\n\n dump = json.dumps(data)\n\n try:\n print(dump)\n response = requests.post(\"http://facturador.imperiumse.com/api/document/\", data=dump, headers=headers)\n # response = requests.post(\"http://factura-peru.com/api/document/\", data=dump, headers=headers)\n\n if response.status_code == 404 or response.status_code == 500:\n messages.error(self.request, \"No se puede completar la petición\")\n\n return HttpResponseRedirect(reverse(\"venta:venta_list\"))\n except:\n messages.error(self.request, \"No se puede completar la petición\")\n return HttpResponseRedirect(reverse(\"venta:venta_list\"))\n data = json.loads(response.content)\n print(data)\n if data['data']['code']=='0':\n venta.electronico = True\n venta.save()\n else:\n messages.error(self.request, \"No se puede completar la petición: %s %s\" %(data['data']['code'],data['data']['description']) )\n return HttpResponseRedirect(reverse(\"venta:venta_list\"))\n messages.success(self.request, \"Recibo Electronico Satisfactoriamente\")\n 
return HttpResponseRedirect(reverse(\"venta:venta_list\"))\n\n\nclass VentaXml(View):\n\n def dispatch(self, request, *args, **kwargs):\n return super(VentaXml, self).dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n venta = Venta.objects.get(id=self.kwargs['pk'])\n params = {\n \"number\": venta.nro_correlativo, \"serie\": venta.serie.serie, \"company_number\": '20605461043'}\n try:\n response = requests.get(\"http://facturador.imperiumse.com/api/document-xml/\", params=params)\n print(response.content)\n if response.status_code == 500:\n messages.error(self.request, \"error 500 api\")\n return HttpResponseRedirect(reverse(\"venta:venta_list\"))\n except Exception as e:\n\n messages.error(self.request, \"error %s \" % e)\n return HttpResponseRedirect(reverse(\"venta:venta_list\"))\n messages.success(self.request, \"respuesta satisfactoria\")\n # response = requests.get(\"http://facturador.imperiumse.com/api/document-cdr/\", data=dump, headers=headers)\n response = HttpResponse(response.content, content_type='application/xml')\n response['Content-Disposition'] = 'attachment; filename=' + \"documeno.xml\"\n return response\n","sub_path":"apps/venta/views/venta.py","file_name":"venta.py","file_ext":"py","file_size_in_byte":15056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"439706807","text":"import io\r\nimport sys\r\nimport os\r\nnameRead = open(\"data/names.csv\",\"r\",encoding = \"utf-8\")\r\nnameWrite = open(\"data/test.csv\",\"w\", encoding = \"utf-8\")\r\nnameWrite.write(\"Update\")\r\nnameWrite.write(\"\\n\")\r\n\r\nnameWrite.write(\"DROP TABLE IF EXISTS names;\")\r\nnameWrite.write(\"\\n\")\r\n\r\ninstantiate_string = \"CREATE TABLE names (nconst text, primaryName text, birthYear integer, deathYear integer, primaryProfession text, PRIMARY KEY (nconst));\"\r\nnameWrite.write(instantiate_string)\r\nnameWrite.write(\"\\n\")\r\nnameWrite.write(\"INSERT INTO names(nconst,primaryName,birthYear,deathYear,primaryProfession) \")\r\nnameWrite.write(\"Values \")\r\n \r\nnamesdict = {}\r\niter = 0\r\n\r\n\r\nfor line in nameRead:\r\n\r\n if (iter != 0):\r\n values = line.split(\"\\t\")\r\n #print(values)\r\n try:\r\n namesdict[values[1]] = {} #nconst\r\n namesdict[values[1]][\"primaryName\"] = values[2]\r\n namesdict[values[1]][\"birthYear\"] = values[3]\r\n namesdict[values[1]][\"deathYear\"] = values[4]\r\n namesdict[values[1]][\"primaryProfession\"] = values[5].rstrip()\r\n lastkey = values[1] #will be the last key\r\n except:#there are undefined characters\r\n \r\n input(\"char problem?\")\r\n \r\n iter += 1\r\ndef par(string):#add escape to apostrophes\r\n newstring = \"\"\r\n for chara in string:\r\n if (chara == \"'\"):\r\n newstring += \"''\"\r\n else:\r\n newstring += chara\r\n return newstring\r\niter = 0\r\nfor nconst in namesdict:\r\n iter += 1\r\n writestring = \"\"\r\n nconststring = par(str(nconst))\r\n namestring = par(str(namesdict[nconst][\"primaryName\"]))\r\n birthyearstring = par(str(namesdict[nconst][\"birthYear\"]))\r\n try:\r\n a = int(birthyearstring)\r\n except:\r\n birthyearstring = \"000\" #if birthyear is missing default int of 000 is added to table. 
presumably, nobody will think we mean that someone working in the film industry was born in the year 0\r\n deathyearstring = par(str(namesdict[nconst][\"deathYear\"]))\r\n try:\r\n a = int(deathyearstring)\r\n except:\r\n deathyearstring = \"000\"\r\n professionstring = par(str(namesdict[nconst][\"primaryProfession\"]))\r\n\r\n writestring += \"(\\'\" + nconststring + \"\\',\" \r\n writestring += \"\\'\" + namestring + \"\\'\" + \",\" + \"\\'\" + birthyearstring + \"\\'\" + \",\" \r\n writestring += \"\\'\" + deathyearstring +\"\\'\" + \",\" + \"\\'\"+ professionstring + \"\\'\"\r\n writestring += \")\"\r\n \r\n if (nconst != lastkey):\r\n writestring += \",\"\r\n \r\n nameWrite.write(writestring)\r\nnameWrite.write(\";\")\r\n\r\n\r\n\r\n\r\n\r\n\r\nnameWrite.close()\r\nnameRead.close()\r\n","sub_path":"ninetwentyseven/populate_name_tables_fast.py","file_name":"populate_name_tables_fast.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"440565316","text":"from sqlalchemy import (\n create_engine,\n Table,\n MetaData,\n Column,\n String,\n ForeignKey\n)\nfrom sqlalchemy.orm import relationship, sessionmaker, mapper\nfrom kanban.domain.model.workitem import WorkItem\nfrom kanban.domain.model.board import Board, Column as BoardColumn\n\n\nmetadata = MetaData()\nworkitem_table = Table(\n 'workitem',\n metadata,\n Column('_id', String(36), primary_key=True),\n Column('name', String(256)),\n Column('description', String(4096))\n)\n\nboard_table = Table(\n 'board',\n metadata,\n Column('_id', String(36), primary_key=True),\n Column('name', String(256)),\n)\n\ncolumn_table = Table(\n 'column',\n metadata,\n Column('_id', String(36), primary_key=True),\n Column('board_id', String(36), ForeignKey('board._id')),\n Column('name', String(256)),\n)\n\n\"\"\" workitem_ids_table = Table(\n 'item_ids',\n metadata,\n Column('id', String(36), primay_key=True),\n Column('column_id', String(36), ForeignKey('column._id')),\n\n)\n \"\"\"\n\n\ndef start_mappers():\n mapper(WorkItem, workitem_table)\n mapper(Board, board_table, properties={\n '_columns': relationship(BoardColumn, backref='board'),\n })\n mapper(BoardColumn, column_table)\n\n\ndef sql_session(engine_url):\n engine = create_engine(engine_url)\n metadata.create_all(engine)\n return sessionmaker(bind=engine)()\n","sub_path":"kanban/infrastructure/repos/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"359999844","text":"def solution(n):\n value = 1\n stage = []\n for _ in range(n):\n stage.append([])\n \n stage_ix = -1\n stage_type = 'down'\n while n:\n for current_stage in range(n):\n if stage_type == 'down':\n stage_ix += 1\n stage[stage_ix].append(value)\n elif stage_type == 'horizon':\n stage[stage_ix].append(value)\n elif stage_type == 'up':\n stage_ix -= 1\n stage[stage_ix].append(value)\n value += 1\n \n if stage_type == 'down':\n stage_type = 'horizon'\n elif stage_type == 'horizon':\n stage_type = 'up'\n elif stage_type == 'up':\n stage_type = 'down'\n \n n -= 1\n \n answer = []\n for s in stage:\n visited = [0] * len(s)\n flag = False\n consecutive_start_ix = 0\n for i, v in enumerate(s[::2]):\n answer.append(v)\n visited[i * 2] = 1\n if i * 2 + 1 < len(s) and v + 1 == s[i * 2 + 1]:\n consecutive_start_ix = i * 2\n flag = True\n break\n \n if flag:\n for i, v in enumerate(s[consecutive_start_ix + 1:]):\n 
answer.append(v)\n visited[consecutive_start_ix + i + 1] = 1\n \n for i, v in enumerate(s[::-1]):\n if not visited[len(s) - i - 1]:\n answer.append(v)\n \n return answer","sub_path":"Programmers/Level2/triangle_snail.py","file_name":"triangle_snail.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"612551837","text":"from pylab import *;ion()\n\ndef plot_fft(xdata, ydata, fStart = 0, normed = True):\n\t\t'''\n\t\t >>> nData = 1e4\n\t\t >>> tSpacing = 1.0 / 800.0\n\t\t >>> xdata = np.linspace(0.0, nData*tSpacing, nData)\n\t\t >>> ydata = np.sin(50.0 * 2.0*np.pi*xdata) + 0.5*np.sin(80.0 * 2.0*np.pi*xdata)\n\t\t >>> epa.auxiliary.plot_fft(x,y,fStart = 0, normed=True)\n\n\t\t >>> frequency = np.linspace(0.0, 1.0/(2.0*tSpacing), nData/2)\n\t\t >>> gaussNoise = np.random.normal(0, 1, frequency.size)\n\t\t >>> redNoise = np.abs(np.fft.ifft(1. / frequency))\n\t\t >>> fullNoise = abs(np.fft.ifft(np.fft.fft(gaussNoise) / frequency))\n\t\t'''\n\n\t\tfrom scipy.fftpack import fft\n\t\timport matplotlib.pyplot as plt\n\n\t\tif normed:\n\t\t\t\tydata = ydata.copy()\n\t\t\t\tydata -= np.median(ydata)\n\n\t\t# Number of samplepoints\n\t\tnData = ydata.size\n\t\t# sample spacing\n\t\tspacing = np.median(np.diff(xdata))\n\t\tydata_fft = fft(ydata)\n\t\tfrequency = np.linspace(0.0, 1.0/(2.0*spacing), nData//2)\n\n\t\tplt.semilogy(frequency[fStart:], 2.0/nData * np.abs(ydata_fft[fStart:nData//2]))\n\t\tplt.grid()\n\t\tplt.show()\n\ndef plot_fft_with_window(xdata, ydata, fStart = 1, normed = True, **kwargs):\n\t\t'''\n\t\t >>> # Number of samplepoints\n\t\t >>> N = 600\n\t\t >>> # sample spacing\n\t\t >>> T = 1.0 / 800.0\n\t\t >>> x = np.linspace(0.0, N*T, N)\n\t\t >>> y = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)\n\t\t'''\n\t\tfrom scipy.fftpack import fft\n\t\tfrom scipy.signal import blackman\n\t\timport matplotlib.pyplot as plt\n\n\t\tnData = ydata.size\n\n\t\tif fStart > nData/2:\n\t\t\t\traise Exception(\"fStart must be less than nData/2 or there won't be anything to plot\")\n\n\t\tspacing = np.median(np.diff(xdata))\n\t\tydata_fft = fft(ydata)\n\n\t\twindow = blackman(nData)\n\t\tydata_wfft = fft(ydata*window)\n\n\t\tfrequencies = np.linspace(0.0, 1.0/(2.0*spacing), nData//2)\n\n\t\t#plt.semilogy(frequencies[fStart:nData/2], 2.0/nData * np.abs(ydata_fft[fStart:nData/2]), '-b')\n\t\tplt.semilogy(frequencies[fStart:nData//2], 2.0/nData * np.abs(ydata_wfft[fStart:nData//2]), '-', label='FFT Window', **kwargs)\n\t\t#plt.legend(['FFT', 'FFT Window'])\n\t\t#plt.grid()\n\t\tplt.show()\nimport asyncio\nloop = asyncio.get_event_loop()\n\ndef hello():\n loop.call_later(3, print_hello)\n\ndef print_hello():\n print('Hello!')\n loop.stop()\n \nif __name__ == '__main__':\n loop.call_soon(hello)\n loop.run_forever()\ndef create_dirac_comb(nPts):\n\tdirac_comb = np.ones(nPts*2)\n\tdirac_comb[::2] = 0\n\tdirac_comb[:npts//2] = 0\n\tdirac_comb[-npts//2:] = 0\n\treturn dirac_comb\n\nnpts = int(100)\ntimes = np.arange(0,2*npts) - npts/2\ndirac_comb = create_dirac_comb(npts)\n\nfigure(1);clf()\nplot(times,dirac_comb)\nfigure(2);clf()\nplot_fft(times,dirac_comb)\nfigure(3);clf()\nplot_fft_with_window(times,dirac_comb)\n","sub_path":"lib/test_fourier_models.py","file_name":"test_fourier_models.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"491276114","text":"import uuid\nfrom database import Database\nimport 
datetime\n\n\nclass Post(object):\n    def __init__(self, blog_id, title, content, author, date=None, id=None):\n        self.blog_id = blog_id\n        self.title = title\n        self.content = content\n        self.author = author\n        # self.id = id\n        self.id = uuid.uuid4().hex if id is None else id\n        # uuid = universally unique identifier; uuid4 generates a random id; .hex is its 32-character hexadecimal string\n        # default the timestamp here rather than in the signature: a utcnow() default would be evaluated only once, at class definition time\n        self.date = date if date is not None else datetime.datetime.utcnow()\n\n    def save_to_mongo(self):\n        Database.insert(collection='posts', data=self.json()) #import into database into posts collection\n\n    def json(self): #creates a json representation of the post\n        return {\n            \"id\": self.id,\n            \"blog_id\": self.blog_id,\n            \"date\": self.date,\n            \"title\": self.title,\n            \"author\": self.author,\n            \"content\": self.content\n        }\n\n    @classmethod\n    def from_mongo(cls, id): #Post.from_mongo(postID) returns the post from mongo\n        post_data = Database.find_one(collection=\"posts\", query={\"id\": id})\n        return cls(blog_id=post_data[\"blog_id\"],\n                   title=post_data[\"title\"],\n                   content=post_data[\"content\"],\n                   author=post_data[\"author\"],\n                   date=post_data[\"date\"],\n                   id=post_data[\"id\"])\n\n    @staticmethod\n    #return all posts belonging to a blog\n    def from_blog(blog_id):\n        # return \"\\n\".join(map(str, [post for post in Database.find(collection=\"blogs\",\n        #                     query={\"blog_id\": blogid})])) #find returns cursor\n        return [post for post in Database.find(collection=\"posts\",\n                                               query={\"blog_id\": blog_id})] #find returns cursor\n\n","sub_path":"terminal_blog/modules/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"344845987","text":"import socket\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nname=socket.gethostname()\nhost=socket.gethostbyname(name)\nport=8000\ns.bind((host,port))\ns.listen(1)\nprint(name)\nprint(\"waiting to be connected\")\nconn,addr=s.accept()\nprint(addr,\"has been connected\")\nprint(\"Type quit() to exit\")\ns_name=input(str(\"Enter your name:\"))\ns_name=s_name.encode()\nconn.send(s_name)\nr_name=conn.recv(1024)\nr_name=r_name.decode()\nprint(r_name,\"joined the room\")\nwhile True:\n    rec=conn.recv(1024)\n    rec=rec.decode()\n    print(r_name,\":\",rec)\n    message=input(str(\"Me:\"))\n    if message==\"quit()\":\n        m=s_name+b\" has left the room\"\n        conn.send(m)\n        break\n    message=message.encode()\n    conn.send(message)\n","sub_path":"server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"298996610","text":"import requests\nfrom bs4 import BeautifulSoup\n\nwith open('word.txt', 'w') as file:\n    web = requests.get(\"https://www.englishspeak.com/ko/english-words\")\n    soup = BeautifulSoup(web.content, \"html.parser\")\n\n    a = soup.select(\".test > a\")\n\n    for i in a:\n        file.write(i.text.ljust(10))\n","sub_path":"sorce/croll.py","file_name":"croll.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"264554558","text":"def load(h):\n    return ({'abbr': 20, 'code': 20, 'title': 'Temperature', 'units': 'K'},\n            {'abbr': 100, 'code': 100, 'title': 'Pressure', 'units': 'Pa'},\n            {'abbr': 101,\n             'code': 101,\n             'title': 'Pressure deviation from mean sea level',\n             'units': 'Pa'},\n            {'abbr': 102,\n             'code': 102,\n             'title': 'Altitude above mean sea level',\n             'units': 'm'},\n            {'abbr': 103, 'code': 
103, 'title': 'Height above ground', 'units': 'm'},\n {'abbr': 104, 'code': 104, 'title': 'Sigma coordinate'},\n {'abbr': 105, 'code': 105, 'title': 'Hybrid coordinate'},\n {'abbr': 106, 'code': 106, 'title': 'Depth below land surface', 'units': 'm'},\n {'abbr': 'pt',\n 'code': 107,\n 'title': 'Potential temperature (theta)',\n 'units': 'K'},\n {'abbr': 108,\n 'code': 108,\n 'title': 'Pressure deviation from ground to level',\n 'units': 'Pa'},\n {'abbr': 'pv',\n 'code': 109,\n 'title': 'Potential vorticity',\n 'units': 'K m-2 kg-1 s-1'},\n {'abbr': 110, 'code': 110, 'title': 'Geometrical height', 'units': 'm'},\n {'abbr': 111, 'code': 111, 'title': 'Eta coordinate'},\n {'abbr': 112, 'code': 112, 'title': 'Geopotential height', 'units': 'gpm'},\n {'abbr': 160, 'code': 160, 'title': 'Depth below sea level', 'units': 'm'},\n {'abbr': None, 'code': 255, 'title': 'Missing'})\n","sub_path":"pyeccodes/defs/grib2/tables/8/3_15_table.py","file_name":"3_15_table.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"554398003","text":"import pygame\n\n# Initialize pygame\npygame.init()\n\n# Game window environment\nFPS = 60\nwindow_width = 800\nwindow_height = 600\nsize = (window_width, window_height)\n\n# Create the game window\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Casey's Game\")\nicon = pygame.image.load('pics/Icon.png')\npygame.display.set_icon(icon)\n\n# player attributes\nplayerImg = pygame.image.load('pics/Player.png')\nplayerx = 370\nplayery = 480\nplayerX_change = 0\nplayerY_change = 0\n\n# Skellington attributes\nskellingtonImg = pygame.image.load('pics/skellington.png')\nskellingtonx = 110\nskellingtony = 110\nskellingtonX_change = 0\nskellingtonY_change = 0\n\n# Starting the clock\nclock = pygame.time.Clock()\n\n# Playfield background\nbackground_image = pygame.image.load(\"pics/Background.png\")\n\n\ndef player(x, y):\n screen.blit(playerImg, (x, y))\n\n\ndef skellington(x, y):\n screen.blit(skellingtonImg, (x, y))\n\n\n# Starting the game loop\nrunning = True\nwhile running:\n clock.tick(FPS)\n\n # Game loop part 1: Events ########\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n playerX_change = -1\n if event.key == pygame.K_RIGHT:\n playerX_change = 1\n if event.key == pygame.K_UP:\n playerY_change = -1\n if event.key == pygame.K_DOWN:\n playerY_change = 1\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n playerX_change = 0\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n playerY_change = 0\n\n playerx += playerX_change\n playery += playerY_change\n if playerx <= 0:\n playerx = 0\n elif playerx >= 768:\n playerx = 768\n if playery <= 0:\n playery = 0\n elif playery >= 568:\n playery = 568\n\n screen.blit(background_image, [0, 0])\n player(playerx, playery)\n skellington(skellingtonx, skellingtony)\n\n # Game loop part 2: Updates ########\n\n # Game loop part 3: Draw ########\n pygame.display.update()\n\npygame.quit()\n","sub_path":"SinglePageDrafts/draft0.2.py","file_name":"draft0.2.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"391852898","text":"import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv(\"./data/kiva_loans.csv\", parse_dates=True)\n\ndef split_borrower_gender(l):\n m = 
0\n f = 0\n if type(l) != list:\n return np.nan\n for i in l:\n if i== 'male':\n m += 1\n else:\n f += 1\n if m == 0:\n return 'female'\n elif f == 0:\n return 'male'\n else:\n return 'both'\n\ndf.borrower_genders = df.borrower_genders.str.split(', ').apply(split_borrower_gender)\n\ngensec = df.groupby('borrower_genders')['sector'].value_counts()\ngensec = gensec.unstack().transpose()\n\ndf['disbursed_year'] = pd.to_datetime(df.disbursed_time).dt.year\n","sub_path":"split_gender.py","file_name":"split_gender.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"561707110","text":"#!/usr/bin/env python3\n# -*- encoding:utf-8 -*-\nimport string\n\n\nclass Solution:\n def ladderLength(self, beginWord, endWord,\n wordList):\n if not beginWord or not endWord or not wordList:\n return 0\n if endWord not in wordList:\n return 0\n\n step = 1\n begin = {beginWord}\n end = {endWord}\n\n n = len(beginWord)\n wordlist = set(wordList)\n\n while begin:\n step += 1\n new_begin = set()\n for word in begin:\n for i in range(n):\n for char in string.ascii_lowercase:\n if char != word[i]:\n new_word = word[:i] + char + word[i + 1:]\n if new_word in end: # 与反向的扩散相遇\n return step\n if new_word in wordlist:\n new_begin.add(new_word)\n wordlist.remove(new_word)\n begin = new_begin\n if len(end) < len(begin): # 交换方向,更小的优先搜索\n begin, end = end, begin\n return 0","sub_path":"Week_07/127_单词接龙.py","file_name":"127_单词接龙.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"10680796","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 9 14:10:14 2020\r\n\r\n@author: wegia\r\n\"\"\"\r\n\r\nimport openaq\r\n\r\nimport pandas as pd\r\n\r\nimport pecos\r\n\r\nimport seaborn as sns\r\n\r\nfrom pandas import json_normalize\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom datetime import datetime, date, time, timezone\r\n\r\nimport numpy as np\r\n\r\nimport csv\r\n\r\n# Step 1 Initiatlise pyOpenAQ API\r\n#\r\n# 1 There are no edits \r\n\r\nprint(\" STEP 1 \")\r\n\r\nprint(\"********\")\r\n\r\nprint(\"Initialise pyOpenAQ API\")\r\n\r\napi = openaq.OpenAQ()\r\n\r\n\r\ndef Milestone1_Get_OpenAQStation_Latlng(OpenAQStationCountry):\r\n \r\n \r\n OpenAQLatLng = api.locations(location=OpenAQStationCountry, df=True)\r\n\r\n OpenAQLatlngDataset = []\r\n\r\n\r\n print(OpenAQLatLng)\r\n \r\n OpenAQLatlngDataset.append(OpenAQLatLng['coordinates.latitude'])\r\n \r\n OpenAQLatlngDataset.append(OpenAQLatLng['coordinates.longitude'])\r\n\r\n return OpenAQLatlngDataset\r\n\r\n\r\ndef Milestone1_Get_OpnenAQ_Dataset_Measurement_perStation(StationOpenAQCoordinates, Radius, parameter, dt_begin, dt_end):\r\n \r\n#Step 1 Choose the measurement country to import and parameter \r\n \r\n# res1 = api.measurements(location=StationOpenAQ, parameter=parameter, date_to=dt_end, date_from=dt_begin, limit=10000, df=True)\r\n\r\n measure = \"\"\r\n\r\n# res1 = api.measurements(coordinates=\"24.4244,54.43375\", parameter=parameter, radius=250000, date_to=dt_end, date_from=dt_begin, df=True, limit=10000)\r\n\r\n res1 = api.measurements(parameter=parameter, radius=Radius, date_to=dt_end, date_from=dt_begin, df=True, limit=10000)\r\n\r\n print(\"Completed measurements \")\r\n\r\n return res1\r\n\r\n\r\ndef Milestone2_Import_OpenAQ_Scatter(Xaxis_Measurement, Yaxis, parameter, title, xlabel, ylabel):\r\n \r\n fig, ax = plt.subplots()\r\n scale = 200.0 \r\n 
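# fixed marker area in points^2; alpha=0.3 with no edge colour keeps overlapping stations readable\r\n    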
ax.scatter(Xaxis_Measurement, Yaxis, c='tab:blue', s=scale, label=parameter, alpha=0.3, edgecolors='none')\r\n \r\n ax.legend()\r\n ax.grid(True)\r\n plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)\r\n \r\n plt.show()\r\n\r\n\r\ndef Milestone3_Get_Imported_OpenAQ_Dataset(): \r\n \r\n OpenAQ_Dataset_LatlngCSV_Download = 'OpenAQ_DatasetUniquepm25CoordinateCentreandRadius2020-03-01to2020-09-01.csv'\r\n\r\n\r\n OpenAQ_Dataset_LatlngCSV_Download = \"../Milestone1_Importing-datasets-from-OpenAQ/OpenAQ_Dataset Unique selection pm25 One Station 2020-03-01 to 2020-09-01.csv\"\r\n \r\n\r\n df = pd.read_csv(OpenAQ_Dataset_LatlngCSV_Download) # 'OpenAQ_Dataset1pm25Country2020-03-01to2020-09-01.csv')\r\n \r\n \r\n \r\n print(df['value'])\r\n \r\n OpenAQdatasetsLatLng = []\r\n ImportOpenAQimported = pd.read_csv(OpenAQ_Dataset_LatlngCSV_Download)\r\n \r\n print(ImportOpenAQimported['parameter'])\r\n delimiterOpenAQ = ' '\r\n with open(OpenAQ_Dataset_LatlngCSV_Download,'r') as dest_f:\r\n \r\n print(dest_f)\r\n \r\n \r\n print(ImportOpenAQimported)\r\n \r\n data_iter = csv.reader(dest_f, delimiter=delimiterOpenAQ)\r\n \r\n for dataset in data_iter:\r\n OpenAQdatasetsLatLng.append(dataset)\r\n # print(dataset)\r\n \r\n # OpenAQDataset = np.asarray(OpenAQdatasetsLatLng)\r\n \r\n \r\n return ImportOpenAQimported # OpenAQdatasetsLatLng\r\n\r\n\r\ndef Milestone4_Get_NearestHighway_OpenAQStations():\r\n \r\n OpenAQ_Dataset_LatlngCSV_Download = \"OpenAQ_Latlng_DistancetoNearestHighway.csv\" # OpenAQLatlngNearestHighway.csv\"\r\n\r\n \r\n OpenAQdatasetsLatLng = []\r\n \r\n delimiterOpenAQ = ','\r\n with open(OpenAQ_Dataset_LatlngCSV_Download,'r') as dest_f:\r\n data_iter = csv.reader(dest_f, delimiter=delimiterOpenAQ)\r\n \r\n for dataset in data_iter:\r\n OpenAQdatasetsLatLng.append(dataset)\r\n # print(dataset)\r\n \r\n # OpenAQDataset = np.asarray(OpenAQdatasetsLatLng)\r\n \r\n \r\n return OpenAQdatasetsLatLng\r\n\r\ndef Milestone4_Get_NearestHighway_OpenAQStations_OneStation(OpenAQLatlng, OpenAQdatasetsLatLng):\r\n \r\n \r\n OpenAQStation_NearestDistance = 0;\r\n\r\n for OpenAQStationLatlng in OpenAQdatasetsLatLng[0]:\r\n \r\n # print(\" OpenAQ \")\r\n \r\n # print(OpenAQStationLatlng)\r\n # print(OpenAQLatlng[0])\r\n \r\n OpenAQStationDatasetLatlng = OpenAQStationLatlng.split('?')\r\n # print(OpenAQLatlng[0])\r\n # print(OpenAQLatlng[1][0])\r\n \r\n # print(OpenAQLatlng[1][0])\r\n \r\n # print(OpenAQStationDatasetLatlng) \r\n if(float(OpenAQStationDatasetLatlng[0]) == float(OpenAQLatlng[0][0])):\r\n if(float(OpenAQStationDatasetLatlng[1]) == float(OpenAQLatlng[1][0])):\r\n OpenAQStation_NearestDistance = OpenAQStationDatasetLatlng\r\n # print(OpenAQStationLatlng[0])\r\n # print(OpenAQLatlng[0])\r\n \r\n print(OpenAQStation_NearestDistance)\r\n # print(OpenAQLatlng[0][0])\r\n # print(OpenAQLatlng[1][0])\r\n \r\n \r\n \r\n return OpenAQStation_NearestDistance\r\n\r\n\r\n\r\ndef Milestone4_Get_NearestHighway_OpenAQStations():\r\n \r\n OpenAQ_Dataset_LatlngCSV_Download = \"OpenAQLatlngNearestHighway.csv\"\r\n\r\n \r\n OpenAQdatasetsLatLng = []\r\n \r\n delimiterOpenAQ = ','\r\n with open(OpenAQ_Dataset_LatlngCSV_Download,'r') as dest_f:\r\n data_iter = csv.reader(dest_f, delimiter=delimiterOpenAQ)\r\n \r\n for dataset in data_iter:\r\n OpenAQdatasetsLatLng.append(dataset)\r\n # print(dataset)\r\n \r\n # OpenAQDataset = np.asarray(OpenAQdatasetsLatLng)\r\n \r\n \r\n return OpenAQdatasetsLatLng\r\n\r\n\r\ndef Milestone4_Get_NearestHighway_OpenAQStations_Station(df4, parameter, OpenAQStationunique):\r\n 
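# for each station: look up its coordinates, attach its nearest-highway distance, and collect the mean of its measurements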
\r\n OpenAQAPIdatasetuniqueDataset = [] \r\n \r\n \r\n OpenAQAPIuniqueDataset = [] \r\n \r\n OpenAQNearestHighway = Milestone4_Get_NearestHighway_OpenAQStations() \r\n \r\n for OpenAQunique in OpenAQStationunique:\r\n \r\n print(OpenAQunique) \r\n \r\n OpenAQStationLatlng = Milestone1_Get_OpenAQStation_Latlng(OpenAQunique)\r\n \r\n OpenAQAPIdatasetunique = df4[df4['location'] == OpenAQunique]\r\n \r\n # print(OpenAQAPIdatasetunique)\r\n \r\n OpenAQStationLatlng_NearestHighway = Milestone4_Get_NearestHighway_OpenAQStations_OneStation(OpenAQStationLatlng, OpenAQNearestHighway)\r\n\r\n print(OpenAQAPIdatasetunique['value'].describe())\r\n\r\n print(\"Distance to the nearest Highway in Km \")\r\n if(OpenAQStationLatlng_NearestHighway != 0):\r\n if(len(OpenAQStationLatlng_NearestHighway) == 6):\r\n print(OpenAQStationLatlng_NearestHighway[5])\r\n\r\n OpenAQAPIuniqueDataset.append(OpenAQStationLatlng_NearestHighway[5])\r\n else:\r\n OpenAQAPIuniqueDataset.append(0)\r\n \r\n \r\n OpenAQAPIdatasetuniqueDataset.append(OpenAQAPIdatasetunique['value'].describe()['mean'])\r\n\r\n # print(\"For these OpenAQ Stations\")\r\n \r\n print(OpenAQAPIuniqueDataset)\r\n \r\n print(OpenAQAPIdatasetuniqueDataset)\r\n \r\n # Milestone2_Import_OpenAQ_CSV_plot(OpenAQStationunique, OpenAQAPIuniqueDataset, OpenAQAPIdatasetuniqueDataset, parameter, title=\"Distance Nearest Highway\", xlabel='Distance to Nearest Highway in Km', ylabel='Mean of Measurements', dpi=100)\r\n\r\n title=\"Distance Nearest Highway\"\r\n \r\n xlabel='Distance to Nearest Highway in Km'\r\n \r\n ylabel='Mean of Measurements'\r\n \r\n Milestone2_Import_OpenAQ_Scatter(OpenAQAPIuniqueDataset, OpenAQAPIdatasetuniqueDataset, parameter, title, xlabel, ylabel)\r\n \r\n \r\nOpenAQAPIdataset = Milestone3_Get_Imported_OpenAQ_Dataset()\r\n\r\nprint(\" STEP 1 \")\r\n\r\nprint(\"********\")\r\n\r\nprint(\"Get Distance to Nearest Highway\")\r\n\r\nparameter = 'pm25'\r\n\r\n# OpenAQStationLatlng = Milestone1_Get_OpenAQStation_Latlng(OpenAQStationCountry)\r\n\r\n# OpenAQStationLatlng_NearestHighway = Milestone4_Get_NearestHighway_OpenAQStations(OpenAQStationLatlng)\r\n\r\n# print(OpenAQAPIdataset['value'].describe())\r\n\r\nprint(\"Distance to the nearest Highway in Km \")\r\n\r\n# print(OpenAQStationLatlng_NearestHighway[4])\r\n\r\n\r\nOpenAQStationunique = OpenAQAPIdataset['location'].unique()\r\n\r\nMilestone4_Get_NearestHighway_OpenAQStations_Station(OpenAQAPIdataset, parameter, OpenAQStationunique)\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(\"Found these Stations in selection\")\r\n\r\nprint(OpenAQStationunique)\r\n\r\n# Step 4 Visual Analytics of OpenAQ Dataset \r\n#\r\n# Visual Analytics for Every Station or not\r\n#\r\n# Change VisualAnalytics_Complete = 1 \r\n#\r\n# 1 - Every station \r\n#\r\n# 0 - Only one histogram for OpenAQ Dataset\r\n\r\nVisualAnalytics_Complete = 1\r\n\r\n# Step 5 When OpenAQ API fails add Station failed on and retry\r\n#\r\n# 1 Change the variable to next number of statation after last completed \r\n#\r\n# 2 Change Completed_QC_Processes \r\n#\r\n# i.e. 
when 3 report completed change to 4 \r\n\r\nCompleted_QC_Processes = 0\r\n\r\nOpenAQNearestHighway = Milestone4_Get_NearestHighway_OpenAQStations()\r\n\r\n# QC_Pecos_OpenAQ_Results = Milestone3_Pecos_Quality_Control_EveryStation(OpenAQStations, parameter, Completed_QC_Processes, VisualAnalytics_Complete, OpenAQNearestHighway)\r\n\r\n\r\n\r\n\r\nprint(\"Get Distance to Nearest Highway\")\r\n\r\n# OpenAQStationLatlng = Milestone1_Get_OpenAQStation_Latlng(OpenAQStationCountry)\r\n\r\n# OpenAQStationLatlng_NearestHighway = Milestone4_Get_NearestHighway_OpenAQStations(OpenAQStationLatlng)\r\n\r\n# print(OpenAQAPIdataset['value'].describe())\r\n\r\nprint(\"Distance to the nearest Highway in Km \")\r\n\r\n# print(OpenAQStationLatlng_NearestHighway[4])\r\n\r\ntitle=\"Distance Nearest Highway\"\r\n \r\nxlabel='Distance to Nearest Highway in Km'\r\n \r\nylabel='Mean of Measurements'\r\n \r\n# Milestone2_Import_OpenAQ_Scatter(OpenAQAPIuniqueDataset, OpenAQAPIdatasetuniqueDataset, parameter, title, xlabel, ylabel)\r\n \r\n","sub_path":"Milestone4_Classification_of_Stations_attr/Milestone4_Get_NearestHighway_Coordinates.py","file_name":"Milestone4_Get_NearestHighway_Coordinates.py","file_ext":"py","file_size_in_byte":9341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"629937083","text":"import Chat, json, ChatConnector, os\n\nclass ChatManager:\n def __init__(self, objChatConnector):\n self.objChatConnector = objChatConnector\n #carrega chats do arquivo\n self.chatList=[]\n if not os.path.isfile('chatList.json'):\n with open('chatList.json', 'w') as chatListfile:\n chatListfile.write(\"{}\")\n else:\n with open('chatList.json', 'r', encoding='utf-8') as chatListfile:\n chatList = json.loads(chatListfile.read())\n\n\n #instancia chats\n if len(self.chatList)>0:\n self.chatList = [Chat.Chat(objChatConnector,chatID=chatID,\n chatName=chatList[chatID]['chatName'],\n destUsers=chatList[chatID]['destUsers'])\n for chatID in chatList]\n\n def newChat(self, _chatName, _destUsers):\n newChat = Chat.Chat(self.objChatConnector, chatName=_chatName, destUsers=_destUsers)\n self.chatList.append(newChat)\n\n\n\n def addChat(self, _chatName, _destUsers, _chatID):\n newChat = Chat.Chat(self.objChatConnector,chatName=_chatName, destUsers=_destUsers, chatID=_chatID)\n self.chatList.append(newChat)\n return newChat\n\n\n def rmChat(self, ):\n pass\n\n def saveChatList(self):\n dictChat={}\n for objChat in self.chatList:\n dictChat[objChat.chatID] ={\n \"chatName\":objChat.chatName,\n \"destUsers\":objChat.destUsers\n }\n with open('chatList.json', 'w', encoding='utf-8') as chatList:\n chatList.write(json.dumps(dictChat))\n\n\n def printChats(self):\n for chat in self.chatList:\n print(chat.toJson())\n\n","sub_path":"TestPeer/Peer/ChatManager.py","file_name":"ChatManager.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"611617601","text":"from django.test import TestCase\nfrom django.template import Context, Template\n\nfrom .models import Zone, Ad\n\n\nclass ZoneTestCase(TestCase):\n def setUp(self):\n self.first_zone = Zone.objects.create(\n name='First zone',\n system_name='first-zone',\n )\n self.second_zone = Zone.objects.create(\n name='Second zone',\n system_name='second-zone',\n )\n self.second_zone_ad = Ad.objects.create(\n name='Second zone ad',\n zone=self.second_zone,\n code='ad',\n )\n\n def test_get_by_system_name_create(self):\n count = 
Zone.objects.all().count()\n        zone = Zone.objects.get_by_system_name('unique-zone-name')\n        self.assertEqual(Zone.objects.all().count(), count + 1)\n        self.assertEqual(zone.system_name, 'unique-zone-name')\n        self.assertEqual(zone.name, 'unique-zone-name')\n\n    def test_get_by_system_name_get(self):\n        zone = Zone.objects.get_by_system_name('first-zone')\n        self.assertEqual(self.first_zone, zone)\n\n    def test_get_ad(self):\n        self.assertIsNone(self.first_zone.get_ad())\n        self.assertEqual(self.second_zone.get_ad(), self.second_zone_ad)\n\n    def test_get_ad_code(self):\n        self.assertEqual(self.first_zone.get_ad_code(), '')\n        self.assertEqual(self.second_zone.get_ad_code(), 'ad')\n\n\nclass TemplateTagsTestCase(TestCase):\n    def setUp(self):\n        self.zone = Zone.objects.get_by_system_name('zone')\n        self.ad = Ad.objects.create(\n            name='Ad',\n            zone=self.zone,\n            code='ad',\n        )\n\n    def render_template(self, string, context=None):\n        context = context or {}\n        context = Context(context)\n        return Template(string).render(context)\n\n    def test_ads_zone(self):\n        self.assertEqual(self.render_template(\"{% load ads %}{% ads 'zone' %}\"), 'ad')\n\n    def test_ads_new_zone(self):\n        count = Zone.objects.all().count()\n        self.assertEqual(self.render_template(\"{% load ads %}{% ads 'new_zone' %}\"), '')\n        self.assertEqual(Zone.objects.all().count(), count + 1)\n","sub_path":"django_ads/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"413645942","text":"# coding:utf-8\n# Logic functions behind the main window UI\n# import sys\nimport os\n# sys.path.append(r'..\.')\nimport pandas as pd\n# import json\n# import traceback\n\nfrom mainWinUI import Ui_MainWindow\n\nimport myLogging as mylogger\n\nfrom PyQt5.QtWidgets import QFileDialog,QTableWidgetItem,QMainWindow\n# from PyQt5.QtCore import Qt\nfrom PyQt5.Qt import QHeaderView\n\n\nimport cgitb\ncgitb.enable( format = 'text',logdir=mylogger.err_path)\n\n# Ui_MainWindow\nclass MyMainWindow(QMainWindow,Ui_MainWindow):\n    def __init__(self,parent=None):\n        super(MyMainWindow,self).__init__(parent)\n        # self.logger = myLogging()\n        mylogger.logger.debug('mainWin>init..')\n        self.setupUi(self)\n        self.initUI()\n\n        self.deal_taizhang = '' # deal_taizhang()\n        self.fileList = {}\n        # self.townList = ''\n        self.workPath = os.getcwd()\n        self.A = ''\n        self.A_flag = False\n        self.B = ''\n        self.B_flag = False\n        self.xiaoqu = ''\n        self.xiaoqu_flag = False\n        self.zhuzhai = ''\n        self.zhuzhai_flag = False\n        self.zhuhu = ''\n        self.zhuhu_flag = False\n        self.zy = ''\n        self.zy_flag = False\n\n        self.tz = ''\n        self.tz_flag = False\n        self.townTable = ''\n\n        self.location = ''\n\n        self.page_break_row = []\n\n        # Column headers for the A, B and D audit result output tables\n        self.A_table_head = {'year':[],'sid':[],'scode':[],'name':[],'code':[],'提示内容':[],'townname':[],'vname':[]}\n        self.An_check_result = pd.DataFrame(self.A_table_head)\n        self.An_check_result = self.An_check_result[['year','sid', 'scode', 'name', 'code', '提示内容', 'townname', 'vname']]\n        self.As_check_result = pd.DataFrame(self.A_table_head)\n        self.As_check_result = self.As_check_result[['year', 'sid', 'scode', 'name', 'code', '提示内容', 'townname', 'vname']]\n\n        self.B_table_head = {'year':[], 'sid': [], 'scode': [], 'code': [], '提示内容': [], 'townname': [], 'vname': []}\n        self.Bn_check_result = pd.DataFrame(self.B_table_head)\n        self.Bn_check_result = self.Bn_check_result[['year', 'sid', 'scode', 'code', '提示内容', 'townname', 'vname']]\n        self.Bs_check_result = pd.DataFrame(self.B_table_head)\n        
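# keep a fixed column order for the exported audit-result sheet\n        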
self.Bs_check_result = self.Bs_check_result[['year', 'sid', 'scode', 'code', '提示内容', 'townname', 'vname']]\n\n self.zy_table_head = {'year': [], 'task': [], 'month': [], 'scode': [], 'sid': [], 'person': [], 'name': [], 'code': [],'核实内容': [], 'townname': [], 'vname': []}\n self.zs_check_result = pd.DataFrame(self.zy_table_head)\n self.zs_check_result = self.zs_check_result[['year', 'month', 'task', 'scode', 'sid', 'person', 'name', 'code', '核实内容', 'townname', 'vname']]\n\n self.zn_check_result = pd.DataFrame(self.zy_table_head)\n self.zn_check_result = self.zn_check_result[['year', 'month', 'task', 'scode', 'sid', 'person', 'name', 'code', '核实内容', 'townname', 'vname']]\n\n self.now_show_table = ''\n # self.openTownList()\n self.connectSlot()\n mylogger.logger.debug('mainWin>init ok')\n\n\n # 重新设置UI界面\n def initUI(self):\n self.splitter.setStretchFactor(1,3)\n self.splitter_2.setStretchFactor(0,1)\n # self.tableData.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.tableData.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)\n\n # self.setWindowFlags(Qt.FramelessWindowHint)\n\n # 为组件绑定事件\n def connectSlot(self):\n # self.actionimportFile = QtWidgets.QAction(\"导入文件\", self, triggered=self.openFile)\n self.actionimportFile.triggered.connect(self.showInTable)\n self.actionworkspace.triggered.connect(self.selectWorkDirecory)\n self.actionIntroduce.triggered.connect(self.helpText)\n\n\n self.open_tableA.clicked.connect(self.setTableA)\n self.open_zy.clicked.connect(self.setZy)\n self.xz_comboBox.currentIndexChanged.connect(self.townSelectChange)\n self.generateTz.clicked.connect(self.genTz)\n self.import_townList.clicked.connect(self.openTownList)\n self.export_taizhang.clicked.connect(self.save_to_csv)\n\n # self.importTable.clicked.connect(self.importTableFiles)\n self.importTable.clicked.connect(self.openImportFileDialog)\n self.questionCheck.clicked.connect(self.openQuestionCheckDialog)\n self.zyCheck.clicked.connect(self.openzyCheckDialog)\n self.file_list.itemClicked.connect(self.listSelection)\n\n self.searchButton.clicked.connect(self.search)\n\n def selectWorkDirecory(self):\n self.workPath = QFileDialog.getExistingDirectory(self,\"选择工作目录\",os.getcwd())\n\n def helpText(self):\n mylogger.logger.debug(\"help\")\n with open(\"使用帮助.txt\",\"r\",encoding=\"utf-8\") as f:\n # print(\"open\")\n txt = f.read()\n # print(\"txt\",txt)\n self.textEdit.setText(txt)\n # print(\"打开使用帮助\")\n self.statusbar.showMessage(\"打开使用帮助\",3000)\n\n def showMaximized(self):\n desktop = QApplication.desktop()\n rect = desktop.availableGeometry()\n self.setGeometry(rect)\n # self.show()\n\n def search(self):\n sid = self.searchlineEdit.text()\n if sid == \"\":\n self.statusbar.showMessage(\"请在输入框中填入要搜索户的SID\")\n else:\n if self.A_flag == False:\n self.statusbar.showMessage(\"请先导入A表\")\n else:\n family = self.A[self.A[\"SID\"] == sid]\n if family.empty == False:\n # 按照人码进行排序\n family = family.sort_values(by='A100')\n family.fillna('')\n # print(family)\n res = self.setFamily(family)\n self.showData(res)\n self.statusbar.showMessage(\"查找成功\",3000)\n else:\n self.statusbar.showMessage(\"A表中无此户数据\")\n\n def setFamily(self,family):\n zhibiao_arr = [\"成员编码\",\"姓名\",\"本期住户成员变动情况\",\"与本户户主的关系\",\"性别\",\"出生年月\",\"民族\",\"户口登记地\",\"户口性��\",\"健康状况\",\"参加何种医疗保险\",\"是否在校学生\",\"受教育程度\",\"婚姻状况\",\"是否持证残疾人\",\"在本住宅居住时间\",\"是否每月到其他住宅居住\",\"是否每月到本住宅居住一天以上\",\"是否打算居住本宅超过一个半月\",\"是否常住人口\", 
\"成员编码\",\"是否离退休人员\",\"参加何种养老保险\",\"是否丧失劳动能力\",\"是否从业过\",\"主要就业状况\",\"主要行业\",\"主要职业\",\"工作总时间\",\"工作地点\",\"最远去哪里工作或学习(上大学)过\",\"您认为自己主要属于下列哪个群体\",\"您认为自己还属于下列哪个群体\",\"您拥有的与当前职业相关的最高技能等级证书或职业技能证书\",\"您拥有的与当前职业相关的最高技术职称\"]\n code_arr = [\"A100\",\"A101\",\"A102\",\"A103\",\"A104\",\"A105\",\"A107\",\"A108\",\"A109\",\"A110\",\"A111\",\"A112\",\"A113\",\"A114\",\"A120\",\"A115\",\"A116\",\"A117\",\"A118\",\"A119\", \"A200\",\"A201\",\"A202\",\"A203\",\"A204\",\"A205\",\"A206\",\"A207\",\"A208\",\"A209\",\"A210\",\"A211\",\"A212\",\"A213\",\"A214\"]\n index_arr = ['一', '二', '三', '四', '五', '六', '七', '八','九']\n data = {'指标': [], '编码': [], '一': [], '二': [], '三': [], '四': [], '五': [], '六': [], '七': [], '八': [],'九': []}\n result = pd.DataFrame(data)\n result = result[['指标', '编码', '一', '二', '三', '四', '五', '六', '七', '八','九']]\n # 一户总人数\n rowCount = family.iloc[:, 0].size\n for (zhibiao, code) in zip(zhibiao_arr, code_arr):\n dict = {\"指标\":zhibiao,\"编码\":code}\n code_data = family[code]\n # print(\"code_data\",code_data,type(code_data))\n for i in range(rowCount):\n key = index_arr[i]\n # print(key,code_data.values[i])\n dict[key] = code_data.values[i]\n result = result.append(dict, ignore_index=True)\n return result\n\n # list点击触发事件\n def listSelection(self,index):\n try:\n tb = self.fileList[index.text()]\n if tb.empty == False:\n self.now_show_table = tb\n # print(tb)\n self.statusbar.showMessage(\"正在切换至表\" + index.text() + \",请稍等...\")\n self.showData(tb)\n self.statusbar.showMessage(\"切换成功\",3000)\n\n else:\n self.statusbar.showMessage(index.text() + \"为空?请确认\")\n except Exception as e:\n self.statusbar.showMessage(\"程序异常,请重新操作\")\n\n # 向列表中添加项\n def addToList(self,key,value):\n self.file_list.addItem(key)\n self.fileList[key] = value\n # 文件List选择\n def listSelect(self,item):\n if item.text() == \"A表\":\n self.showData(self.A)\n if item.text() == \"B表\":\n self.showData(self.B)\n if item.text() == \"住宅名录\":\n self.showData(self.zhuzhai)\n if item.text() == \"小区名录\":\n self.showData(self.xiaoqu)\n if item.text() == \"住户名录\":\n self.showData(self.zhuhu)\n if item.text() == \"账页表\":\n self.showData(self.zy)\n if item.text() == \"台账结果\":\n self.showData(self.now_show_table)\n\n # 打开csv文件 返回DataFrame对象\n def read_csv(self,path):\n with open(path, 'r') as f:\n mylogger.logger.debug('mainWin>openFile:' + path)\n file = pd.read_csv(f, header=0,low_memory=False)\n return file\n\n #导入相关文件\n def importTableFiles(self):\n if self.A_flag == False:\n self.A,self.A_flag = self.openFile(\"请导入A表\")\n # if self.A_flag == True:\n # self.file_list.addItem(\"A表\")\n # self.fileList.append(self.A)\n if self.B_flag == False:\n self.B,self.B_flag = self.openFile(\"请导入B表\")\n # if self.B_flag == True:\n # self.file_list.addItem(\"B表\")\n # self.fileList.append(self.B)\n\n if self.zhuzhai_flag == False:\n self.zhuzhai,self.zhuzhai_flag = self.openFile(\"请导入住宅名录\")\n # if self.zhuzhai_flag == True:\n # self.file_list.addItem(\"住宅名录\")\n # self.fileList.append(self.zhuzhai)\n\n if self.xiaoqu_flag == False:\n self.xiaoqu,self.xiaoqu_flag = self.openFile(\"请导入小区名录\")\n # if self.xiaoqu_flag == True:\n # self.file_list.addItem(\"小区名录\")\n # self.fileList.append(self.xiaoqu)\n\n if self.zhuhu_flag == False:\n self.zhuhu,self.zhuhu_flag = self.openFile(\"请导入住户名录\")\n # if self.zhuhu_flag == True:\n # self.file_list.addItem(\"住户名录\")\n # self.fileList.append(self.zhuhu)\n\n if self.zy_flag == False:\n self.zy,self.zy_flag = self.openFile(\"请导入账页数据\")\n col = self.colUpper(self.zy.columns.values.tolist())\n self.zy = 
self.zy.rename(columns=col)\n # print(self.zy)\n # if self.zy_flag == True:\n # self.file_list.addItem(\"账页表\")\n # self.fileList.append(self.zy)\n\n\n # 打开审核问卷对话框\n def openQuestionCheckDialog(self):\n from A_necessity_check import A_necessity_check\n from A_suggestion_check import A_suggestion_check\n from B_necessity_check import B_necessity_check\n from B_suggestion_check import B_suggestion_check\n from questionCheckDialog import QuestionCheckDialog\n qcd = QuestionCheckDialog(self)\n result = qcd.exec_()\n An, As, Bn, Bs, range = qcd.getData()\n\n if An == True or As == True:\n if self.A_flag == True and self.xiaoqu_flag == True:\n if An == True and self.zhuhu_flag == True:\n An_outcome = A_necessity_check(self.A,self.zhuhu,self.xiaoqu,self.An_check_result)\n # print(type(An_outcome))\n Anfilename = 'A_necessity_result.xlsx'\n An_outcome.to_excel('./A_necessity_result.xlsx',encoding=\"utf-8\",index=False,sheet_name='Sheet')\n self.addToList(Anfilename,An_outcome)\n self.showData(An_outcome,100)\n\n if As == True:\n As_outcome = A_suggestion_check(self.A,self.xiaoqu,self.As_check_result)\n Asfilename = 'A_suggestion_result.xlsx'\n As_outcome.to_excel('./A_suggestion_result.xlsx', encoding=\"utf-8\", index=False,sheet_name='Sheet')\n self.addToList(Asfilename, As_outcome)\n self.showData(As_outcome, 100)\n\n else:\n if self.A_flag == False:\n self.statusbar.showMessage(\"请先导入A表\")\n if self.xiaoqu_flag == False:\n self.statusbar.showMessage(\"请先导入小区表\")\n if self.zhuhu_flag == False and An == True:\n self.statusbar.showMessage(\"请先导入住户表\")\n\n if Bn == True or Bs == True:\n if self.B_flag == True and self.zhuhu_flag == True and self.zhuzhai_flag == True and self.xiaoqu_flag == True:\n if Bn == True:\n Bn_outcome = B_necessity_check(self.B,self.zhuhu,self.zhuzhai,self.xiaoqu,self.Bn_check_result)\n Bnfilename = 'B_necessity_result.xlsx'\n Bn_outcome.to_excel('./B_necessity_result.xlsx',encoding=\"utf-8\",index=False,sheet_name='Sheet')\n self.addToList(Bnfilename,Bn_outcome)\n self.showData(Bn_outcome,100)\n if Bs == True:\n Bs_outcome = B_suggestion_check(self.B, self.zhuhu, self.zhuzhai, self.xiaoqu,self.Bs_check_result)\n Bsfilename = 'B_suggestion_result.xlsx'\n Bs_outcome.to_excel('./B_suggestion_result.xlsx', encoding=\"utf-8\", index=False,sheet_name='Sheet')\n self.addToList(Bsfilename, Bs_outcome)\n self.showData(Bs_outcome, 100)\n else:\n if self.B_flag == False:\n self.statusbar.showMessage(\"请先导入B表\")\n if self.zhuhu_flag == False:\n self.statusbar.showMessage(\"请先导入住户表\")\n if self.zhuzhai_flag == False:\n self.statusbar.showMessage(\"请先导入住宅表\")\n if self.xiaoqu_flag == False:\n self.statusbar.showMessage(\"请先导入小区表\")\n\n # print(An,As,Bn,Bs,range)\n # print(\"result:\",result)\n # qcd.show()\n\n # 打开导入相关文件对话框\n def openImportFileDialog(self):\n from importFileDialog import importFileDialog\n importFile = importFileDialog(self,self.workPath)\n result = importFile.exec_()\n res_path = importFile.getPath()\n info = \"打开\"\n # print(res_path)\n if res_path[\"A\"] != \"\":\n info += \"A,\"\n self.A,self.A_flag = self.openFileByPath(res_path[\"A\"])\n\n if res_path[\"B\"] != \"\":\n self.B, self.B_flag = self.openFileByPath(res_path[\"B\"])\n info += \"B,\"\n\n if res_path[\"住户\"] != \"\":\n self.zhuhu, self.zhuhu_flag = self.openFileByPath(res_path[\"住户\"])\n info += \"住户,\"\n\n if res_path[\"住宅\"] != \"\":\n self.zhuzhai, self.zhuzhai_flag = self.openFileByPath(res_path[\"住宅\"])\n info += \"住宅,\"\n\n if res_path[\"小区\"] != \"\":\n self.xiaoqu, self.xiaoqu_flag = 
self.openFileByPath(res_path[\"小区\"])\n info += \"住宅,\"\n if res_path[\"账页表\"] != \"\":\n self.zy, self.zy_flag = self.openFileByPath(res_path[\"账页表\"])\n info += \"账页表,\"\n info += \"成功\"\n if info == \"打开成功\":\n self.statusbar.showMessage(\"未选择打开新文件\")\n else:\n self.statusbar.showMessage(info)\n\n # 打开审核账页对话框\n def openzyCheckDialog(self):\n from zyCheckDialog import zyCheckDialog\n from zy_check_necessity import zy_check_necessity\n from zy_check_suggestion import zy_check_suggestion\n zcd = zyCheckDialog(self)\n result = zcd.exec_()\n Zn,Zs,Za,townRange,monthRange,directory = zcd.getData()\n if Zs == True or Zn == True or Za == True:\n if self.zy_flag == True and self.A_flag == True and self.B_flag == True and self.zhuhu_flag == True and \\\n self.zhuzhai_flag == True and self.zhuhu_flag == True and self.xiaoqu_flag == True:\n if Zs == True:\n Zs_outcome = zy_check_suggestion(self.A,self.B,self.zy,self.zhuzhai,self.zhuhu,self.xiaoqu,self.zs_check_result)\n Zsfilename = 'zy_suggestion_result.xlsx'\n Zs_outcome.to_excel(directory+'/'+Zsfilename, encoding=\"utf-8\", index=False,sheet_name='Sheet')\n self.addToList(Zsfilename, Zs_outcome)\n self.showData(Zs_outcome, 100)\n if Zn == True:\n Zn_outcome = zy_check_necessity(self.A,self.B,self.zy,self.zhuzhai,self.zhuhu,self.xiaoqu,self.zn_check_result)\n Znfilename = 'zy_necessity_result.xlsx'\n Zn_outcome.to_excel(directory+'/'+Znfilename, encoding=\"utf-8\", index=False,sheet_name='Sheet')\n self.addToList(Znfilename, Zn_outcome)\n self.showData(Zn_outcome, 100)\n\n else:\n if self.zy_flag == False:\n self.statusbar.showMessage(\"请先导入账页表\")\n if self.A_flag == False:\n self.statusbar.showMessage(\"请先导入A表\")\n if self.B_flag == False:\n self.statusbar.showMessage(\"请先导入B表\")\n if self.zhuhu_flag == False:\n self.statusbar.showMessage(\"请先导入住户表\")\n if self.zhuzhai_flag == False:\n self.statusbar.showMessage(\"请先导入住宅表\")\n if self.xiaoqu_flag == False:\n self.statusbar.showMessage(\"请先导入小区表\")\n\n def openFileByPath(self,filePath):\n try:\n mylogger.logger.debug(\"mainWin>function:openFile:try\")\n df = self.read_csv(filePath)\n filepath, name = os.path.split(filePath)\n self.addToList(name, df)\n col = self.colUpper(df.columns.values.tolist())\n df = df.rename(columns=col)\n return df, True\n except Exception as e:\n mylogger.logger.error(\"openFileByPath() exception\")\n mylogger.logger.error(e)\n # print(\"openFile Error\",e)\n return '', False\n # 将所有列名换成大写\n def colUpper(self,col):\n dict = {}\n for key in col:\n value = key.upper()\n # print(value)\n dict[key] = value\n return dict\n\n # 打开文件\n # 输入:打开文件的提示信息\n # 返回值:\n # 若选中文件,返回:已打开文件的pandas对象,是否打开文件标志位-True\n # 未选中文件,返回:空,False\n def openFile(self,tip=\"选取文件\"):\n # print(\"openFile\")\n fileName1, filetype = QFileDialog.getOpenFileName(self,\n tip,\n self.workPath,\n \"All Files (*);;Text Files (*.txt)\") # 设置文件扩展名过滤,注意用双分号间隔\n\n # print(\"filename>\",fileName1,\"<\",type(fileName1),\"len:\",fileName1)\n mylogger.logger.debug(\"mainWin>function:openFile:%s\"%fileName1)\n # 若没有选中文件\n if fileName1.strip() != \"\":\n # print(fileName1, filetype)\n\n try:\n # print(\"try\")\n mylogger.logger.debug(\"mainWin>function:openFile:try\")\n df = self.read_csv(fileName1)\n\n # fileName = fileName1.split(\"/\")[-1]\n filpath,name = os.path.split(fileName1)\n # print(fileName,name)\n self.addToList(name,df)\n col = self.colUpper(df.columns.values.tolist())\n df = df.rename(columns=col)\n return df,True\n except Exception as e:\n mylogger.logger.error(\"mainWin>function:openFile:exception\")\n 
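# surface the exception object itself on its own log line so the failing file can be diagnosed\n                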
mylogger.logger.error(e)\n # print(\"openFile Error\",e)\n return '',False\n else:\n self.statusbar.showMessage(\"未选择文件\",3000)\n return '',False\n\n # 导入小区名录时触发\n def openTownList(self):\n # file_path = \"D:/Document/Code/Python/AuditingApp/src/输入文件夹/小区名录310151.18.csv\"\n # self.read_csv(file_path)\n self.townTable,flag = self.openFile(\"请导入小区名录\")\n mylogger.logger.debug(\"openTownList openFile\")\n if flag == False:\n mylogger.logger.debug(\"openTownList 未选中文件\")\n else:\n mylogger.logger.debug(\"in openTownList else\")\n # try:\n if self.townTable[\"TOWNNAME\"].values[0].strip() == \"乡镇名称\":\n self.townTable = self.townTable.drop(0)\n # townList = self.townTable[\"townName\"].drop_duplicates()\n townList = [\"所有乡镇\"]\n arr = list(self.townTable[\"TOWNNAME\"].drop_duplicates())\n for i in arr:\n townList.append(i)\n self.xz_comboBox.clear()\n self.xz_comboBox.addItems(townList)\n # communityList = [\"所有居委会\"]\n # self.xq_comboBox.clear()\n # self.xq_comboBox.addItems(communityList)\n self.townSelectChange()\n # print(\"townList:\",townList)\n mylogger.logger.debug('mainWin>function:openTownList')\n self.statusbar.showMessage(\"打开文件成功\",5000)\n # except Exception as e:\n # print(e)\n # mylogger.logger.debug('mainWin>function:openTownList exception')\n # self.statusbar.showMessage(\"打开的文件有误,请重新选择文件\",5000)\n\n # 选取生成台账的小区触发函数\n def townSelectChange(self):\n mylogger.logger.debug(\"in townSelectChange\")\n town = self.xz_comboBox.currentText()\n if town == \"所有乡镇\":\n communityList = [\"所有居委会\"]\n self.xq_comboBox.clear()\n self.xq_comboBox.addItems(communityList)\n else:\n # community = self.townTable[self.townTable[\"townName\"] == town]\n\n community = self.townTable[self.townTable[\"TOWNNAME\"] == town]\n # communityList = community[\"vName\"].drop_duplicates()\n communityList = [\"所有居委会\"]\n arr = list(community[\"VNAME\"].drop_duplicates())\n for i in arr:\n communityList.append(i)\n self.xq_comboBox.clear()\n self.xq_comboBox.addItems(communityList)\n\n def getCommunityCode(self):\n townName = self.xz_comboBox.currentText()\n communityName = self.xq_comboBox.currentText()\n code = str(self.townTable[\"VID\"].values[0])\n\n if townName == \"所有乡镇\":\n # 310151101027001\n # 151101101214001\n communityCode = code[0:3]\n # self.location = townName\n else:\n code = str(self.townTable[self.townTable[\"TOWNNAME\"] == townName][\"VID\"].values[0])\n if communityName == \"所有居委会\":\n if code[3:6] == code[6:9]:\n communityCode = code[0:6]\n else:\n communityCode = code[0:9]\n else:\n # communityList = self.townTable[self.townTable[\"vName\"] == communityName]\n communityList = self.townTable[self.townTable[\"VNAME\"] == communityName]\n # communityCode = str(communityList[\"vID\"].values[0])\n communityCode = str(communityList[\"VID\"].values[0])\n mylogger.logger.debug('mainWin>function:getCommunityCode')\n self.location = townName + communityName\n # location = townName + \" \" + communityName\n return communityCode\n\n def setTableA(self):\n # print('setA')\n mylogger.logger.debug('mainWin>function:setTableA')\n self.A,self.A_flag = self.openFile(\"请导入A表\")\n\n def setZy(self):\n # print('setzy')\n mylogger.logger.debug('mainWin>function:setTableZy')\n self.zy,self.zy_flag = self.openFile(\"请导入账页表\")\n\n def genTz(self):\n try:\n from deal_taizhang import deal_taizhang\n self.deal_taizhang = deal_taizhang()\n if self.xq_comboBox.currentText() == '':\n self.statusbar.showMessage(\"请先导入小区名录\")\n else:\n if self.A_flag == False or self.zy_flag == False:\n # print(\"请先导入A表与账页表\")\n 
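# \"请先导入A表与账页表\" means \"please import table A and the ledger table first\"\n                    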
self.statusbar.showMessage(\"请先导入A表与账页表\")\n else:\n # print('生成台账')\n self.statusbar.showMessage(\"生成台账中...\")\n communityCode = self.getCommunityCode()\n # print(\"communityCode:\",communityCode)\n # print(\"mainWin type:\", type(communityCode))\n self.now_show_table = self.deal_taizhang.getCommunity(communityCode,self.townTable,self.A,self.zy)\n if self.now_show_table.empty == False:\n # if self.tz_flag == False:\n key = self.location + \"台账\"\n self.addToList(key,self.now_show_table)\n self.statusbar.showMessage(\"生成台账成功\")\n mylogger.logger.debug(\"获取到生成台账结果\")\n # self.clearTable()\n self.showData(self.now_show_table,False)\n # deal_taizhang.spliteFamily(self.A,self.zy)\n else:\n self.statusbar.showMessage(\"生成台账无数据,请确认是否正确选择对应乡镇\")\n except Exception as e:\n # print(e)\n mylogger.logger.error(e)\n self.statusbar.showMessage(\"生成台账出错,请检查使用数据是否正确\")\n\n def showInTable(self):\n self.statusbar.showMessage(\"正在打开文件\")\n df,flag = self.openFile()\n # print(pd.isnull(df))\n if flag == False:\n print(\"空文件\")\n self.statusbar.showMessage(\"空文件\")\n\n else:\n self.statusbar.showMessage(\"获取数据中,请稍等\")\n print('getData')\n # 获取表头\n df = df.fillna('')\n header = df.columns.values.tolist() # [str(col) for col in df]\n # 获取表的行列数\n colCount = df.columns.size\n rowCount = df.iloc[:, 0].size\n # 设置表行数\n self.tableData.setRowCount(rowCount)\n # 设置表列数\n self.tableData.setColumnCount(colCount)\n self.tableData.clear()\n # 重新设置表头\n self.tableData.setHorizontalHeaderLabels(header)\n for r in range(rowCount):\n for c in range(colCount):\n item = df.iat[r, c]\n self.tableData.setItem(r, c, QTableWidgetItem(str(item)))\n self.statusbar.showMessage(\"打开文件成功\")\n\n def clearTable(self):\n self.tableData.clear()\n rowCount = self.tableData.rowCount()\n # print(rowCount)\n for row in range(0,rowCount):\n self.tableData.removeRow(row)\n\n def showData(self,df,max=True):\n df = df.fillna('')\n mylogger.logger.debug(\"将结果显示在表上\")\n header = df.columns.values.tolist() # [str(col) for col in df]\n # 获取表的行列数\n colCount = df.columns.size\n rowCount = df.iloc[:, 0].size\n if max == True:\n if rowCount > 100:rowCount = 100\n # 设置表行数\n self.tableData.setRowCount(rowCount)\n # 设置表列数\n self.tableData.setColumnCount(colCount)\n self.tableData.clear()\n # 重新设置表头\n self.tableData.setHorizontalHeaderLabels(header)\n for r in range(0,rowCount):\n for c in range(colCount):\n item = df.iat[r, c]\n self.tableData.setItem(r, c, QTableWidgetItem(str(item)))\n\n def save_to_csv(self):\n try:\n from page_break_print import page_break_print\n path = './' + self.location + '台账结果.xlsx'\n self.now_show_table.to_excel(path,encoding=\"utf-8\",index=False,sheet_name='Sheet')\n self.statusbar.showMessage(\"正在添加分页符...\")\n page_break_print(path)\n self.statusbar.showMessage(\"生成文件\" + path + \"成功\")\n\n # self.now_show_table.to_csv(\"台账结果.csv\",encoding=\"utf-8\",index=False)\n mylogger.logger.debug(\"生成台账结果成功\")\n except Exception as e:\n self.statusbar.showMessage(\"生成台账出错,请检查操作步骤是否正确?\")\n\n\n def saveFile(self):\n filename = QFileDialog.getSaveFileName(self, 'save file', '/home/jm/study')\n with open(filename[0], 'w') as f:\n my_text = self.textEdit.toPlainText()\n f.write(my_text)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mywin = MyMainWindow()\n mywin.show()\n sys.exit(app.exec_())","sub_path":"mainWin.py","file_name":"mainWin.py","file_ext":"py","file_size_in_byte":30246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"150892881","text":"import sqlite3\nconn = 
sqlite3.connect('0000.sqlite')\ncursor = conn.cursor()\n\nclass ScoreGame:\n    \n    def __init__(self, c=None, d=None):  # defaults keep the no-argument ScoreGame() call below working\n        self.id = c\n        self.score = d\n    \n    def A(self):\n        a = int(input(\"請輸入成績id?\"))\n        b = int(input(\"請輸入成績?\"))\n        # use a parameterized query so the inputs cannot inject SQL\n        cursor.execute(\"insert into Enter_Score values (?,?)\", (a, b))\n        conn.commit()\n        conn.close()\n    \npp = ScoreGame()\npp.A()\n","sub_path":"1-A.py","file_name":"1-A.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"170595761","text":"# Sexes\nMALE = \"M\"\nFEMALE = \"F\"\nOTHER = \"O\"\nSEXES = (\n    (MALE, \"Male\"),\n    (FEMALE, \"Female\"),\n    (OTHER, \"Other\"),\n)\n\n\n# Classifications\nNO_CLASS = \"NONE\"\nFRESHMAN = \"FR\"\nSOPHOMORE = \"SO\"\nJUNIOR = \"JR\"\nSENIOR = \"SR\"\nSUPER_SENIOR = \"SR+\"\nSCHOOL_CLASSIFICATION = (\n    (NO_CLASS, \"Not a Student\"),\n    (FRESHMAN, \"Freshman\"),\n    (SOPHOMORE, \"Sophomore\"),\n    (JUNIOR, \"Junior\"),\n    (SENIOR, \"Senior\"),\n    (SUPER_SENIOR, \"Super Senior\")\n)\n\n\n# Clubs\nALPHA_GAMMA_OMEGA = \"ALPHA\"\nCHI_LAMBDA_PHI = \"CHI\"\nDELTA_GAMMA_SIGMA = \"DELTA\"\nGAMMA_RHO = \"GAMMA\"\nIOTA_KAPPA_PHI = \"IOTA\"\nKAPPA_SIGMA_TAU = \"KAPPA\"\nPHI_OMEGA_NU = \"PHI\"\nPI_ZETA_PHI = \"PI\"\nPSI_EPSILON = \"PSI\"\nTHETA_THETA_THETA = \"THETA\"\nNO_CLUB = \"NONE\"\nSOCIAL_CLUBS = (\n    (ALPHA_GAMMA_OMEGA, \"Alpha Gamma Omega\"),\n    (CHI_LAMBDA_PHI, \"Chi Lambda Phi\"),\n    (DELTA_GAMMA_SIGMA, \"Delta Gamma Sigma\"),\n    (GAMMA_RHO, \"Gamma Rho\"),\n    (IOTA_KAPPA_PHI, \"Iota Kappa Phi\"),\n    (KAPPA_SIGMA_TAU, \"Kappa Sigma Tau\"),\n    (PHI_OMEGA_NU, \"Phi Omega Nu\"),\n    (PI_ZETA_PHI, \"Pi Zeta Phi\"),\n    (PSI_EPSILON, \"Psi Epsilon\"),\n    (THETA_THETA_THETA, \"Theta Theta Theta\"),\n    (NO_CLUB, \"No club affiliate\")\n)\n\nMALE_CLUBS = [ALPHA_GAMMA_OMEGA, CHI_LAMBDA_PHI, DELTA_GAMMA_SIGMA, KAPPA_SIGMA_TAU, PSI_EPSILON]\nFEMALE_CLUBS = [GAMMA_RHO, IOTA_KAPPA_PHI, PHI_OMEGA_NU, PI_ZETA_PHI, THETA_THETA_THETA]\n","sub_path":"twitter/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"644136689","text":"import unittest\nfrom unittest import mock\n\nfrom bot.cogs.sync.syncers import UserSyncer, _Diff, _User\nfrom tests import helpers\n\n\ndef fake_user(**kwargs):\n    \"\"\"Fixture to return a dictionary representing a user with default values set.\"\"\"\n    kwargs.setdefault(\"id\", 43)\n    kwargs.setdefault(\"name\", \"bob the test man\")\n    kwargs.setdefault(\"discriminator\", 1337)\n    kwargs.setdefault(\"roles\", (666,))\n    kwargs.setdefault(\"in_guild\", True)\n\n    return kwargs\n\n\nclass UserSyncerDiffTests(unittest.IsolatedAsyncioTestCase):\n    \"\"\"Tests for determining differences between users in the DB and users in the Guild cache.\"\"\"\n\n    def setUp(self):\n        self.bot = helpers.MockBot()\n        self.syncer = UserSyncer(self.bot)\n\n    @staticmethod\n    def get_guild(*members):\n        \"\"\"Fixture to return a guild object with the given members.\"\"\"\n        guild = helpers.MockGuild()\n        guild.members = []\n\n        for member in members:\n            member = member.copy()\n            del member[\"in_guild\"]\n\n            mock_member = helpers.MockMember(**member)\n            mock_member.roles = [helpers.MockRole(id=role_id) for role_id in member[\"roles\"]]\n\n            guild.members.append(mock_member)\n\n        return guild\n\n    async def test_empty_diff_for_no_users(self):\n        \"\"\"When no users are given, an empty diff should be returned.\"\"\"\n        guild = self.get_guild()\n\n        actual_diff = 
await self.syncer._get_diff(guild)\n expected_diff = (set(), set(), None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n async def test_empty_diff_for_identical_users(self):\n \"\"\"No differences should be found if the users in the guild and DB are identical.\"\"\"\n self.bot.api_client.get.return_value = [fake_user()]\n guild = self.get_guild(fake_user())\n\n actual_diff = await self.syncer._get_diff(guild)\n expected_diff = (set(), set(), None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n async def test_diff_for_updated_users(self):\n \"\"\"Only updated users should be added to the 'updated' set of the diff.\"\"\"\n updated_user = fake_user(id=99, name=\"new\")\n\n self.bot.api_client.get.return_value = [fake_user(id=99, name=\"old\"), fake_user()]\n guild = self.get_guild(updated_user, fake_user())\n\n actual_diff = await self.syncer._get_diff(guild)\n expected_diff = (set(), {_User(**updated_user)}, None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n async def test_diff_for_new_users(self):\n \"\"\"Only new users should be added to the 'created' set of the diff.\"\"\"\n new_user = fake_user(id=99, name=\"new\")\n\n self.bot.api_client.get.return_value = [fake_user()]\n guild = self.get_guild(fake_user(), new_user)\n\n actual_diff = await self.syncer._get_diff(guild)\n expected_diff = ({_User(**new_user)}, set(), None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n async def test_diff_sets_in_guild_false_for_leaving_users(self):\n \"\"\"When a user leaves the guild, the `in_guild` flag is updated to `False`.\"\"\"\n leaving_user = fake_user(id=63, in_guild=False)\n\n self.bot.api_client.get.return_value = [fake_user(), fake_user(id=63)]\n guild = self.get_guild(fake_user())\n\n actual_diff = await self.syncer._get_diff(guild)\n expected_diff = (set(), {_User(**leaving_user)}, None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n async def test_diff_for_new_updated_and_leaving_users(self):\n \"\"\"When users are added, updated, and removed, all of them are returned properly.\"\"\"\n new_user = fake_user(id=99, name=\"new\")\n updated_user = fake_user(id=55, name=\"updated\")\n leaving_user = fake_user(id=63, in_guild=False)\n\n self.bot.api_client.get.return_value = [fake_user(), fake_user(id=55), fake_user(id=63)]\n guild = self.get_guild(fake_user(), new_user, updated_user)\n\n actual_diff = await self.syncer._get_diff(guild)\n expected_diff = ({_User(**new_user)}, {_User(**updated_user), _User(**leaving_user)}, None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n async def test_empty_diff_for_db_users_not_in_guild(self):\n \"\"\"When the DB knows a user the guild doesn't, no difference is found.\"\"\"\n self.bot.api_client.get.return_value = [fake_user(), fake_user(id=63, in_guild=False)]\n guild = self.get_guild(fake_user())\n\n actual_diff = await self.syncer._get_diff(guild)\n expected_diff = (set(), set(), None)\n\n self.assertEqual(actual_diff, expected_diff)\n\n\nclass UserSyncerSyncTests(unittest.IsolatedAsyncioTestCase):\n \"\"\"Tests for the API requests that sync users.\"\"\"\n\n def setUp(self):\n self.bot = helpers.MockBot()\n self.syncer = UserSyncer(self.bot)\n\n async def test_sync_created_users(self):\n \"\"\"Only POST requests should be made with the correct payload.\"\"\"\n users = [fake_user(id=111), fake_user(id=222)]\n\n user_tuples = {_User(**user) for user in users}\n diff = _Diff(user_tuples, set(), None)\n await self.syncer._sync(diff)\n\n calls = [mock.call(\"bot/users\", json=user) for user in users]\n 
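# the syncer may POST the users in any order, so the assertion below is order-insensitive\n        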
self.bot.api_client.post.assert_has_calls(calls, any_order=True)\n self.assertEqual(self.bot.api_client.post.call_count, len(users))\n\n self.bot.api_client.put.assert_not_called()\n self.bot.api_client.delete.assert_not_called()\n\n async def test_sync_updated_users(self):\n \"\"\"Only PUT requests should be made with the correct payload.\"\"\"\n users = [fake_user(id=111), fake_user(id=222)]\n\n user_tuples = {_User(**user) for user in users}\n diff = _Diff(set(), user_tuples, None)\n await self.syncer._sync(diff)\n\n calls = [mock.call(f\"bot/users/{user['id']}\", json=user) for user in users]\n self.bot.api_client.put.assert_has_calls(calls, any_order=True)\n self.assertEqual(self.bot.api_client.put.call_count, len(users))\n\n self.bot.api_client.post.assert_not_called()\n self.bot.api_client.delete.assert_not_called()\n","sub_path":"tests/bot/cogs/sync/test_users.py","file_name":"test_users.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"434320908","text":"# Sampling\n\nimport numpy as np\nfrom load import era5\n\nmask_filepath = \"_Data/ERA5_Upper_Indus_mask.nc\"\n\n\ndef random_location_sampler(df):\n \"\"\" Returns DataFrame of random location, apply to clean df only \"\"\"\n\n df_squished = df[[\"lat\", \"lon\"]].reset_index()\n df_s_reset = df_squished.drop_duplicates()\n i = np.random.randint(len(df_s_reset), size=1)\n\n df_location = df_s_reset.iloc[i]\n lat = df_location[\"lat\"].values[0]\n lon = df_location[\"lon\"].values[0]\n print(\"lat=\" + str(lat) + \", lon=\" + str(lon))\n\n df1 = df[df[\"lat\"] == lat]\n df2 = df1[df1[\"lon\"] == lon]\n\n return df2\n\n\ndef random_location_generator(location, N=50):\n \"\"\" Returns DataFrame of random location, apply to clean df only \"\"\"\n\n coord_list = []\n\n df = era5.download_data(location)\n df_squished = df[[\"lat\", \"lon\"]].reset_index()\n df_s_reset = df_squished.drop_duplicates(subset=[\"lat\", \"lon\"])\n\n indices = np.random.randint(len(df_s_reset), size=N)\n\n for i in indices:\n df_location = df_s_reset.iloc[i]\n lat = df_location[\"lat\"]\n lon = df_location[\"lon\"]\n coord_list.append([lat, lon])\n\n return coord_list\n\n\ndef random_location_and_time_sampler(df, length=1000, seed=42):\n \"\"\"Return DataFrame of random locations and times.\"\"\"\n\n np.random.seed(seed)\n i = np.random.randint(len(df), size=length)\n df_sampled = df.iloc[i]\n\n return df_sampled\n","sub_path":"gp/Sampling.py","file_name":"Sampling.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"3864107","text":"from django.contrib import admin\nfrom .models import Product\n\n\n@admin.register(Product)\nclass ProductAdmin(admin.ModelAdmin):\n\tlist_display \t= ('id','categoria','name','stock','peso','precio_a','precio_b', 'orden')\n\tlist_editable \t= ('stock','peso','precio_a','precio_b','orden',)\n\tlist_filter \t= ('categoria',)\n\n\n\tprepopulated_fields = {'slug' : ('name',)} ","sub_path":"producto/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"363916775","text":"import cv2\nimport os\nimport numpy as np \n\n\n#Given an image below function returns rectangle for face detected alongwith gray scale image\ndef faceDetection(test_img):\n gray_img = cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY) #convert image to 
grayscale\n    face_haar_cascade = cv2.CascadeClassifier('HaarCascades/haarcascade_frontalface_default.xml') #Load haar classifier\n    faces=face_haar_cascade.detectMultiScale(gray_img,scaleFactor=1.3,minNeighbors=5) #returns rectangles\n\n    return faces,gray_img\n\ndef labels_for_training_data(directory):\n    faces = []\n    faceID = []\n\n    for path,subdirnames,filenames in os.walk(directory):\n        for filename in filenames:\n            if filename.startswith(\".\"):\n                print(\"Skipping hidden system file !\") #skips hidden system files- those that start with .\n                continue\n            \n            id = os.path.basename(path)\n            img_path = os.path.join(path,filename) #fetching image path\n            print(\"img_path\",img_path)\n            print(\"id : \",id)\n            test_img = cv2.imread(img_path) #loads image one by one\n            if test_img is None:\n                print(\"Image not loaded\")\n                continue\n            faces_rect,gray_img = faceDetection(test_img) #Calling faceDetection function to return faces detected in particular image\n            if len(faces_rect)!=1: # if more than 1 faces detected in the training image,ignore it as training images will only include 1 person in it.\n                continue \n            (x,y,w,h) = faces_rect[0]\n            roi_gray = gray_img[y:y+h, x:x+w] #cropping region of interest i.e. face area from grayscale image; numpy indexes rows (y, height) first, then columns (x, width)\n            faces.append(roi_gray)\n            faceID.append(int(id))\n    return faces,faceID\n\n#trains haar classifier and takes faces,faceID returned by previous function as its arguments\ndef train_classifier(faces,faceID):\n    face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n    face_recognizer.train(faces,np.array(faceID))\n    return face_recognizer\n\n#draws bounding boxes around detected face in image\ndef draw_rect(test_img,face):\n    (x,y,w,h) = face\n    cv2.rectangle(test_img,(x,y),(x+w,y+h),(255,0,0),thickness=5)\n\n#writes name of person for detected label\ndef put_text(test_img,text,x,y):\n    cv2.putText(test_img,text,(x,y),cv2.FONT_HERSHEY_DUPLEX,5,(255,0,0),6)\n","sub_path":"faceRecognition.py","file_name":"faceRecognition.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"122935619","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth import login, logout\n\n\ndef homepage(request):\n    return render(request, 'homepage.html')\n    # return HttpResponse('homepage')\n\n\ndef signup_view(request):\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            # log the user in\n            login(request, user)\n            return redirect('homepage')\n    else:\n        form = UserCreationForm()\n    return render(request, 'signup.html', {'form': form})\n\n\ndef login_view(request):\n    if request.method == 'POST':\n        form = AuthenticationForm(data=request.POST)\n        if form.is_valid():\n            # log in the user\n            user = form.get_user()\n            if not request.user.is_authenticated:\n                login(request, user)\n                return redirect('user:user_home', username=request.user)\n            if request.user != user:\n                return render(request, 'invalid.html')\n            if request.user == user:\n                return redirect('homepage')\n    else:\n        form = AuthenticationForm()\n\n    return render(request, 'login.html', {'form': form})\n\n\ndef logout_view(request):\n    if request.method == 'POST':\n        logout(request)\n        return render(request, 'logout.html')\n    else:\n        logout(request)\n        return render(request, 
'logout.html')\n","sub_path":"Server/Server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"581601477","text":"a = float(input(\"a value:\"))\nb = float(input(\"b value:\"))\nstart_x = float(input(\"start x: \"))\nstart_y = float(input(\"start y: \"))\ncycle = int(input(\"stable cycle: \"))\ntrys = int(input(\"How many newton trys?\"))\n\n\ndef cycles(a,b, start_x, start_y, iterations):\n    # iterates the Henon map, recording each point in the global list 'store'\n    give_x = (start_y + (a-(start_x*start_x)))\n    give_y = (b*start_x)\n    for n in range(iterations):\n        temp = give_x\n        store.append([give_x,give_y])\n        give_x = (give_y + (a - (give_x * give_x)))\n        give_y = (b * temp)\n\n\nfor n in range(trys):\n    det = 0\n    store = []\n    d_xx = []\n    d_xy = []\n    d_yx = []\n    d_yy = []\n    x_c = []\n    y_c = []\n    cycles(a,b,start_x,start_y,cycle)\n    for m in range(cycle):\n        if m == 0:\n            d_xx.append(-2*store[m][0])\n            d_xy.append(1)\n            d_yx.append(b)\n            d_yy.append(0)  # assuming this append was meant for d_yy, which is read as d_yy[-1] below\n\n        else:\n            d_xx.append((-2 * store[m][0] * d_xx[-1])+(b*d_yx[-1]))\n            d_xy.append((-2 * store[m][0] * d_xy[-1])+(b*d_yy[-1]))\n            d_yx.append(d_xx[-1])\n            d_yy.append(d_xy[-1])  # assuming the second append of each pair belongs to d_yy\n    det = 1 / ((d_xx[-1]*d_yy[-1])-(d_xy[-1]*d_yx[-1]))  # determinant of the latest Jacobian entries; whole lists cannot be multiplied\n    d_xx.append(det * d_yy[-1])\n    d_xy.append(det * -1 * d_xy[-1])\n    d_yx.append(det * -1 * d_yx[-1])\n    d_yy.append(det * d_xx[-2])  # assuming the last inverse entry belongs to d_yy\n    # assuming deriv_x/deriv_y (undefined in the original) referred to d_xx/d_yy\n    x_c = (store[m][0]-start_x) * (1/d_yy[len(d_yy)-1])\n    y_c = (store[m][1]-start_y) * ((1/(b*d_xx[len(d_xx)-1]))+(((2*store[m][0]*d_xx[len(d_xx)-1])-(b*d_xx[len(d_xx)-2]))/(b*d_yy[len(d_yy)-1]*d_xx[len(d_xx)-1])))\n    x_correction = 0.01/(0.01 + abs(x_c))\n    y_correction = 0.01/(0.01 + abs(y_c))\n    start_x = start_x - (x_correction*x_c)\n    start_y = start_y - (y_correction * y_c)\n\nprint(start_x)\nprint(start_y)\n\ntemp = start_x\nstart_x = (start_y + (a - (start_x ** 2)))\nstart_y = (b * temp)\n\nprint(start_x)\nprint(start_y)\n","sub_path":"Project2/henon_map_cycles.py","file_name":"henon_map_cycles.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"62175880","text":"from datapackage_pipelines.wrapper import ingest, spew\nfrom datapackage_pipelines.utilities.resources import PROP_STREAMING\n\nimport os\nimport logging\nimport itertools\nfrom sqlalchemy import create_engine\n\ndef generate_sitemap(kind, db_table, doc_id):\n    engine = create_engine(os.environ['DPP_DB_ENGINE'])\n    rows = (dict(r) for r in engine.execute('select * from {}'.format(db_table)))\n    doc_ids = [(doc_id.format(**r), r['__last_modified_at']) for r in rows]\n    index = 0\n    while len(doc_ids) > 0:\n        batch = doc_ids[:10000]\n        doc_ids = doc_ids[10000:]\n\n        filename = '/var/datapackages/sitemaps/{}.{:04d}.xml'.format(kind, index)\n        with open(filename, 'w') as out:\n            out.write('''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\n''')\n            for doc_id, last_modified in batch:\n                doc_id = doc_id.replace('&', '&amp;')\n                out.write('''    <url>\n        <loc>https://next.obudget.org/i/{}</loc>\n        <lastmod>{}</lastmod>\n    </url>\n'''.format(doc_id, last_modified.isoformat()[:10]))\n            out.write('''</urlset>''')\n\n        logging.info('WRITTEN -> %s', filename)\n        yield {'filename': filename}\n        index += 1\n\ndef process_rows(res_iter):\n    try:\n        first = next(res_iter)\n    except:\n        first = []\n    yield from itertools.chain(first, generate_sitemap(kind, db_table, doc_id))\n\nif __name__ == '__main__':\n    params, dp, res_iter = ingest()\n\n    os.makedirs('/var/datapackages/sitemaps', exist_ok=True)\n\n    kind = params['kind']\n    db_table = params['db-table']\n    doc_id = params['doc-id']\n\n    if not 
dp.get('resources'):\n dp['resources'] = [\n {\n 'name': 'sitemaps',\n 'path': 'sitemaps.csv',\n PROP_STREAMING: True,\n 'schema': {\n 'fields': [\n {\n 'name': 'filename',\n 'type': 'string'\n }\n ]\n }\n }\n ]\n\n spew(dp, [process_rows(res_iter)])","sub_path":"datapackage_pipelines_budgetkey/processors/build_sitemaps.py","file_name":"build_sitemaps.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"220540693","text":"def find(x):\n xn = len(x)\n if xn % 2 == 0:\n if x[:xn//2] == x[xn-1:(xn//2)-1:-1]:\n return x\n else:\n if x[:xn//2] == x[xn-1:(xn//2):-1]:\n return x\ndef turn(y):\n a2 = [[0 for i6 in range(n)] for ii in range(n)]\n for i5 in range(n):\n for j5 in range(n):\n a2[i5][j5] = y[n-1 - j5][i5]\n return a2\n\n\ntn = int(input())\nfor ir in range(tn):\n n, m = list(map(int, input().split()))\n a = [list(input()) for ii in range(n)]\n\n result = []\n for i1 in range(n):\n for i2 in range(n - m + 1):\n re = []\n for j2 in range(m):\n re.append(a[i1][i2+j2])\n if re == find(re):\n result.append(find(re))\n b = turn(a)\n for i2 in range(n):\n for i3 in range(n - m + 1):\n re = []\n for j3 in range(m):\n re.append(b[i2][i3+j3])\n if find(re) == re:\n result.append(find(re))\n print('#{} '.format(ir+1),end='')\n res = result[0]\n for iii in range(m):\n print(res[iii],end='')\n print()","sub_path":"lecture/algorithm/problem/4861.회문.py","file_name":"4861.회문.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"603250789","text":"from typing import List\n\n\nclass Solution:\n def can_jump(self, nums: List[int]) -> bool:\n right: int = len(nums) - 1\n left: int = right - 1\n\n while left >= 0:\n if nums[left] >= right - left:\n right = left\n left = left - 1\n\n return right == 0\n","sub_path":"python/medium/task_55_jump_game/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"235271226","text":"from django.urls import path\nfrom .views import (CommentListCreateAPIView,\n CommentGetDeleteUpdateView,\n GetAllCommentsByUserIDView,\n LikeUnlikeCommentView,\n CommentReactionsListAPIView,\n SearchComment)\n\nurlpatterns = [\n path('', CommentListCreateAPIView.as_view(), name='restaurant-comment'),\n path('/', CommentGetDeleteUpdateView.as_view(), name='comment-get-update-delete'),\n path('user//', GetAllCommentsByUserIDView.as_view(), name='user-search'),\n path('reaction/', LikeUnlikeCommentView.as_view(), name='like-unlike-comment'),\n path('reactions', CommentReactionsListAPIView.as_view(), name='all-comments-reactions'),\n\n # Search example: /api/comments/search/?search=\n path('search/', SearchComment.as_view(), name='comment-search'),\n\n]\n","sub_path":"app/comments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"432052158","text":"# -*- coding:utf-8 -*-\nimport numpy as np\n\nclass statistic_calculation:\n\n # 係数からyの値を求める\n # [In] model:係数の配列 x:データのxの値\n # [Out] y :係数におけるデータのxの値でのyの値\n def applyModel(self, model, x):\n a = model[0]\n b = model[1]\n y = a * x + b\n return y\n\n # 最小二乗法\n # [In] points:データ(二次元配列)\n # [Out] 係数(a,b)\n def leastSquare(self, points):\n x = points[:,0]\n y = points[:,1]\n deno_a = (len(points) * ((x * 
y).sum())) - (x.sum() * y.sum()) # aの分子\n deno_b = ((x**2).sum() * y.sum()) - ((x * y).sum() * x.sum()) # bの分子\n nume = len(points) * ((x**2).sum()) - (x.sum()**2) # 分母\n a = deno_a / nume\n b = deno_b / nume\n return np.array([a,b])\n\n # 取得した2点の座標から係数a,bを求める\n def getParamWithSamples(self, samples):\n p1 = samples[0]\n p2 = samples[1]\n diff_p = p1 - p2\n a = diff_p[1] / diff_p[0] # a = y1-y0 / x1 -x0\n b = p1[1] - a * p1[0] # b = y - ax\n return np.array([a,b])\n\n def getError(self, model, point):\n x = point[0]\n y = point[1]\n error = np.abs(self.applyModel(model, x) - y) # 誤差の絶対値\n return error\n\n # RANSAC\n def ransac(self,\n points, # データ(座標リスト)\n sample_num, # パラメータ設定のための取得サンプル数\n max_loop_count, # 最大ループ回数\n error_val, # インライアのための誤差閾値\n inliner_sample_num # インライア内のサンプル数の閾値\n ):\n\n # 正しいモデル格納用リスト\n good_models = []\n # モデルとの誤差格納用リスト(最も誤差が小さいモデルを探索するため)\n good_model_errors = []\n # 繰り返し回数\n iterations = 0\n\n while iterations < max_loop_count:\n # 1. データ集合から、モデルの決定に必要な数以上の「少数の」サンプルをランダムに選ぶ.\n sample = points[np.random.choice(len(points), sample_num, replace=False)]\n # 2. 得られた「少数の」サンプルから最小二乗法などで臨時のモデルパラメータを算出\n # 2点のみの抽出のため2点が得られたときの一次関数導出でパラメータを決めている\n if sample_num == 2:\n param = self.getParamWithSamples(sample)\n #else:\n # param = self.leastSquare(sample)\n\n # インライア(許容範囲内にある座標リスト)\n inliner = []\n # 3. 臨時のモデルパラメータにデータを当てはめ、外れ値がそれほど多くなければ、「正しいモデル候補」に加える\n for p in points:\n if (p == sample).all(1).any():\n continue\n # 臨時パラメータでの誤差が許容範囲か確認\n if self.getError(param, p) > error_val:\n continue\n else:\n # インライアとして座標を追加\n inliner.append(p)\n\n # インライア内の座標が閾値以上か確認\n if len(inliner) > inliner_sample_num:\n # 誤差の平均を算出\n current_error = np.array([self.getError(param, p) for p in points]).mean()\n good_models.append(param)\n good_model_errors.append(current_error)\n\n iterations += 1\n # もっとも誤差平均が小さいモデルのインデックスを取り出す\n best_index = np.argmin(good_model_errors)\n return good_models[best_index]\n","sub_path":"RANSAC/statistic_calculation.py","file_name":"statistic_calculation.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"537170512","text":"from flask import Flask, json, jsonify, request\r\n\r\napp = Flask(__name__)\r\ntasks = [\r\n {\r\n \"id\": 1,\r\n \"title\": \"Practice for tomorrow's games\",\r\n \"description\": \"Do shooting drills in the morning\",\r\n \"done\": False\r\n },\r\n {\r\n \"id\": 2,\r\n \"title\": \"Finish WHJ projects\",\r\n \"description\": \"Finish all of them\",\r\n \"done\": False\r\n }\r\n]\r\n\r\n@app.route(\"/add-data\", methods = [\"POST\"])\r\n\r\ndef add_tasks():\r\n if not request.json:\r\n return jsonify({\r\n \"status\": \"Error\",\r\n \"message\": \"Please provide data in correct format\"\r\n })\r\n task = {\r\n \"id\": tasks[-1]['id']+1,\r\n \"title\": request.json['title'],\r\n \"description\": request.json.get('description'),\r\n \"done\": False\r\n }\r\n tasks.append(task)\r\n return jsonify({\r\n \"status\": \"Success\",\r\n \"message\": \"Task added successfully!\"\r\n })\r\n\r\n@app.route(\"/get-data\")\r\n\r\ndef get_task():\r\n return jsonify({\r\n \"data\": tasks\r\n })\r\n\r\nif(__name__ == \"__main__\"):\r\n app.run(debug=True)\r\n \r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"82207682","text":"from hazm import Lemmatizer, Normalizer, word_tokenize\nimport xlrd\nimport re\n\n\ndef 
isdigit(word):\n return (word[0] in ['۹','۸','۷','۶','۵','۴','۳','۲','۱','۰'])\n\ndef create_dictionary_from_text(i, str):\n str = normalizer.normalize(str)\n str_for_tokenize = str.translate(str.maketrans('_|ẖ–;،\"…=$&@*-/:<>!+.()«»؟', ' ',\n '\\u200c\\u202c\\u200f\\u200e\\u2069\\u2067\\u200b\\u200d'))\n words = word_tokenize(str_for_tokenize)\n # print('main words = ')\n # print(words)\n new_dict = {}\n k = 0\n while k < len(words):\n while True:\n if k >= len(words): break\n repeat = False\n word = words[k]\n # list_of_positions = [m.start() for m in re.finditer(word, str)]\n lem_word = lemmatizer.lemmatize(word).split('#')[0]\n if lem_word == '':\n lem_word = 'است'\n if word in stopwords or lem_word in stopwords or isdigit(word):\n words.remove(word)\n repeat = True\n else:\n words[k] = lem_word\n break\n if k >= len(words): break\n k = k + 1\n # print('after process = ')\n # print(words)\n for k in range(len(words)):\n list_of_positions = [x for x, y in enumerate(words) if y == words[k]]\n new_dict[words[k]] = [{i: list_of_positions}]\n\n sorted_dict = {}\n for i in sorted(new_dict):\n sorted_dict[i] = new_dict[i]\n return sorted_dict\n\n\ndef normalize_content_and_create_dict(i, str):\n result = ''\n for word in str:\n if not re.match(r'[A-Z]+', word, re.I):\n result = result + word\n return create_dictionary_from_text(i, result)\n\n\ndef merge_dicts(d):\n if len(d) == 0:\n return d\n new_dict = d[0]\n for i in range(1, len(d)):\n for word in d[i]:\n if word in new_dict.keys():\n posting_list = new_dict[word]\n posting_list = posting_list + d[i][word]\n new_dict[word] = posting_list\n else:\n new_dict[word] = d[i][word]\n sorted_dict = {}\n for i in sorted(new_dict):\n sorted_dict[i] = new_dict[i]\n return sorted_dict\n\n\nstopwords_f = open('stop_words.txt', 'r', encoding='utf-8')\nstopwords = stopwords_f.readlines()\nfor i in range(len(stopwords)):\n stopwords[i] = stopwords[i].replace(\"\\n\", \"\")\nstopwords_f.close()\nwb = xlrd.open_workbook(\"IR-F19-Project01-Input.xlsx\")\nsheet = wb.sheet_by_index(0)\nlemmatizer = Lemmatizer()\nnormalizer = Normalizer()\n# nim faseleha va alaeme negareshi ro dorost mikone fasele inashuno, \"ha\" ro michasboone ba nim fasele, vali ghalat emlayi na\n\n# lemmatizer behtar ast tanha be kar ravad chon stemmer feel hara xarab mikonad vali alamate jam ra\n## ham hazf mitavanad bokonad va baazan shenase ra ham hazf mikonad\n\ntitle_Dicts = []\nsummary_Dicts = []\ncontent_Dicts = []\nfor i in range(1, sheet.nrows):\n title_Dicts.append(create_dictionary_from_text(i, sheet.cell_value(i, 1)))\n summary_Dicts.append(create_dictionary_from_text(i, sheet.cell_value(i, 3)))\n content_Dicts.append(normalize_content_and_create_dict(i, sheet.cell_value(i, 5)))\n\nf_t = open(\"title_dict.txt\", \"w\", encoding='utf-8')\nf_s = open(\"summary_dict.txt\", \"w\", encoding='utf-8')\nf_c = open(\"content_dict.txt\", \"w\", encoding='utf-8')\nf_t.write(str(merge_dicts(title_Dicts)))\nf_s.write(str(merge_dicts(summary_Dicts)))\nf_c.write(str(merge_dicts(content_Dicts)))\nf_t.close()\nf_s.close()\nf_c.close()\nf = open('title_dict.txt', 'r', encoding='utf-8')\nprint(f.read())\n","sub_path":"api/create_dictionary.py","file_name":"create_dictionary.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"288814439","text":"from django import forms\nfrom .models import NewYearEveWord, NewYearSpirit\n\n\nclass NewYearsEveWordForm(forms.ModelForm):\n def __init__(self, *args, 
**kwargs):\n kwargs.setdefault(\"label_suffix\", \"\")\n super(NewYearsEveWordForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = NewYearEveWord\n fields = \"__all__\"\n\n\nclass NewYearSpiritForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"label_suffix\", \"\")\n super(NewYearSpiritForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = NewYearSpirit\n fields = \"__all__\"\n","sub_path":"event/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"475995405","text":"# https://www.hackerrank.com/challenges/new-year-chaos/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=arrays\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nt0 = '''2\n5\n2 1 5 3 4\n5\n2 5 1 3 4 \n'''\n\nt1 = '''2\n8\n5 1 2 3 7 8 6 4\n8\n1 2 5 3 7 8 6 4'''\n\nt2 = '''2\n5\n2 1 5 3 4\n5\n2 5 1 3 4'''\n\ndef minimumBribes(q):\n n = len(q)\n bribes = 0\n for i in range(n):\n q_i = q[i]\n if (q_i - i - 1) > 2:\n return \"Too chaotic\"\n for j in range(i,q_i-2,-1):\n bribes += (j>0) and (q[j-1] > q_i)\n return str(bribes)\n\n\nif __name__ == '__main__':\n t = t1.splitlines()\n n=int(t[0])\n #t = int(test_in[0])\n q=[int(i) for i in '5 1 2 3 7 8 6 4'.split()]\n print(minimumBribes(q))\n #t = int(input())\n\n\n for t_itr in range(n):\n break\n #n = int(input())\n #q = list(map(int, input().rstrip().split()))\n \n n = int(t[t_itr*2+1])\n q = [int(i) for i in list(t[t_itr*2+2].split())] \n print(minimumBribes(q)) ","sub_path":"Interviews/Arrays/new_years.py","file_name":"new_years.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"329766457","text":"import torch\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport os\nimport sys\n\n# function for getting an identifier for a given net state\ndef get_net_tag(net_name, case_id, sample, epoch):\n net_tag = f\"{net_name}\"\n \n if (case_id is not None):\n net_tag += f\"_case-{case_id}\"\n \n if (sample is not None):\n net_tag += f\"_sample-{sample}\"\n \n if (epoch is not None):\n net_tag += f\"_epoch-{epoch}\"\n \n return net_tag\n\ndef get_net_dir(data_dir, dataset, net_name, train_scheme, case, sample):\n \"\"\"\n Builds and ensures the proper net directory exists, then returns\n its full path\n \"\"\"\n \n net_dir = \"nets/\"\n\n if dataset is not None:\n net_dir += f\"{dataset}/\"\n\n if net_name is not None:\n net_dir += f\"{net_name}/\"\n\n if train_scheme is not None:\n net_dir += f\"{train_scheme}/\"\n\n if case is not None:\n net_dir += f\"{case}/\"\n\n if sample is not None:\n net_dir += f\"sample-{sample}/\"\n\n return ensure_sub_dir(data_dir, net_dir)\n\ndef ensure_sub_dir(data_dir, sub_dir):\n \"\"\"\n Ensures existence of sub directory of data_dir and \n returns its absolute path.\n\n Args:\n sub_dir (TYPE): DESCRIPTION.\n\n Returns:\n sub_dir (TYPE): DESCRIPTION.\n\n \"\"\"\n sub_dir = os.path.join(data_dir, sub_dir)\n \n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n \n return sub_dir\n\n# standard normalization applied to all stimuli\nnormalize = transforms.Normalize(\n [0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n\ndef load_dataset(data_dir, name, batch_size=4):\n\n dataset_dir = os.path.join(data_dir, name)\n n_workers = 4\n\n if name == \"cifar10\":\n return load_cifar10(dataset_dir, 
batch_size, n_workers)\n elif name == \"imagenette2\":\n return load_imagenette(dataset_dir, batch_size, n_workers)\n else:\n print(f\"Unrecognized dataset name {name}\")\n sys.exit(-1)\n\ndef load_imagenette(dataset_dir, batch_size, n_workers):\n\n # standard transforms\n img_xy = 227\n train_xform = transforms.Compose([\n transforms.CenterCrop(img_xy),\n transforms.RandomHorizontalFlip(), \n transforms.ToTensor(),\n normalize\n ])\n val_xform = transforms.Compose([\n transforms.CenterCrop(img_xy),\n transforms.ToTensor(),\n normalize\n ])\n\n # datasets\n train_set = datasets.ImageFolder(os.path.join(dataset_dir, \"train\"),\n transform=train_xform)\n val_set = datasets.ImageFolder(os.path.join(dataset_dir, \"val\"),\n transform=val_xform)\n \n # loaders\n train_loader = torch.utils.data.DataLoader(train_set, \n batch_size=batch_size, shuffle=True, num_workers=n_workers)\n \n val_loader = torch.utils.data.DataLoader(val_set, \n batch_size=batch_size, shuffle=False, num_workers=n_workers)\n \n return (train_set, val_set, train_loader, val_loader)\n\ndef load_cifar10(dataset_dir, batch_size, n_workers):\n\n # standard transforms\n train_xform = transforms.Compose([\n transforms.RandomHorizontalFlip(), \n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize\n ])\n val_xform = transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])\n\n # datasets\n train_set = torchvision.datasets.CIFAR10(root=dataset_dir, train=True,\n download=True, transform=train_xform)\n \n val_set = torchvision.datasets.CIFAR10(root=dataset_dir, train=False,\n download=True, transform=val_xform)\n\n # loaders\n train_loader = torch.utils.data.DataLoader(train_set,\n batch_size=batch_size, shuffle=True, num_workers=n_workers)\n\n val_loader = torch.utils.data.DataLoader(val_set, \n batch_size=batch_size, shuffle=False, num_workers=n_workers)\n\n return (train_set, val_set, train_loader, val_loader)\n","sub_path":"modules/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"642015253","text":"from torch.autograd import Variable\nimport torch\nimport torch.nn as nn\nfrom models.NeuralNet import NeuralNet\nfrom models.discriminative.artificial_neural_networks.hebbian_network.utils import hebb_values_transform, hebb_array_transform\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom models.discriminative.artificial_neural_networks.hebbian_network.utils import indices_h, indices_h_conv\nfrom utils.utils import create_missing_folders\n\n\ndef sigmoid(x, derivative=False):\n return x * (1 - x) if derivative else 1 / (1 + np.exp(-x))\n\n\nclass HebbLayersMLP(NeuralNet):\n def __init__(self, input_size, input_shape, indices_names, num_classes, Ns, hebb_rates, gt, hebb_rates_neurites, hebb_rates_multiplier,\n new_ns, kernels=None, gt_neurites=None, lambd=1., clamp_max=1000000, clamp_min=-1000000, gt_input=-1000,\n padding_pooling=1, padding_no_pooling=1, hebb_max_value=10000, a_dim=0,\n how_much_more=1.0, hyper_count=100, keep_grad=True, is_pruning=True,\n hb=True, is_conv=False, schedule_value=0, gt_convs=None, new_ns_convs=None):\n\n super().__init__()\n self.init_function = torch.nn.init.kaiming_normal_\n self.input_shape = input_shape\n self.input_size = input_size\n try:\n self.n_channels = input_shape[0]\n except:\n self.n_channels = input_shape\n\n self.hebb_log = open(\"logs/\" + self.__class__.__name__ + \"involvment.log\", 'w+')\n self.hebb_rate_input = 0\n 
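# sentinel losses: any real first-epoch loss will count as an improvement\n        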
self.previous_loss = 10000000\n        self.best_loss = 10000000\n        self.count = 0\n        self.indices_names = indices_names\n        self.count_down = 0\n        self.is_conv = is_conv\n        self.gt_input = gt_input\n        self.lambd = lambd\n        self.hb = hb\n        self.hebb_input_values_history = []\n        self.keep_grad = keep_grad\n        self.is_pruning = is_pruning\n        self.a_dim = a_dim\n        self.hebb_input_values = Variable(torch.zeros(self.input_size + a_dim))\n        if torch.cuda.is_available():\n            self.hebb_input_values.cuda()\n        self.valid_bool = list(range(self.input_size))\n        self.valid_bool_tensor = torch.Tensor(self.valid_bool).cuda()\n        self.alive_inputs = list(range(self.input_size))\n        self.previous_valid_len = self.input_size\n        self.num_classes = num_classes\n\n        # Integers\n        self.clamp_max = clamp_max\n        self.clamp_min = clamp_min\n        self.hyper_count = hyper_count\n        self.schedule_value = schedule_value\n\n        # Floats\n        self.how_much_more = how_much_more\n\n        self.hebb_max_value = hebb_max_value\n        if Ns is not None:\n            self.Ns = Ns\n            self.n_neurites = [[] for _ in self.Ns]\n            # self.n_neurites[0] += [self.input_size * self.Ns[0]]\n            # for i in range(len(self.Ns)-1):\n            #     self.n_neurites[i] += [self.Ns[i] * self.Ns[i+1]]\n        if hebb_rates_multiplier is not None:\n            self.hebb_rates_multiplier = hebb_rates_multiplier\n        if hebb_rates_neurites is not None:\n            self.hebb_rates_neurites = hebb_rates_neurites\n        if hebb_rates is not None:\n            self.hebb_rates = hebb_rates\n        if new_ns is not None:\n            self.new_ns = new_ns\n        if gt is not None:\n            self.gt = gt\n        if gt_neurites is not None:\n            self.gt_neurites = gt_neurites\n        if gt_convs is not None:\n            self.gt_convs = gt_convs\n        if kernels is not None:\n            self.kernels = kernels  # assuming kernels was meant to be stored here, since the guard checks kernels\n\n\n        # Booleans\n        self.descending = True\n        self.is_conv = is_conv\n\n        if is_conv:\n            self.convs = nn.ModuleList()\n            self.convs_bn = nn.ModuleList()\n            self.hebb_values_conv = [[]] * len(kernels)\n            self.new_ns_convs = new_ns_convs\n            self.gt_convs = gt_convs\n            self.hebb_rates_conv = [0, 0, 0, 0, 0, 0]\n            self.n_convs_layers = []\n            self.padding_pooling = padding_pooling\n            self.padding_no_pooling = padding_no_pooling\n\n        list1 = [self.input_size] + self.Ns\n        self.list1 = list1\n        self.hebb_values_neurites = [torch.zeros(list1[i + 1], list1[i]) for i in range(len(self.Ns))]\n        self.original_num_neurites = [int(x.shape[0] * int(x.shape[1])) for x in self.hebb_values_neurites]\n\n        self.hebb_values = [Variable(torch.Tensor([0] * n)) for n in self.Ns]\n        self.n_neurons = [[] for _ in self.Ns]\n\n        lenlen = {\n            \"gt\": len(self.gt),\n            \"hebb_rates_multiplier\": len(self.hebb_rates_multiplier),\n            \"hebb_rates_neurites\": len(self.hebb_rates_neurites),\n            \"hebb_rates\": len(self.hebb_rates),\n            \"new_ns\": len(self.new_ns),\n            \"gt_neurites\": len(self.gt_neurites)\n        }\n        try:\n            assert len(set(lenlen.values())) == 1\n        except:\n            print(lenlen)\n            print(\"All arguments don't have the same length.\")\n            set_lens = set(lenlen.values())\n            for l in set_lens:\n                print([key for key in lenlen.keys() if lenlen[key] == l], \"have length\", l)\n            exit()\n        # Dicts\n        self.labels_dict = {\"train\": [], \"valid\": [], \"test\": []}  # assuming the duplicated \"valid\" key was meant to be \"test\"\n        self.accuracies_dict = {\"train\": [], \"valid\": [], \"test\": []}\n        self.losses_dict = {\"train\": [], \"valid\": [], \"test\": []}\n\n    def init_parameters_conv(self, hebb_rates_conv_multiplier, gt_convs, new_ns_convs, planes, kernels,\n                             n_channels, padding_pooling, padding_no_pooling, pooling_layers):\n        self.n_channels = n_channels\n        self.planes = [self.n_channels] + list(planes)  # list.extend returns None, so the list is built by concatenation instead\n        self.kernels = kernels\n        self.padding_pooling = padding_pooling\n        self.padding_no_pooling = padding_no_pooling\n        self.pooling_layers = pooling_layers\n        self.hebb_rates_conv_multiplier = hebb_rates_conv_multiplier\n        self.gt_convs = gt_convs\n        self.new_ns_convs = new_ns_convs\n\n    def calculate_neurons_usage(self):\n        pass\n\n    def calculate_neurites_usage(self):\n        pass\n\n    def print_parameters(self, fcs):\n        print(\"Optimizer\", self.optimizer)\n        print(\"fcs\", fcs)\n        for i, fc in enumerate(fcs):\n            print(\"fcs\", i, \":\", fc)\n            print(\"fcs grad\", i, \":\", fc.weight.grad.shape)\n            print(\"fcs weight\", i, \":\", fc.weight.shape)\n            print(\"bns[i]\", self.bns[i])\n        for i, bn in enumerate(self.bns):\n            print(\"bns\", i, \":\", bn)\n\n    def compute_hebb(self, running_loss, epoch, verbose, fcs, results_path, count_down_limit=10, display_rate=10):\n        self.epoch = epoch\n        valid_bool = [1. for i in range(self.input_size)]\n        print(\"input_size:\", self.input_size)\n        print(\"valid_bool:\", len(valid_bool))\n        if verbose > 1:\n            print(\"Hebb rates inputs:\", self.hebb_rate_input, file=self.hebb_log)\n            print(\"Hebb rates neurons:\", self.hebb_rates, file=self.hebb_log)\n            print(\"Hebb rates neurites:\", self.hebb_rates_neurites, file=self.hebb_log)\n            print(\"Input layer mean hebb\", torch.mean(self.hebb_input_values), file=self.hebb_log)\n            print(\"Input layer min hebb\", torch.min(self.hebb_input_values), file=self.hebb_log)\n            print(\"Input layer max hebb\", torch.max(self.hebb_input_values), file=self.hebb_log)\n            #print(\"Input layer mean hebb\", torch.mean(self.hebb_input_values))\n            #print(\"Input layer min hebb\", torch.min(self.hebb_input_values))\n            #print(\"Input layer max hebb\", torch.max(self.hebb_input_values))\n            print(\"First layer mean hebb\", torch.mean(self.hebb_values[0]), file=self.hebb_log)\n            print(\"First layer min hebb\", torch.min(self.hebb_values[0]), file=self.hebb_log)\n            print(\"First layer max hebb\", torch.max(self.hebb_values[0]), file=self.hebb_log)\n            print(\"First layer mean hebb\", torch.mean(self.hebb_values[0]))\n            print(\"First layer min hebb\", torch.min(self.hebb_values[0]))\n            print(\"First layer max hebb\", torch.max(self.hebb_values[0]))\n        if epoch == 0:\n            pass\n        elif running_loss < self.previous_loss:\n            if self.descending:\n                self.count += 1\n                if verbose > 2:\n                    print(\"Changing direction: doing better\", sep=\"\\t\", file=self.hebb_log)\n                self.descending = False\n            self.count_down = 0\n\n        else:\n            if not self.descending:\n                if verbose > 2:\n                    print(\"Changing direction: doing worse\", sep=\"\\t\", file=self.hebb_log)\n            else:\n                self.count_down += 1\n            self.descending = True\n\n        if running_loss < self.best_loss:\n            if verbose > 0:\n                print(\"Better loss!\", sep=\"\\t\", file=self.hebb_log)\n            if running_loss < (self.best_loss + self.how_much_more):\n                if verbose > 2:\n                    print(\"Count reset to 0\", sep=\"\\t\", file=self.hebb_log)\n                print(\"Better loss! 
Count reset to 0.\")\n                self.count = 0\n            self.best_loss = running_loss\n        else:\n            if verbose > 0:\n                print(\"Improvement not big enough. Count still going up\", sep=\"\\t\", file=self.hebb_log)\n                print(\"HYPER COUNT\", self.hyper_count, sep=\"\\t\", file=self.hebb_log)\n        if self.count == self.hyper_count or self.count_down == count_down_limit:\n            if verbose > 2:\n                print(\"new neurons\", sep=\"\\t\", file=self.hebb_log)\n                if self.count == self.hyper_count:\n                    print(\"Reason: Hyper count reached\", sep=\"\\t\", file=self.hebb_log)\n                else:\n                    print(\"Reason: Worsening limit reached\", sep=\"\\t\", file=self.hebb_log)\n            if self.is_conv:\n                self.add_conv_units(new_conv_channels=self.new_ns_convs, keep_grad=True, init=\"he\")\n            print(\"new neurons\")\n            fcs = self.add_neurons(fcs)\n            self.count = 0\n        elif self.is_pruning:\n            # print(\"input pruning...\")\n            #valid_bool, _ = self.input_pruning(results_path)\n            if self.is_conv:\n                exit(\"NOT IMPLEMENTED\")\n                # self.pruning_conv()\n            fcs, self.bn = self.pruning(fcs)\n        if verbose > 0:\n            print(\"count: \", self.count, sep=\"\\t\", file=self.hebb_log)\n            print(\"count down: \", self.count_down, sep=\"\\t\", file=self.hebb_log)\n\n        self.running_losses.append(running_loss)\n        if (epoch > 0):\n            if epoch % display_rate == 0 and verbose > 1:\n                print(\"previous accuracy: \", self.previous_loss, sep=\"\\t\", file=self.hebb_log)\n                print(\"running_loss: \", running_loss, sep=\"\\t\", file=self.hebb_log)\n        self.previous_loss = running_loss\n        self.previous_acc = self.accuracies_dict[\"train\"]\n        return fcs, valid_bool, nn.ModuleList(self.bn).cuda()\n\n    def add_hebb_neurites(self, mul, layer):\n        hvals_neurites1 = self.hebb_values_neurites[layer]\n        hvals_neurites1 = Variable(hvals_neurites1, requires_grad=False)\n\n        hrate_neurites1 = -torch.mean(mul, 1)\n\n        self.hebb_rates_neurites[layer] = hrate_neurites1\n        if torch.cuda.is_available():\n            hvals_neurites1 = hvals_neurites1.cuda()\n        matrix_to_add = Variable(hebb_values_transform(mul, hrate_neurites1), requires_grad=False)\n        if torch.cuda.is_available():\n            matrix_to_add = matrix_to_add.cuda()\n            hvals_neurites1 = hvals_neurites1.cuda()\n\n        self.hebb_values_neurites[layer] = torch.add(hvals_neurites1, matrix_to_add)\n        self.hebb_values_neurites[layer] = torch.clamp(self.hebb_values_neurites[layer], max=self.clamp_max)\n\n    def add_hebb_neurons_input(self, xs, fcs, clamp=False):\n        x_input = self.bn_input(xs[0]).cuda()\n        x_input[x_input != x_input] = 0  # zero out NaNs (NaN != NaN)\n        matmul = xs[-1]\n        for i in reversed(range(len(fcs)-1)):\n            matmul = torch.matmul(matmul, fcs[i].weight)\n        mul = torch.mul(x_input, matmul)\n\n        mul[mul != mul] = 0\n\n        self.hebb_rate_input = -torch.mean(mul, 1)\n        #for j in range(len(self.hebb_rate_input)):\n        val_to_add_input = torch.sum(hebb_array_transform(mul, self.hebb_rate_input), dim=0).cuda()\n        self.hebb_input_values = torch.add(val_to_add_input.cuda(), self.hebb_input_values.cuda())\n        if clamp:\n            self.hebb_input_values = torch.clamp(self.hebb_input_values, min=self.clamp_min, max=self.clamp_max)\n\n    def add_hebb_neurons(self, x, i):\n        hvals = self.hebb_values[i]\n        x[x != x] = 0\n        hrate = -torch.mean(x)\n\n        #\n        hrate *= self.lambd\n\n        self.hebb_rates[i] = hrate * self.lambd\n        vals = Variable(hebb_values_transform(x, hrate), requires_grad=False)\n        val_to_add = torch.sum(vals, dim=0)\n        if torch.cuda.is_available():\n            val_to_add = val_to_add.cuda()\n            hvals = hvals.cuda()\n        self.hebb_values[i] = torch.add(hvals, val_to_add)\n        self.hebb_values[i] = torch.clamp(self.hebb_values[i], max=self.clamp_max)\n\n    def pruning(self, fcs, 
minimum_neurons=2):\n bn = []\n for i in range(len(self.gt)):\n alive_neurons_out = self.hebb_values[i] > float(self.gt[i])\n indices_alive_neurons_out = indices_h(alive_neurons_out)\n if len(indices_alive_neurons_out) < minimum_neurons:\n indices_alive_neurons_out = indices_h(torch.sort(self.hebb_values[i])[1] < minimum_neurons)\n print(\"Minimum neurons on layer \", (i + 1), sep=\"\\t\", file=self.hebb_log)\n print(\"Minimum neurons on layer \", (i + 1), sep=\"\\t\")\n #self.hebb_values_neurites[i] = self.hebb_values_neurites[i][indices_alive_neurons_out, :]\n\n w2 = fcs[i].weight.data.copy_(fcs[i].weight.data).cpu().numpy()\n b2 = fcs[i].bias.data.copy_(fcs[i].bias.data).cpu().numpy()\n wg2 = fcs[i].weight.grad.data.copy_(fcs[i].weight.grad.data).cpu().numpy()\n bg2 = fcs[i].bias.grad.data.copy_(fcs[i].bias.grad.data).cpu().numpy()\n\n bg2 = bg2[indices_alive_neurons_out]\n b2 = b2[indices_alive_neurons_out]\n\n wg2 = wg2[indices_alive_neurons_out, :]\n w2 = w2[indices_alive_neurons_out, :]\n\n if i > 0:\n alive_neurons_in = torch.Tensor([True if x > float(self.gt[i - 1]) else False for x in self.hebb_values[i - 1]])\n indices_alive_neurons_in = indices_h(alive_neurons_in)\n\n #self.hebb_values_neurites[i] = self.hebb_values_neurites[i][:, indices_alive_neurons_in]\n wg2 = wg2[:, indices_alive_neurons_in]\n w2 = w2[:, indices_alive_neurons_in]\n fcs[i].in_features = wg2.shape[1]\n\n self.Ns[i] = len(b2)\n fcs[i].out_features = len(b2)\n\n b2 = torch.from_numpy(b2)\n bg2 = torch.from_numpy(bg2)\n w2 = torch.from_numpy(w2)\n wg2 = torch.from_numpy(wg2)\n\n if torch.cuda.is_available():\n w2 = Variable(w2).cuda()\n wg2 = Variable(wg2).cuda()\n b2 = Variable(b2).cuda()\n bg2 = Variable(bg2).cuda()\n\n fcs[i].weight = nn.Parameter(w2)\n fcs[i].weight.grad = nn.Parameter(wg2)\n fcs[i].bias = nn.Parameter(b2)\n fcs[i].bias.grad = nn.Parameter(bg2)\n\n # alive_neurites = self.hebb_values_neurites[i] > self.gt_neurites[i]\n # alive_neurites = torch.Tensor(alive_neurites.data.cpu().numpy()).cuda()\n\n # fcs[i].weight.data = fcs[i].weight.data * alive_neurites\n # self.n_neurites[i] += [int(torch.sum(alive_neurites))]\n self.hebb_values[i] = self.hebb_values[i][indices_alive_neurons_out]\n bn += [nn.BatchNorm1d(len(self.hebb_values[i]))]\n\n w3 = fcs[-1].weight.data.copy_(fcs[-1].weight.data).cpu().numpy()\n wg3 = fcs[-1].weight.grad.data.copy_(fcs[-1].weight.grad.data).cpu().numpy()\n\n\n try:\n wg3 = wg3[:, indices_alive_neurons_out]\n fcs[-1].in_features = len(indices_alive_neurons_out)\n fcs[-1].weight = nn.Parameter(Variable(torch.from_numpy(w3[:, indices_alive_neurons_out])).cuda())\n fcs[-1].weight.grad = nn.Parameter(Variable(torch.from_numpy(wg3)).cuda())\n\n except:\n fcs[-1].weight = nn.Parameter(Variable(torch.from_numpy(w3)).cuda())\n fcs[-1].weight.grad = nn.Parameter(Variable(torch.from_numpy(wg3)).cuda())\n\n if torch.cuda.is_available():\n fcs = fcs.cuda()\n print(\"Neurons in layers:\", self.Ns)\n return fcs, bn\n\n def input_pruning(self, results_path, min_n_input_dims=20, minimum_neurons=20):\n \"\"\"\n :param net:\n :param gt:\n :param min_n_input_dims:\n :param minimum_neurons:\n :return:\n \"\"\"\n hebb_input = self.hebb_input_values.data.copy_(self.hebb_input_values.data).cpu().numpy()\n if len(hebb_input) >= min_n_input_dims:\n to_keep = hebb_input > float(self.gt_input)\n print(\"min_hebb_value:\", self.gt_input)\n valid_indices = indices_h(to_keep)\n if len(valid_indices) < minimum_neurons:\n # TODO Replace neurons that could not be removed?\n valid_indices = 
indices_h(torch.sort(hebb_input)[1] < minimum_neurons)\n print(\"Minimum neurons on layer 1\", sep=\"\\t\", file=self.hebb_log)\n\n print(\"previous_valid_len\", self.previous_valid_len)\n self.valid_bool = [1. if x in valid_indices else 0. for x in range(self.input_size)]\n self.alive_inputs = [x for x in range(len(hebb_input)) if x in valid_indices]\n alive_inputs = np.array(self.alive_inputs)\n #if len(self.alive_inputs) < self.previous_valid_len:\n masks_path = results_path + \"/images/masks/\" + str(self.dataset_name) + \"/\"\n create_missing_folders(masks_path)\n img_path = \"_\".join([\"alive_inputs\", str(len(valid_indices)), str(self.epoch), \".png\"])\n print(\"self.n_channels\", self.n_channels)\n if len(self.input_shape) == 3:\n print(\"SAVING MASK at\", results_path)\n mask = np.reshape(self.valid_bool, newshape=(28, 28)) # TODO change hard coding\n plt.imsave(masks_path + img_path, mask)\n self.previous_valid_len = len(valid_indices)\n self.valid_bool_tensor = self.valid_bool_tensor * torch.Tensor(self.valid_bool).cuda()\n return self.valid_bool, self.alive_inputs\n\n def add_neurons(self, fcs):\n for i in range(len(self.new_ns)):\n if self.new_ns[i] > 0:\n self.bn[i] = nn.BatchNorm1d(len(self.bn[i].weight) + int(self.new_ns[i]))\n hebbs = Variable(self.hebb_values[i].data.copy_(self.hebb_values[i].data)).cpu()\n new_neurons = Variable(torch.zeros(int(self.new_ns[i])))\n hebbs = Variable(torch.cat((hebbs, new_neurons)))\n self.Ns[i] = len(hebbs)\n hebbs_neurites = Variable(\n self.hebb_values_neurites[i].data.copy_(self.hebb_values_neurites[i].data)).cpu()\n new_neurites1 = Variable(torch.zeros(int(self.new_ns[i]), hebbs_neurites.shape[1]))\n hebbs_neurites = Variable(torch.cat((hebbs_neurites, new_neurites1), dim=0))\n\n w2 = fcs[i].weight.data.copy_(fcs[i].weight.data).cpu()\n b2 = fcs[i].bias.data.copy_(fcs[i].bias.data).cpu()\n wg2 = fcs[i].weight.grad.data.copy_(fcs[i].weight.grad.data).cpu()\n bg2 = fcs[i].bias.grad.data.copy_(fcs[i].bias.grad.data).cpu()\n new_biases2 = torch.zeros(int(self.new_ns[i]))\n b2 = torch.cat((b2, new_biases2))\n bg2 = Variable(torch.cat((bg2, new_biases2)))\n\n new_weights1 = torch.zeros([w2.shape[0] + int(self.new_ns[i]), w2.shape[1]])\n new_weights1 = torch.nn.init.kaiming_normal_(new_weights1)[0:int(self.new_ns[i]), :]\n new_weights_grad1 = torch.zeros([w2.shape[0] + int(self.new_ns[i]), w2.shape[1]])[0:int(self.new_ns[i]), :]\n w2 = torch.cat((w2, new_weights1), dim=0)\n wg2 = torch.cat((wg2, new_weights_grad1), dim=0)\n\n if i > 0:\n new_neurites2 = Variable(torch.zeros(len(hebbs_neurites), int(self.new_ns[i - 1])))\n hebbs_neurites = Variable(torch.cat((hebbs_neurites, new_neurites2), dim=1))\n new_weights2_2 = torch.zeros([w2.shape[0], w2.shape[1] + int(self.new_ns[i - 1])])\n new_weights2_2 = torch.nn.init.kaiming_normal_(new_weights2_2)[:, 0:int(self.new_ns[i - 1])]\n new_weights_grad2_2 = torch.zeros([w2.shape[0], w2.shape[1] + int(self.new_ns[i - 1])])[:,\n 0:int(self.new_ns[i - 1])]\n w2 = Variable(torch.cat((w2, new_weights2_2), dim=1))\n wg2 = Variable(torch.cat((wg2, new_weights_grad2_2), dim=1))\n\n if torch.cuda.is_available():\n w2, wg2, b2, bg2 = w2.cuda(), wg2.cuda(), b2.cuda(), bg2.cuda()\n self.hebb_values[i] = hebbs.cuda()\n self.hebb_values_neurites[i] = hebbs_neurites.cuda()\n\n fcs[i].weight = nn.Parameter(Variable(w2).cuda())\n fcs[i].weight.grad = nn.Parameter(Variable(wg2).cuda())\n fcs[i].bias = nn.Parameter(Variable(b2).cuda())\n fcs[i].bias.grad = nn.Parameter(Variable(bg2).cuda())\n fcs[i].in_features = 
wg2.shape[1]\n                fcs[i].out_features = wg2.shape[0]\n\n        w3 = fcs[-1].weight.data.copy_(fcs[-1].weight.data).cpu()\n        wg3 = fcs[-1].weight.grad.data.copy_(fcs[-1].weight.grad.data).cpu()\n        new_weights3 = torch.zeros([w3.shape[0], int(w3.shape[1] + int(self.new_ns[i - 1]))])\n        new_weights3 = torch.nn.init.kaiming_normal_(new_weights3[:, 0:int(self.new_ns[i - 1])])\n        new_weights_grad3 = torch.zeros([w3.shape[0], w3.shape[1] + int(self.new_ns[i - 1])])[:, 0:int(self.new_ns[i - 1])]\n        w3 = Variable(torch.cat((w3, new_weights3), dim=1))\n        wg3 = Variable(torch.cat((wg3, new_weights_grad3), dim=1))\n        if torch.cuda.is_available():\n            w3 = w3.cuda()\n            wg3 = wg3.cuda()\n        fcs[-1].weight = nn.Parameter(w3)\n        fcs[-1].weight.grad = nn.Parameter(wg3)\n        fcs[-1].in_features = len(fcs[-1].bias)\n        return fcs\n\n    def add_conv_units(self, new_conv_channels, keep_grad=True, init=\"he\", clip_max=100000):\n        # TODO: augment by a factor, e.g. x2; that way the architecture would be kept\n        hebbs = self.hebb_values_conv\n        hebb_zeros = Variable(torch.zeros(new_conv_channels))\n\n        for i in range(len(new_conv_channels)):\n            if new_conv_channels[i] > 0:\n                hebbs[i] = torch.cat((hebbs[i],))\n        w1 = None\n        b1 = None\n        wg1 = None\n        bg1 = None\n        w2s = [[] for _ in range(len(hebb_zeros))]\n        b2s = [[] for _ in range(len(hebb_zeros))]\n        wg2s = [[] for _ in range(len(hebb_zeros))]\n        bg2s = [[] for _ in range(len(hebb_zeros))]\n        wg3 = None\n        w3 = None\n\n        if keep_grad and init == \"he\":\n            b1 = self.convs[0].bias.data\n            w1 = self.convs[0].weight.data\n            wg1 = self.convs[0].weight.grad.data  # the gradient data is used below; it was previously left as None\n            if new_conv_channels[0] > 0 and len(b1) <= clip_max:\n                print(\"New neurons with kaiming init\", sep=\"\\t\", file=self.hebb_log)\n                w_zeros1 = torch.zeros([w1.shape[0] + new_conv_channels[0], w1.shape[1], w1.shape[2], w1.shape[3]])\n                wg_zeros1 = torch.zeros([wg1.shape[0] + new_conv_channels[0], wg1.shape[1], wg1.shape[2], wg1.shape[3]])\n                new_weights1 = self.init_function(w_zeros1)[0:new_conv_channels[0]]\n                new_biases1 = torch.zeros(new_conv_channels[0])\n\n                w1 = torch.cat((w1, new_weights1), dim=0)\n                b1 = torch.cat((b1.data, new_biases1))\n                wg1 = torch.cat((wg1, self.init_function(wg_zeros1)[0:new_conv_channels[0]]), dim=0)\n\n                self.convs[0].bias.grad.data = torch.cat((self.convs[0].bias.grad.data, torch.zeros(new_conv_channels[0])))  # b1 is a plain tensor after torch.cat, so grow the layer's bias grad directly\n\n                self.convs[0].out_channels = len(b1)\n                self.planes[1] = len(b1.data)\n\n            for i in range(1, len(new_conv_channels)):\n                b2s[i] = self.convs[i].bias.data\n                bg2s[i] = self.convs[i].bias.grad.data\n                w2s[i] = self.convs[i].weight.data\n                wg2s[i] = self.convs[i].weight.grad.data\n\n                if new_conv_channels[i] > 0 and len(b2s[i]) < clip_max:\n                    print(\"New neurons with kaiming init\", sep=\"\\t\", file=self.hebb_log)\n                    w_zeros2_1 = torch.zeros([w2s[i].shape[0], w2s[i].shape[1] + new_conv_channels[i - 1],\n                                              wg2s[i].shape[2], wg2s[i].shape[3]])\n                    w_zeros2_2 = torch.zeros([w2s[i].shape[0] + new_conv_channels[i], w2s[i].shape[1],\n                                              w2s[i].shape[2], w2s[i].shape[3]])\n                    wg_zeros2_1 = torch.zeros([wg2s[i].shape[0], new_conv_channels[i - 1],\n                                               wg2s[i].shape[2], wg2s[i].shape[3]])\n                    wg_zeros2_2 = torch.zeros([new_conv_channels[i], wg2s[i].shape[1],\n                                               wg2s[i].shape[2], wg2s[i].shape[3]])\n                    b_zeros2 = torch.zeros(new_conv_channels[i])\n                    b2s[i] = torch.cat((b2s[i], b_zeros2))\n                    w2s[i] = torch.cat((w2s[i], self.init_function(w_zeros2_1)[:, 0:new_conv_channels[i - 1]]), dim=1)\n                    w2s[i] = torch.cat((w2s[i], self.init_function(w_zeros2_2)[0:new_conv_channels[i], :]), dim=0)\n\n                    bg2s[i] = torch.cat((bg2s[i], b_zeros2))\n                    wg2s[i] = torch.cat((wg2s[i], wg_zeros2_1), dim=1)\n                    wg2s[i] = torch.cat((wg2s[i], wg_zeros2_2), dim=0)\n                    
self.planes[i + 1] = len(bg2s[i])\n\n                else:\n                    print(\"Already at the maximum number of neurons. Put them on another layer or add a new layer\", sep=\"\\t\", file=self.hebb_log)\n        else:\n            print(\"ERROR: add_conv_units only implements keep_grad=True with init='he'\")\n\n    def replace_neurons(self):\n        pass\n\n    def pruning_conv(self, fcs, gt_convs, min_neurons=4):\n        hebb_conv = self.hebb_values_conv[0].data.copy_(self.hebb_values_conv[0].data)\n        to_keep = hebb_conv > float(gt_convs[0])\n        to_keep_array = to_keep == 1\n        indices_neurons1 = indices_h_conv(to_keep_array)\n        if len(indices_neurons1) < min_neurons:\n            # TODO Replace neurons that could not be removed?\n            print(\"Minimum neurons on layer 1\", sep=\"\\t\", file=self.hebb_log)\n            indices_neurons1 = indices_h_conv(torch.sort(hebb_conv)[1] < min_neurons)\n        self.hebb_values_conv[0] = Variable(hebb_conv[indices_neurons1])\n\n        w1 = self.convs[0].weight\n        b1 = self.convs[0].bias\n        weight1 = w1.data[indices_neurons1, :]\n        bias1 = b1.data[indices_neurons1]\n        gw1 = self.convs[0].weight.grad[indices_neurons1, :]\n        gb1 = self.convs[0].bias.grad[indices_neurons1]\n\n        self.convs[0].weight = torch.nn.Parameter(weight1)\n        self.convs[0].bias = torch.nn.Parameter(bias1)\n        self.convs[0].in_channels = len(weight1[0])\n        self.convs[0].out_channels = len(weight1)\n        self.convs[0].weight.grad = gw1\n        self.convs[0].bias.grad = gb1\n\n        self.bns[0] = nn.BatchNorm1d(len(self.convs[0].bias))\n\n        for i in range(1, len(gt_convs)):\n            hebb2 = self.hebb_values_conv[i].data.copy_(self.hebb_values_conv[i].data)\n            to_keep2 = hebb2 > float(gt_convs[i])\n            to_keep2_array = to_keep2 == 1\n            indices_neurons2 = indices_h_conv(to_keep2_array)\n            if len(indices_neurons2) < min_neurons:\n                # TODO Replace neurons that could not be removed?\n                indices_neurons2 = indices_h_conv(torch.sort(hebb2)[1] < min_neurons)\n                print(\"Minimum neurons on layer \", (i + 1), sep=\"\\t\", file=self.hebb_log)\n\n            self.hebb_values_conv[i] = Variable(hebb2[indices_neurons2])\n            w2 = self.convs[i].weight.data.copy_(self.convs[i].weight.data).cpu().numpy()\n            b2 = self.convs[i].bias.data.copy_(self.convs[i].bias.data).cpu().numpy()\n\n            gw2 = self.convs[i].weight.grad.data.copy_(self.convs[i].weight.grad.data).cpu().numpy()\n            gb2 = self.convs[i].bias.grad.data.copy_(self.convs[i].bias.grad.data).cpu().numpy()  # copy the bias grad; previously this overwrote the bias data with its gradient\n            gb2 = gb2[indices_neurons2]\n\n            gw2 = gw2[indices_neurons2, :]\n            gw2 = gw2[:, indices_neurons1]\n            gw2 = torch.from_numpy(gw2)\n            gb2 = torch.from_numpy(gb2)\n\n            w2 = w2[indices_neurons2, :]\n            w2 = w2[:, indices_neurons1]\n            b2 = b2[indices_neurons2]\n            w2 = torch.from_numpy(w2)\n            b2 = torch.from_numpy(b2)\n\n            if torch.cuda.is_available():\n                gw2 = gw2.cuda()\n                w2 = w2.cuda()\n                gb2 = gb2.cuda()\n                b2 = b2.cuda()\n\n            self.convs[i].weight = torch.nn.Parameter(w2)\n            self.convs[i].bias = torch.nn.Parameter(b2)\n            self.convs[i].in_channels = len(w2[0])\n            self.convs[i].out_channels = len(w2)\n            self.convs[i].weight.grad = torch.nn.Parameter(gw2)\n            self.convs[i].bias.grad = torch.nn.Parameter(gb2)\n            self.bns[i] = nn.BatchNorm1d(len(self.convs[i].bias))\n            indices_neurons1 = indices_neurons2\n        fc1_w = fcs[i].weight.data.copy_(fcs[i].weight.data).cpu().numpy()\n        fc1_wg = fcs[i].weight.grad.data.copy_(fcs[i].weight.grad.data).cpu().numpy()\n        fc1_w = fc1_w[:, indices_neurons1]\n        fc1_wg = fc1_wg[:, indices_neurons1]\n        fc1_w = torch.from_numpy(fc1_w)\n        fc1_wg = torch.from_numpy(fc1_wg)\n        fcs[i].weight = torch.nn.Parameter(fc1_w)\n        fcs[i].weight.grad = torch.nn.Parameter(fc1_wg)\n\n    def sort_pruning_values(self, n_remove):\n        gts = [[]] * len(n_remove)\n        for i in range(len(gts)):\n            hebb = 
Variable(self.hebb_values[i].data.copy_(self.hebb_values[i].data))\n sorted_hebb = np.sort(hebb.data)\n gts[i] = sorted_hebb[n_remove[i]]\n return gts\n\n","sub_path":"models/discriminative/artificial_neural_networks/hebbian_network/hebbLayers.py","file_name":"hebbLayers.py","file_ext":"py","file_size_in_byte":30678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"647660326","text":"import os\nimport logging\nfrom scrapy.spiders.init import InitSpider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy import Selector\nfrom scrapy.loader import ItemLoader\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.loader.processors import Join, MapCompose\n\nfrom app.items import Posting\n\nclass PostingSpider(InitSpider):\n\tname = \"app\"\n\t\t\t\t\t\t\n\tlogin_url = os.environ.get('POSTING_SPIDER_LOGIN')\n\tallowed_domains = [os.environ.get('POSTING_SPIDER_ALLOWED_DOMAINS')]\n\tbase_url = os.environ.get('POSTING_SPIDER_START_URL')\n\tstart_urls = [base_url + \"00207812\"]\n\n\t# Defining xpath for the posting fields\n\tposting_xpath = '//*[@id=\"win12divPSPAGECONTAINER\"]'\n\tposting_fields = {\n\t'id': 'normalize-space(.//span[@id=\"UW_CO_JOBDTL_VW_UW_CO_JOB_ID\"]/text())',\n\t'employer': 'normalize-space(.//span[@id=\"UW_CO_JOBDTL_DW_UW_CO_EMPUNITDIV\"]/text())',\n\t'title' :'normalize-space(.//span[@id=\"UW_CO_JOBDTL_VW_UW_CO_JOB_TITLE\"]/text())',\n\t'location': 'normalize-space(.//span[@id=\"UW_CO_JOBDTL_VW_UW_CO_WORK_LOCATN\"]/text())',\n\t'open_date' :'normalize-space(.//span[@id=\"UW_CO_JOBDTL_VW_UW_CO_CHAR_EDATE\"]/text())',\n\t'close_date' :'normalize-space(.//span[@id=\"UW_CO_JOBDTL_VW_UW_CO_CHAR_DATE\"]/text())',\n\t'discipline' :'.//span[@id=\"UW_CO_JOBDTL_DW_UW_CO_DESCR\" or @id=\"UW_CO_JOBDTL_DW_UW_CO_DESCR100\"]/text()',\n\t'level' :'normalize-space(.//span[@id=\"UW_CO_JOBDTL_DW_UW_CO_DESCR_100\"]/text())',\n\t'description' :'normalize-space(.//span[@class=\"PSTEXT\" and @id=\"UW_CO_JOBDTL_VW_UW_CO_JOB_DESCR\"])'\n\t}\n\trequest_meta={'dont_redirect': True,\"handle_httpstatus_list\": [302]}\t\n\t\n\tdef __init__(self):\n\t\tself.posting_id = \"00207812\"\n\n\tdef init_request(self):\n\t\tformdata = {'userid': os.environ.get('POSTING_SPIDER_USER') , 'pwd': os.environ.get('POSTING_SPIDER_PWD')}\n\t\tyield FormRequest(url=self.login_url, callback=self.on_login_response, formdata=formdata)\n\t\n\tdef on_login_response(self, response):\n\t\tif os.environ.get('POSTING_LOGIN_RESPONSE_URL') in response.url:\n\t\t\tself.log(\"Successfully logged in. 
Let's start crawling!\")\n\t\t\treturn self.initialized()\n\t\telse:\n\t\t\tself.log(\"Error \" + str(response.status) + \" while trying to login\", level=logging.ERROR)\n\n\tdef parse(self, response):\n\t\t# Instantiate the response Selector\n\t\tselector = Selector(response)\n\t\tposting = selector.xpath(self.posting_xpath)\n\t\tif os.environ.get('POSTING_SPIDER_REDIRECT') in response.url:\n\t\t\tlogging.warning(\"Got redirect, link doesn't exist\")\n\t\t\tself.posting_id = str(int(self.posting_id) + 1).zfill(len(self.posting_id))\n\t\t\tyield Request(self.base_url + self.posting_id, meta = self.request_meta)\n\n\t\tfor field in posting:\n\t\t\t# load field for processing\n\t\t\tloader = ItemLoader(Posting(), selector=field)\n\t\t\t# strip off white space \n\t\t\tloader.default_input_processor = MapCompose(unicode.strip)\n\t\t\tloader.default_output_processor = Join()\n\n\t\t\t# iterate over (field name, xpath) pairs; use a distinct name so the outer `field` selector is not shadowed\n\t\t\tfor field_name, xpath in self.posting_fields.iteritems():\n\t\t\t\tloader.add_xpath(field_name, xpath)\n\t\t\tyield loader.load_item()\n\t\tself.posting_id = str(int(self.posting_id) + 1).zfill(len(self.posting_id))\n\t\tyield Request(self.base_url + self.posting_id, meta = self.request_meta)\n","sub_path":"spiders/app/posting_spider.py","file_name":"posting_spider.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
+{"seq_id":"508053978","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.draw import line\nimport math\nimport sys\n\n \n\ndef constrain(val, min_val, max_val):\n    return min(max_val, max(min_val, val))\n\n\nclass Line:\n    def __init__(self, x0: float, x1: float, y0: float, y1: float):\n        self.x0 = x0\n        self.x1 = x1\n        self.y0 = y0\n        self.y1 = y1\n\n    def draw(self, pixels, color = 255):\n        x0_ = constrain(int(round(self.x0)), 0, pixels.shape[1] - 1)\n        x1_ = constrain(int(round(self.x1)), 0, pixels.shape[1] - 1)\n        y0_ = constrain(int(round(self.y0)), 0, pixels.shape[0] - 1)\n        y1_ = constrain(int(round(self.y1)), 0, pixels.shape[0] - 1)\n\n\n\n        if x1_ >= pixels.shape[1]:\n            x1_ = pixels.shape[1] - 1\n\n        if x0_ < 0:\n            x0_ = 0\n\n        if y1_ >= pixels.shape[0]:\n            y1_ = pixels.shape[0] - 1\n\n        if y0_ < 0:\n            y0_ = 0\n\n        rr, cc = line(y0_, x0_, y1_, x1_)\n\n        pixels[rr, cc] = color \n\n\nclass Fractal:\n    def __init__(self, height, width):\n        self.lines = []\n        self.height = height\n        self.width = width\n\n        self.pixels = np.full((height, width), 0, dtype=np.uint16)\n\n        self.origin = (3 * height / 4, width / 2)\n\n\n    def execute(self, iterations, frac_1, frac_2, angle_1=90, angle_2=90):\n\n        angle_1 = np.radians(angle_1)\n        angle_2 = np.radians(angle_2)\n\n        self.pixels = np.full((self.height, self.width), 0, dtype=np.uint16)\n\n        self.draw_next(self.width / 2 - 1, self.origin[1] - 1, self.height - 1, self.origin[0] - 1, iterations, frac_1 = frac_1, frac_2 = frac_2, angle_1 = angle_1, angle_2 = angle_2)\n\n\n    ##\n    # @brief recursively appends new branch lines to lines.\n    #\n    # Each call adds the segment (x0, y0)-(x1, y1), then recurses twice,\n    # rotating right and left about the segment's endpoint.\n    #\n    # num is the remaining iteration count; it decrements recursively\n    def draw_next(self, x0, x1, y0, y1, num, frac_1, frac_2, angle_1, angle_2):\n\n        # Recursive break condition\n        num = num - 1\n        if num <= 0:\n            return\n\n\n        # Starting Vector (to be rotated)\n        A = np.array([x1 - x0, y1 - y0]).reshape(2, 1)\n\n        # Rotation about the right\n        R1 = np.array([[np.cos(angle_1), -np.sin(angle_1)], [np.sin(angle_1), np.cos(angle_1)]])\n\n        # Rotation about the left (negative 
angle)\n        R2 = np.array([[np.cos(-angle_2), -np.sin(-angle_2)], [np.sin(-angle_2), np.cos(-angle_2)]])\n        \n\n\n        self.lines.append(Line(x0, x1, y0, y1))\n\n        dist = np.sqrt((x1 - x0)**2 + (y1 - y0)**2)\n\n\n        right = frac_1 * np.matmul(R1, A) + np.array([x1, y1]).reshape(2, 1)\n        left = frac_2 * np.matmul(R2, A) + np.array([x1, y1]).reshape(2, 1)\n\n        \n        self.draw_next(x1, right[0][0], y1, right[1][0], num, frac_1, frac_2, angle_1, angle_2)\n        self.draw_next(x1, left[0][0], y1, left[1][0], num, frac_1, frac_2, angle_1, angle_2)\n\n\n\n\n    def draw(self):\n        for line in self.lines:\n            line.draw(pixels = self.pixels)\n\n        plt.axis('off')\n        plt.imshow(self.pixels, cmap='Greys') \n    \n\nif __name__ == '__main__':\n    f = Fractal(700, 700)\n\n\n    f.execute(16, 0.2, 0.2, angle_1 = 40, angle_2 = 60)\n\n    f.draw()\n\n    # save before calling show(); saving afterwards would write out a blank figure\n    plt.savefig('RotatedSuperScaled.png')\n    plt.show()\n\n","sub_path":"fractal_tree.py","file_name":"fractal_tree.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
+{"seq_id":"642954535","text":"import pygame\nimport sys\n\npygame.init()\n\nwindows = (800, 600)\nscreen = pygame.display.set_mode(windows)\n\nmyfont = pygame.font.SysFont('Menlo', 48)\nhelloWorld = myfont.render('Hello World', 1, (255, 0, 255), (255, 255, 255))\n\nhelloWorldSize = helloWorld.get_size() # get the size of the hello world text surface\n\nx, y = 0, 0\n\ndirectionX = 1 # leftward direction\ndirectionY = 1 # rightward direction\n\nclock = pygame.time.Clock()\n\nwhile 1:\n\n    clock.tick(40) # the clock controls the timing and the speed of the object\n\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT: sys.exit()\n\n    screen.fill((0, 0, 0)) # background color\n\n    mousePosition = pygame.mouse.get_pos() # get the mouse position\n    x, y = mousePosition\n\n    if x + helloWorldSize[0] > 800:\n        x = 800 - helloWorldSize[0]\n\n    screen.blit(helloWorld, (x, y)) # draw the element at its position\n    pygame.display.update()\n","sub_path":"PygameProjects/pyGame_cursos/Game Programming with Python and PyGame/Theory/05. Using the mouse inside the application.py","file_name":"05. 
Using the mouse inside the application.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
+{"seq_id":"83935386","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2020/1/7 3:13 PM\n# @Author  : Ryu\n# @Site    : \n# @File    : 03_同一进程内的线程共享该进程的数据?.py\n# @Software: PyCharm\n\nfrom threading import Thread\nfrom multiprocessing import Process\nimport os\n\n\ndef work():\n    global n\n    n = 0\n\n\nif __name__ == '__main__':\n    # n=100\n    # p=Process(target=work)\n    # p.start()\n    # p.join()\n    # print('main',n) # The child process p has undoubtedly set its own global n to 0, but it only changed its own copy; the parent process's n is still 100\n\n    n = 1\n    t = Thread(target=work)\n    t.start()\n    t.join()\n    print('main', n)  # The result is 0, because threads within the same process share the process's data\n\n\n\n","sub_path":"10_并发编程/02_多线程/02_在一个进程下开启多个线程与在一个进程下开启多个子进程的区别/03_同一进程内的线程共享该进程的数据?.py","file_name":"03_同一进程内的线程共享该进程的数据?.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
+{"seq_id":"115189060","text":"import os\nimport tempfile\nfrom contextlib import contextmanager\nfrom typing import Optional\n\nimport click\n\nfrom dagster._core.instance import DagsterInstance, InstanceRef\nfrom dagster._core.instance.config import is_dagster_home_set\n\n\n@contextmanager\ndef get_instance_for_service(\n    service_name, instance_ref: Optional[InstanceRef] = None, logger_fn=click.echo\n):\n    if instance_ref:\n        with DagsterInstance.from_ref(instance_ref) as instance:\n            yield instance\n    elif is_dagster_home_set():\n        with DagsterInstance.get() as instance:\n            yield instance\n    else:\n        # make the temp dir in the cwd since default temp dir roots\n        # have issues with FS notif based event log watching\n        with tempfile.TemporaryDirectory(dir=os.getcwd()) as tempdir:\n            logger_fn(\n                f"Using temporary directory {tempdir} for storage. 
This will be removed when\"\n f\" {service_name} exits.\"\n )\n logger_fn(\n \"To persist information across sessions, set the environment variable DAGSTER_HOME\"\n \" to a directory to use.\"\n )\n\n with DagsterInstance.from_ref(\n InstanceRef.from_dir(tempdir, config_dir=os.getcwd())\n ) as instance:\n yield instance\n","sub_path":"python_modules/dagster/dagster/_cli/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"126428692","text":"from rest_framework import serializers\nfrom .models import Task, Project\n\nclass ProjectSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Project\n fields = ('title', 'description')\n\n\nclass TaskSerializer(serializers.ModelSerializer):\n project = ProjectSerializer(required=False)\n class Meta:\n model = Task\n fields = ('project','title', 'description', 'due_date')\n\n def create(self, validated_data):\n print('fuck')\n if 'project' in validated_data:\n project = validated_data.pop('project')\n projectObj = Project.objects.create(**project)\n \n task = Task.objects.create(project=projectObj,**validated_data)\n task.save() \n return task","sub_path":"core/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"374389189","text":"#function returning value\r\ndef sum(a,b):\r\n\tc=a+b\r\n\treturn c\r\na=sum(30,23)\r\nprint(a)\r\nprint(sum(34.5,67.8))\r\n\r\n# c=0x037E07C8\r\n# print(\"hexavalue\",c)\r\n# print(\"id of c\",id(c))","sub_path":"irshad dir/kamal/demo_python/python/L5/functionTest2.py","file_name":"functionTest2.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"14784183","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom CIMIT.forms import VenueForm\nfrom CIMIT.models import Venue\n\n# Create your views here.\ndef edit(request,id):\n employee = Venue.objects.get(id=id)\n return render(request , \"edit.html\", {\"employee\":employee})\n\ndef update(request, id):\n employee = Venue.objects.get(id=id)\n form = VenueForm(request.POST , instance=employee)\n if form.is_valid():\n form.save()\n return redirect('/list')\n return render(request , \"edit.html\", {\"employee\":employee})\n\ndef delete(request , id):\n employee = Venue.objects.get(id=id)\n employee.delete()\n return redirect(\"/list\")\n\ndef list(request):\n context = {'list': Venue.objects.all()}\n return render(request , 'list.html' , context)\n\n\n\n\n\ndef add(request ):\n \n if request.method == \"POST\":\n form = VenueForm(request.POST)\n #form.save()\n if form.is_valid():\n try:\n form.save()\n return redirect('/list')\n except:\n pass\n else :\n form = VenueForm\n return render(request , \"add.html\" , {\"form\":form})\n\n\n","sub_path":"Django_1st_Project/din/CIMIT/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"262912363","text":"def gcd(x,y):\r\n (x,y) = (y,x) if x>y else (x,y)\r\n for factor in range(x,0,-1):\r\n if x % factor ==0 and y % factor ==0:\r\n return factor\r\n\r\ndef divi(x,y):\r\n return x*y // gcd(x,y)\r\nb = gcd(10,8)\r\na = 
divi(10,8)\r\nprint(a,b)","sub_path":"python_exercise/project/基础练手/调用/函数/func11.py","file_name":"func11.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"538851077","text":"# -*- coding: utf-8 -*-\n\nfrom appium import webdriver\nimport time\nimport unittest\n\n# ———— Launch the app and land on the home page ———— success ————\n\ncaps = {}\n# Android 7.0 and above requires the following parameter\ncaps['automationName'] = 'UiAutomator2'\ncaps[\"platformName\"] = \"android\"\ncaps[\"deviceName\"] = \"5142c29f\"\ncaps[\"app\"] = \"C:\\\\Users\\Administrator\\Desktop\\meiweigx.apk\"\ncaps[\"platformVersion\"] = \"7.1.1\"\ncaps['appPackage'] = 'com.meiweigx.customer'\ncaps['appActivity'] = 'com.meiweigx.customer.ui.LaunchActivity'\ncaps[\"noReset\"] = True\n\n\n# TestCase: the base class that all test case classes inherit from\nclass OpenApp(unittest.TestCase):\n\n    # The setUp() method performs initialization before each test case runs. For example, if a test case needs database access, the connection can be created\n    # and initialized in setUp. If a test case needs the Appium service, it should be started in this method.\n    def setUp(self):\n        self.driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", caps)\n\n    # The tearDown() method performs cleanup after each test case runs, e.g. closing database connections or quitting the app.\n    # No matter where this method is defined, it always runs last\n    # def tearDown(self): # quit the program\n    #     self.driver.quit()\n\n    # A concrete test case; its name must start with \"test\"\n    def test_start(self):\n        # Tap \"don't ask again after denying\"\n        self.driver.find_element_by_id('com.android.packageinstaller:id/do_not_ask_checkbox').click()\n        # Tap \"always allow\"\n        self.driver.find_element_by_id('com.android.packageinstaller:id/permission_allow_button').click()\n        # Tap \"always allow\" on the second dialog\n        self.driver.find_element_by_id('com.android.packageinstaller:id/permission_allow_button').click()\n        # Tap \"don't ask again after denying\"\n        self.driver.find_element_by_id('com.android.packageinstaller:id/do_not_ask_checkbox').click()\n        # Tap \"always allow\" on the third dialog\n        self.driver.find_element_by_id('com.android.packageinstaller:id/permission_allow_button').click()\n        time.sleep(1)\n        # Swipe through the onboarding pages; x and y are the start and end coordinates, duration is the swipe time\n        self.driver.swipe(start_x=1000, start_y=1000, end_x=200, end_y=1000, duration=800)\n        time.sleep(1)\n        self.driver.swipe(start_x=1000, start_y=1000, end_x=200, end_y=1000, duration=800)\n        time.sleep(1)\n        self.driver.swipe(start_x=1000, start_y=1000, end_x=200, end_y=1000, duration=800)\n        time.sleep(1)\n        # Tap \"try it now\"\n        self.driver.find_element_by_xpath(\n            '//android.widget.Button[@resource-id=\\\"com.meiweigx.customer:id/btn\\\"]').click()\n\n        time.sleep(1)\n        # Tap the x button in the top-right corner of the home page popup\n        self.driver.find_element_by_id('com.meiweigx.customer:id/first_view_closeBtn').click()\n\n        # An assertion (index 0 is 热销商品/hot sellers, 1 is 新品推荐/new arrivals, 2 is 特价促销/special offers, and so on)\n        a = self.driver.find_elements_by_id(\"com.meiweigx.customer:id/sub_menu_title\")[0].text\n        # print(a)\n        # If a does not equal \"热销商品\" (hot sellers), print the \"没有找到热销商品\" (hot sellers not found) message; if they are equal, print nothing\n        self.assertEqual(\"热销商品\", a, \"没有找到热销商品\")\n\n        # ———— Find the text of each element matching \"com.meiweigx.customer:id/sub_menu_title\" and print it ————\n        # for i in range(5):\n        #     l = self.driver.find_elements_by_id('com.meiweigx.customer:id/sub_menu_title')[i].text\n        #     print(\"Index %d corresponds to section %s.\"%(i,l))\n        #     print(i)\n        #     print(l)\n\nif __name__ == '__main__':\n    # Build the test suite. defaultTestLoader, i.e. a TestLoader() instance, is the test case loader; it provides several methods for loading test cases and returns a test suite\n    suite = unittest.defaultTestLoader.loadTestsFromTestCase(OpenApp)\n\n    # The unittest TextTestRunner class; its run() method runs the test cases assembled in the suite, taking the suite as its argument\n    unittest.TextTestRunner(verbosity=2).run(suite)\n\n    # The two lines above can be replaced with the single line below\n    # unittest.main()\n","sub_path":"test_mwgx/test_openapp.py","file_name":"test_openapp.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"447865991","text":"from 
__future__ import division, unicode_literals\nimport numpy as np, math\n\nimport numpy as np\nfrom colour.utilities import from_range_1, to_domain_1, as_float_array\n\n\ndef basic_logarithm(x, base=2, style='log2'):\n FLT_MIN = 1.175494e-38 \n\n def log_base(x, base=2):\n y = math.log(x, base)\n return y\n \n def antilog_base(y, base=2):\n return base ** y\n\n style = style.lower()\n if style == 'log10':\n return np.where(x>FLT_MIN, log_base(x, 10), log_base(FLT_MIN, 10))\n elif style == 'antilog10':\n return antilog_base(x, 10)\n elif style == 'log2':\n return np.where(x>FLT_MIN, log_base(x), log_base(FLT_MIN))\n elif style == 'antilog2':\n return antilog_base(x)\n\ndef encoding_decoding_logarithm(x, style='linToLog', linSideBreak=1, base=2, logSideSlope=1, linSideSlope=1, logSideOffset=0, linSideOffset=0):\n FLT_MIN = 1.175494e-38\n\n def linToLog(x, base=2, logSideSlope=1, linSideSlope=1, logSideOffset=0, linSideOffset=0):\n y = logSideSlope * math.log(max(linSideSlope * x + linSideOffset, FLT_MIN), base) + logSideOffset\n return y\n\n def logToLin(y, base=2, logSideSlope=1, linSideSlope=1, logSideOffset=0, linSideOffset=0):\n return ((base ** ((y-logSideOffset) / logSideSlope) - linSideOffset) / linSideSlope)\n\n logSideBreak = logSideSlope * math.log((linSideSlope * linSideBreak + linSideOffset), base) + logSideOffset\n linearSlope = logSideSlope * (linSideSlope / ((linSideSlope * linSideBreak + linSideOffset) * np.log(base)))\n linearOffset = logSideBreak - linearSlope * linSideBreak\n\n style = style.lower()\n if style == 'lintolog':\n return linToLog(x)\n elif style == 'logtolin':\n return logToLin(x)\n elif style == 'cameralintolog':\n return np.where(x <= linSideBreak, linearSlope * x + linearOffset, linToLog(x))\n elif style == 'cameralogtolin':\n return np.where(x <= logSideBreak, (x-linearOffset)/linearSlope, logToLin(x))\n ","sub_path":"LogNode/log_node.py","file_name":"log_node.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"307192535","text":"# %s/\\v192\\.101\\.3\\.178\\\\g\\$/192.168.1.104\\\\f$\\\\tmp/\r\nfor a in []:\r\n print(a)\r\nelse:\r\n print(\"aaaa\")\r\nimport os\r\nimport time\r\n# for i in range(100):\r\n# os.makedirs('\\\\\\\\192.101.1.227\\\\e$\\\\huangming\\\\def\\\\%s'%i)\r\na = os.path.abspath('tmp/%s.dbf'%1)\r\nprint(a)\r\npath,_ = os.path.split(a)\r\nprint(path)\r\nimport dbf\r\nimport log\r\nimport shutil\r\nmylog=log.Log()\r\n# path = os.path.abspath('9200000271Prop000000026.dbf')\r\npath = os.path.abspath('9200000285.dbf')\r\n# print(path)\r\nrecords = dbf.Table(path, dbf_type='db3',codepage='cp936')\r\nprint(records.field_names)\r\nrecords.open()\r\nprint(len(records))\r\na = records[:1]\r\nprint(a)\r\nrecords.close()\r\nprint(a)\r\nprint([1,2]+[3,4])\r\n# print(records[0]['ywlsh'])\r\n# path = r'\\\\192.101.1.108\\d$\\TradeSystem\\\\XSanfjyi_ZD\\dbf\\ZD\\rep.dbf'\r\n# shutil.copy(path, 'rep.dbf')\r\n# records = dbf.Table('rep.dbf', dbf_type='db3',codepage='cp936')\r\n# records.open()\r\n# print(len(records))\r\n# print(records[0])\r\n# records.pack()\r\n# print(len(records))\r\n# records.close()\r\n\r\n\r\n","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"230644816","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. 
import Provider as PersonProvider\n\n\nclass Provider(PersonProvider):\n formats = (\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}',\n '{{first_name}} {{last_name}}-{{last_name}}',\n '{{first_name_female}} {{last_name}} {{last_name}}',\n '{{prefix}} {{first_name}} {{last_name}}',\n )\n\n first_names_male = (\n 'Adolf', 'Adrian', 'Alain', 'Albert', 'Alberto', 'Aldo', 'Alex',\n 'Alexander', 'Alexandre', 'Alfons', 'Alfred', 'Alois', 'Andre',\n 'Andrea', 'Andreas', 'André', 'Angelo', 'Antoine', 'Anton', 'Antonio',\n 'Armin', 'Arnold', 'Arthur', 'Beat', 'Bernard', 'Bernhard', 'Bruno',\n 'Carlo', 'Charles', 'Christian', 'Christoph', 'Christophe', 'Claude',\n 'Claudio', 'Daniel', 'David', 'Denis', 'Didier', 'Dieter', 'Dominique',\n 'Edgar', 'Eduard', 'Edwin', 'Emil', 'Enrico', 'Eric', 'Erich', 'Ernst',\n 'Erwin', 'Eugen', 'Felix', 'Ferdinand', 'Francesco', 'Francis',\n 'Franco', 'Franz', 'François', 'Fredy', 'Fridolin', 'Friedrich',\n 'Fritz', 'Frédéric', 'Georg', 'Georges', 'Gerhard', 'Gianni',\n 'Gilbert', 'Giovanni', 'Giuseppe', 'Gottfried', 'Guido', 'Guy',\n 'Gérald', 'Gérard', 'Hans', 'Hans-Peter', 'Hans-Rudolf', 'Hans-Ulrich',\n 'Hansjörg', 'Hanspeter', 'Hansruedi', 'Heinrich', 'Heinz', 'Helmut',\n 'Henri', 'Herbert', 'Hermann', 'Hubert', 'Hugo', 'Jacques', 'Jakob',\n 'Jan', 'Jean', 'Jean-Claude', 'Jean-Daniel', 'Jean-François',\n 'Jean-Jacques', 'Jean-Louis', 'Jean-Luc', 'Jean-Marc', 'Jean-Marie',\n 'Jean-Paul', 'Jean-Pierre', 'Johann', 'Johannes', 'Josef', 'Joseph',\n 'Jörg', 'Jürg', 'Karl', 'Klaus', 'Konrad', 'Kurt', 'Laurent', 'Leo',\n 'Louis', 'Luigi', 'Manfred', 'Manuel', 'Marc', 'Marcel', 'Marco',\n 'Mario', 'Markus', 'Martin', 'Massimo', 'Matthias', 'Maurice', 'Max',\n 'Michael', 'Michel', 'Nicolas', 'Niklaus', 'Norbert', 'Olivier',\n 'Oskar', 'Otto', 'Paolo', 'Pascal', 'Patrick', 'Paul', 'Peter',\n 'Philipp', 'Pierre', 'Pierre-Alain', 'Pierre-André', 'Rainer',\n 'Raymond', 'Reinhard', 'Remo', 'Renato', 'Rene', 'René', 'Reto',\n 'Richard', 'Robert', 'Roberto', 'Roger', 'Roland', 'Rolf', 'Roman',\n 'Rudolf', 'Ruedi', 'Samuel', 'Sandro', 'Serge', 'Silvio', 'Simon',\n 'Stefan', 'Stephan', 'Stéphane', 'Theo', 'Theodor', 'Thomas', 'Toni',\n 'Ueli', 'Ulrich', 'Urs', 'Victor', 'Viktor', 'Walter', 'Werner',\n 'Willi', 'Willy', 'Wolfgang', 'Yves',\n )\n\n first_names_female = (\n 'Alice', 'Andrea', 'Anita', 'Anna', 'Anne', 'Anne-Marie', 'Annemarie',\n 'Astrid', 'Barbara', 'Beatrice', 'Bernadette', 'Brigitta', 'Brigitte',\n 'Béatrice', 'Carmen', 'Catherine', 'Chantal', 'Christiane',\n 'Christina', 'Christine', 'Claudia', 'Claudine', 'Corinne', 'Cornelia',\n 'Daniela', 'Danielle', 'Denise', 'Dominique', 'Dora', 'Doris', 'Edith',\n 'Eliane', 'Elisabeth', 'Elsbeth', 'Erika', 'Esther', 'Eva', 'Evelyne',\n 'Fabienne', 'Florence', 'Franziska', 'Françoise', 'Gabriela',\n 'Gabrielle', 'Gertrud', 'Gisela', 'Heidi', 'Helen', 'Helena', 'Helene',\n 'Hildegard', 'Irene', 'Isabelle', 'Jacqueline', 'Janine', 'Jeannette',\n 'Johanna', 'Jolanda', 'Josiane', 'Judith', 'Karin', 'Katharina',\n 'Laura', 'Laurence', 'Liliane', 'Liselotte', 'Lydia', 'Madeleine',\n 'Manuela', 'Margrit', 'Margrith', 'Maria', 'Marianne', 'Marlies',\n 'Marlis', 'Martha', 'Martina', 'Martine', 'Maya', 'Monica', 'Monika',\n 'Monique', 'Myriam', 'Nathalie', 'Nelly', 'Nicole', 'Paola',\n 'Patricia', 'Petra', 'Pia', 'Priska', 'Regina', 'Regula', 
'Renata',\n 'Renate', 'Rita', 'Rosemarie', 'Rosmarie', 'Ruth', 'Sabine', 'Sandra',\n 'Silvia', 'Simone', 'Sonja', 'Susanna', 'Susanne', 'Suzanne', 'Sylvia',\n 'Therese', 'Ursula', 'Verena', 'Vreni', 'Véronique', 'Yvonne',\n )\n\n first_names = first_names_male + first_names_female\n\n last_names = (\n 'Ackermann', 'Aebi', 'Albrecht', 'Ammann', 'Amrein', 'Arnold',\n 'Bachmann', 'Bader', 'Bär', 'Bättig', 'Bauer', 'Baumann',\n 'Baumgartner', 'Baur', 'Beck', 'Benz', 'Berger', 'Bernasconi',\n 'Betschart', 'Bianchi', 'Bieri', 'Blaser', 'Blum', 'Bolliger',\n 'Bosshard', 'Braun', 'Brun', 'Brunner', 'Bucher', 'Bühler', 'Bühlmann',\n 'Burri', 'Christen', 'Egger', 'Egli', 'Eichenberger', 'Erni', 'Ernst',\n 'Eugster', 'Fankhauser', 'Favre', 'Fehr', 'Felber', 'Felder',\n 'Ferrari', 'Fischer', 'Flückiger', 'Forster', 'Frei', 'Frey', 'Frick',\n 'Friedli', 'Fuchs', 'Furrer', 'Gasser', 'Geiger', 'Gerber', 'Gfeller',\n 'Giger', 'Gloor', 'Graf', 'Grob', 'Gross', 'Gut', 'Haas', 'Häfliger',\n 'Hafner', 'Hartmann', 'Hasler', 'Hauser', 'Hermann', 'Herzog', 'Hess',\n 'Hirt', 'Hodel', 'Hofer', 'Hoffmann', 'Hofmann', 'Hofstetter', 'Hotz',\n 'Huber', 'Hug', 'Hunziker', 'Hürlimann', 'Imhof', 'Isler', 'Iten',\n 'Jäggi', 'Jenni', 'Jost', 'Kägi', 'Kaiser', 'Kälin', 'Käser',\n 'Kaufmann', 'Keller', 'Kern', 'Kessler', 'Knecht', 'Koch', 'Kohler',\n 'Kuhn', 'Küng', 'Kunz', 'Lang', 'Lanz', 'Lehmann', 'Leu', 'Leunberger',\n 'Lüscher', 'Lustenberger', 'Lüthi', 'Lutz', 'Mäder', 'Maier', 'Marti',\n 'Martin', 'Maurer', 'Mayer', 'Meier', 'Meili', 'Meister', 'Merz',\n 'Mettler', 'Meyer', 'Michel', 'Moser', 'Müller', 'Näf', 'Ott', 'Peter',\n 'Pfister', 'Portmann', 'Probst', 'Rey', 'Ritter', 'Roos', 'Roth',\n 'Rüegg', 'Schäfer', 'Schaller', 'Schär', 'Schärer', 'Schaub',\n 'Scheidegger', 'Schenk', 'Scherrer', 'Schlatter', 'Schmid', 'Schmidt',\n 'Schneider', 'Schnyder', 'Schoch', 'Schuler', 'Schumacher', 'Schürch',\n 'Schwab', 'Schwarz', 'Schweizer', 'Seiler', 'Senn', 'Sidler',\n 'Siegrist', 'Sigrist', 'Spörri', 'Stadelmann', 'Stalder', 'Staub',\n 'Stauffer', 'Steffen', 'Steiger', 'Steiner', 'Steinmann', 'Stettler',\n 'Stocker', 'Stöckli', 'Stucki', 'Studer', 'Stutz', 'Suter', 'Sutter',\n 'Tanner', 'Thommen', 'Tobler', 'Vogel', 'Vogt', 'Wagner', 'Walder',\n 'Walter', 'Weber', 'Wegmann', 'Wehrli', 'Weibel', 'Wenger',\n 'Wettstein', 'Widmer', 'Winkler', 'Wirth', 'Wirz', 'Wolf', 'Wüthrich',\n 'Wyss', 'Zbinden', 'Zehnder', 'Ziegler', 'Zimmermann', 'Zingg',\n 'Zollinger', 'Zürcher',\n )\n\n prefixes = ('Dr.', 'Prof.',)\n","sub_path":"faker/providers/person/de_CH/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"175816673","text":"import os\n\n\ndef path_log(path):\n def log_func(old_func):\n def new_func(*args, **kwargs):\n from datetime import datetime\n call_time = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n result = old_func(*args, **kwargs)\n with open(f'{path}/file.log', 'a', encoding='utf-8') as file:\n file.write(f'Время вызова функции: {call_time}\\n'\n f'Имя функции: {old_func.__name__}\\n'\n f'Аргументы функции: {args}_{kwargs}\\n'\n f'Возвращаемое значение: {result}\\n'\n f'\\n')\n return result\n\n return new_func\n\n return log_func\n\n\n@path_log(os.getcwd())\ndef func(*args, **kwargs):\n return args, kwargs\n\n\nfunc(3, 
4)\n","sub_path":"netology_advanced_python_tasks/4_decorators/task/task_decorators_param.py","file_name":"task_decorators_param.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"441329739","text":"# https://app.diagrams.net/?page-id=_ntfaa3PA2ggRSIUykr3#G1eKp2W5wpHgpv28qTq0SMU-f0MtLq0pst\r\n# 4. Find the sum of n elements of the following series: 1, -0.5, 0.25, -0.125, …\r\n# The number of elements (n) is entered from the keyboard.\r\n\r\nn = int(input('Enter a number: '))\r\nx = 1\r\nz = x\r\nwhile n != 1:\r\n    x = x * -1 / 2\r\n    z += x\r\n    n -= 1\r\n    print(x)\r\nprint(f'Sum of the series = {z}')\r\n\r\n## recursive\r\n\r\ndef my_func(num, n):\r\n    n -= 1\r\n    if n == 0:\r\n        return num\r\n    else:\r\n        return num + my_func(num * -1 / 2, n)\r\n\r\nprint(my_func(1, 4))","sub_path":"lesson2/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"287621438","text":"\"\"\"Training algorithm track submission functions for MNIST.\"\"\"\nfrom typing import Iterator, List, Tuple, Union\n\nimport functools\nimport jax\nimport jax.numpy as jnp\nimport optax\nimport spec\nfrom flax import jax_utils\n\nfrom . import workload\n\n\ndef get_batch_size(workload_name):\n  batch_sizes = {'mnist_jax': 1024}\n  return batch_sizes[workload_name]\n\n\ndef optimizer(hyperparameters):\n  opt_init_fn, opt_update_fn = optax.chain(\n      optax.scale_by_adam(\n          b1=1.0 - hyperparameters.one_minus_beta_1,\n          b2=0.999,\n          eps=hyperparameters.epsilon),\n      optax.scale(-hyperparameters.learning_rate)\n  )\n  return opt_init_fn, opt_update_fn\n\n\ndef init_optimizer_state(\n    workload: spec.Workload,\n    model_params: spec.ParameterContainer,\n    model_state: spec.ModelAuxiliaryState,\n    hyperparameters: spec.Hyperparamters,\n    rng: spec.RandomState) -> spec.OptimizerState:\n  del model_params\n  del model_state\n  del rng\n  params_zeros_like = jax.tree_map(\n      lambda s: jnp.zeros(s.shape_tuple), workload.param_shapes)\n  opt_init_fn, _ = optimizer(hyperparameters)\n  return opt_init_fn(params_zeros_like)\n\n\n# We need to jax.pmap here instead of inside update_params because the latter\n# would recompile the function every step.\n@functools.partial(\n    jax.pmap,\n    axis_name='batch',\n    in_axes=(None, 0, 0, None, 0, 0, 0, None, 0),\n    static_broadcasted_argnums=(0,))\ndef pmapped_update_params(\n    workload: spec.Workload,\n    current_param_container: spec.ParameterContainer,\n    model_state: spec.ModelAuxiliaryState,\n    hyperparameters: spec.Hyperparamters,\n    input_batch: spec.Tensor,\n    label_batch: spec.Tensor,\n    optimizer_state: spec.OptimizerState,\n    rng: spec.RandomState,\n    local_device_index) -> spec.UpdateReturn:\n  # Note that `rng` is the same across all devices! 
If a per-device RNG is\n # required, then `local_device_index` can be folded into `rng`.\n del local_device_index\n\n def loss_fn(params):\n logits_batch, new_model_state = workload.model_fn(\n params,\n input_batch,\n model_state,\n spec.ForwardPassMode.TRAIN,\n rng,\n update_batch_norm=True)\n loss = workload.loss_fn(label_batch, logits_batch)\n return jnp.mean(loss), new_model_state\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (_, new_model_state), grad = grad_fn(current_param_container)\n _, opt_update_fn = optimizer(hyperparameters)\n updates, new_optimizer_state = opt_update_fn(\n grad, optimizer_state, current_param_container)\n updated_params = optax.apply_updates(current_param_container, updates)\n return new_optimizer_state, updated_params, new_model_state\n\ndef update_params(\n workload: spec.Workload,\n current_param_container: spec.ParameterContainer,\n current_params_types: spec.ParameterTypeTree,\n model_state: spec.ModelAuxiliaryState,\n hyperparameters: spec.Hyperparamters,\n input_batch: spec.Tensor,\n label_batch: spec.Tensor,\n # This will define the output activation via `output_activation_fn`.\n loss_type: spec.LossType,\n optimizer_state: spec.OptimizerState,\n eval_results: List[Tuple[int, float]],\n global_step: int,\n rng: spec.RandomState) -> spec.UpdateReturn:\n \"\"\"Return (updated_optimizer_state, updated_params, updated_model_state).\"\"\"\n del current_params_types\n del loss_type\n del eval_results\n del global_step\n\n num_devices = jax.local_device_count()\n input_shape = input_batch.shape\n reshaped_input_batch = jnp.reshape(\n input_batch,\n (num_devices, input_shape[0] // num_devices, *input_shape[1:]))\n reshaped_label_batch = jnp.reshape(\n label_batch,\n (num_devices, label_batch.shape[0] // num_devices,\n *label_batch.shape[1:]))\n\n # TODO(znado) we should be more efficient than replicating state each step.\n new_optimizer_state, updated_params, new_model_state = pmapped_update_params(\n workload,\n jax_utils.replicate(current_param_container),\n jax_utils.replicate(model_state),\n hyperparameters,\n reshaped_input_batch,\n reshaped_label_batch,\n jax_utils.replicate(optimizer_state),\n rng,\n jnp.arange(num_devices))\n return (\n jax_utils.unreplicate(new_optimizer_state),\n jax_utils.unreplicate(updated_params),\n jax_utils.unreplicate(new_model_state))\n\n\n# Not allowed to update the model parameters, hyperparameters, global step, or\n# optimzier state.\ndef data_selection(\n workload: spec.Workload,\n input_queue: Iterator[Tuple[spec.Tensor, spec.Tensor]],\n optimizer_state: spec.OptimizerState,\n current_param_container: spec.ParameterContainer,\n hyperparameters: spec.Hyperparamters,\n global_step: int,\n rng: spec.RandomState) -> Tuple[spec.Tensor, spec.Tensor]:\n \"\"\"Select data from the infinitely repeating, pre-shuffled input queue.\n\n Each element of the queue is a single training example and label.\n\n We left out `current_params_types` because we do not believe that it would\n # be necessary for this function.\n\n Return a tuple of input label batches.\n \"\"\"\n del workload\n del optimizer_state\n del current_param_container\n del hyperparameters\n del global_step\n del rng\n return next(input_queue)\n\n","sub_path":"workloads/mnist/mnist_jax/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"580151457","text":"#!/usr/bin/env python3\n\n\nclass Node(object):\n def __init__(self, 
val):\n        self.val = val\n        self.prev = None\n        self.next = None\n\n\ndef main():\n    numplayers = 426\n    lastmarb = 72058\n    players = [0 for i in range(numplayers)]\n\n    currentplr = 0\n    curmarb = Node(0)\n    curmarb.next = curmarb\n    curmarb.prev = curmarb\n    nextmarb = 1\n    while True:\n        if nextmarb % 23 == 0:\n            remove = curmarb.prev.prev.prev.prev.prev.prev.prev\n            players[currentplr] += nextmarb + remove.val\n            between_a = remove.prev\n            between_b = remove.next\n            between_a.next = between_b\n            between_b.prev = between_a\n            curmarb = between_b\n        else:\n            place_between_a = curmarb.next\n            place_between_b = place_between_a.next\n            curmarb = Node(nextmarb)\n            curmarb.next = place_between_b\n            place_between_b.prev = curmarb\n            curmarb.prev = place_between_a\n            place_between_a.next = curmarb\n\n        if nextmarb == lastmarb:\n            break\n        nextmarb += 1\n        currentplr = (currentplr + 1) % numplayers\n\n    print(\"Winning score:\", max(players))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"9/a-dll.py","file_name":"a-dll.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"228878673","text":"from flask import Flask, jsonify, request, send_from_directory\nfrom flask_cors import CORS\n\nfrom blockchain import Blockchain\nfrom wallet import Wallet\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/wallet', methods=['POST'])\ndef create_keys():\n\twallet.create_keys()\n\tif wallet.save_keys():\n\t\tglobal blockchain\n\t\tblockchain = Blockchain(wallet.public_key, port)\n\t\tresponse = {\n\t\t\t'public_key': wallet.public_key,\n\t\t\t'private_key': wallet.private_key,\n\t\t\t'balance': blockchain.get_balance()\n\t\t}\n\t\treturn jsonify(response), 201\n\telse:\n\t\tresponse = {\n\t\t\t'message': 'Failed to save the keys'\n\t\t}\n\t\treturn jsonify(response), 500\n\n\n@app.route('/wallet', methods=['GET'])\ndef load_keys():\n\tif wallet.load_keys():\n\t\tglobal blockchain\n\t\tblockchain = Blockchain(wallet.public_key, port)\n\t\tresponse = {\n\t\t\t'public_key': wallet.public_key,\n\t\t\t'private_key': wallet.private_key,\n\t\t\t'balance': blockchain.get_balance()\n\t\t}\n\t\treturn jsonify(response), 201\n\telse:\n\t\tresponse = {\n\t\t\t'message': 'Failed to load the keys'\n\t\t}\n\t\treturn jsonify(response), 500\n\n\n@app.route('/balance', methods=['GET'])\ndef get_balance():\n\tbalance = blockchain.get_balance()\n\tif balance is not None:\n\t\tresponse = {\n\t\t\t'message': 'Successfully fetched balance',\n\t\t\t'balance': balance\n\t\t}\n\t\treturn jsonify(response), 200\n\telse:\n\t\tresponse = {\n\t\t\t'message': 'Failed to load balance',\n\t\t\t'wallet_set_up': wallet.public_key is not None\n\t\t}\n\t\treturn jsonify(response), 500\n\n\n@app.route('/', methods=['GET'])\ndef get_node_ui():\n\treturn send_from_directory('ui', 'node.html')\n\n\n@app.route('/network', methods=['GET'])\ndef get_network_ui():\n\treturn send_from_directory('ui', 'network.html')\n\n\n@app.route('/transaction', methods=['POST'])\ndef add_transaction():\n\tif wallet.public_key is None:\n\t\tresponse = {\n\t\t\t'message': 'No wallet set up'\n\t\t}\n\t\treturn jsonify(response), 400\n\tdata = request.get_json()\n\tif not data:\n\t\tresponse = {\n\t\t\t'message': 'No data found'\n\t\t}\n\t\treturn jsonify(response), 400\n\n\trequired_fields = ['recipient', 'amount']\n\tif not all(field in data for field in required_fields):\n\t\tresponse = {\n\t\t\t'message': 'Required data is missing'\n\t\t}\n\t\treturn jsonify(response), 400\n\n\trecipient = 
data['recipient']\n\tamount = data['amount']\n\tsignature = wallet.sign_transaction(wallet.public_key, recipient, amount)\n\tsuccess = blockchain.add_transaction(recipient, wallet.public_key, signature, amount)\n\tif success:\n\t\tresponse = {\n\t\t\t'message': 'Successfully added transaction',\n\t\t\t'transaction': {\n\t\t\t\t'sender': wallet.public_key,\n\t\t\t\t'recipient': recipient,\n\t\t\t\t'amount': amount,\n\t\t\t\t'signature': signature\n\t\t\t},\n\t\t\t'balance': blockchain.get_balance()\n\t\t}\n\t\treturn jsonify(response), 201\n\telse:\n\t\tresponse = {\n\t\t\t'message': 'Failed to create a transaction'\n\t\t}\n\t\treturn jsonify(response), 500\n\n\n@app.route('/mine', methods=['POST'])\ndef mine():\n\tblock = blockchain.mine_block()\n\tif block is not None:\n\t\tdict_block = block.__dict__.copy()\n\t\tdict_block['transactions'] = [tx.__dict__ for tx in dict_block['transactions']]\n\t\tresponse = {\n\t\t\t'message': 'Block added successfully',\n\t\t\t'block': dict_block,\n\t\t\t'balance': blockchain.get_balance()\n\t\t}\n\t\treturn jsonify(response), 201\n\telse:\n\t\tresponse = {\n\t\t\t'message': 'Failed to mine a block',\n\t\t\t'wallet_set_up': wallet.public_key is not None\n\t\t}\n\t\treturn jsonify(response), 500\n\n\n@app.route('/transactions', methods=['GET'])\ndef get_open_transactions():\n\ttransactions = blockchain.get_open_transactions()\n\tdict_transactions = [tx.__dict__ for tx in transactions ]\n\treturn jsonify(dict_transactions), 200\n\n\n@app.route('/chain', methods=['GET'])\ndef get_chain():\n\tchain_snapshot = blockchain.chain\n\tdict_chain = [block.__dict__.copy() for block in chain_snapshot]\n\tfor dict_block in dict_chain:\n\t\tdict_block['transactions'] = [tx.__dict__ for tx in dict_block['transactions']]\n\treturn jsonify(dict_chain), 200\n\n\n@app.route('/node', methods=['POST'])\ndef add_node():\n\tdata = request.get_json()\n\tif not data:\n\t\tresponse = {\n\t\t\t'message': 'No data attached'\n\t\t}\n\t\treturn jsonify(response), 400\n\tif 'node' not in data:\n\t\tresponse = {\n\t\t\t'message': 'No node data found'\n\t\t}\n\t\treturn jsonify(response), 400\n\tnode = data['node']\n\tblockchain.add_peer_node(node)\n\tresponse = {\n\t\t'message': 'Successfully added new node',\n\t\t'all_nodes': blockchain.get_peer_nodes()\n\t}\n\treturn jsonify(response), 201\n\n\n@app.route('/node/', methods=['DELETE'])\ndef remove_node(node_url):\n\tif node_url == '' or node_url is None:\n\t\tresponse = {\n\t\t\t'message': 'No node found'\n\t\t}\n\t\treturn jsonify(response), 400\n\tblockchain.remove_peer_node(node_url)\n\tresponse = {\n\t\t'message': 'Node removed',\n\t\t'all_nodes': blockchain.get_peer_nodes()\n\t}\n\treturn jsonify(response), 200\n\n\n@app.route('/nodes', methods=['GET'])\ndef get_nodes():\n\tresponse = {\n\t\t'all_nodes': blockchain.get_peer_nodes()\n\t}\n\treturn jsonify(response), 200\n\n\nif __name__ == '__main__':\n\tfrom argparse import ArgumentParser\n\tparser = ArgumentParser()\n\tparser.add_argument('-p', '--port', type=int, default=5000)\n\targs = parser.parse_args()\n\tport = args.port\n\twallet = Wallet(port)\n\tblockchain = Blockchain(wallet.public_key, port)\n\tapp.run(host='0.0.0.0', port=port)\n","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"197079120","text":"Import('env')\nImport('config')\n\nsources = []\n\nif config['platform'] == 'tile' or config['platform'] == 'sock':\n is_l47 = 
True\nelse:\n is_l47= False\n\nif is_l47 :\n sources.append([\n \t'AppInterface.cpp',\n \t'SslInterface.cpp',\n \t'Event.cpp',\n \t'Ip6Header.cpp',\n \t'ListenInterface.cpp',\n \t'PacketQueue.cpp',\n \t'TcpConnection.cpp',\n \t'SslConnection.cpp',\n \t'TimeWheel.cpp',\n \t'XMLDb.cpp',\n \t'CommandConfiguration.cpp',\n \t'EventLog.cpp',\n \t'IpAddress.cpp',\n \t'LoadConfiguration.cpp',\n \t'ProtocolClient.cpp',\n \t'TcpListen.cpp',\n \t'SslListen.cpp',\n \t'TimeWheelEvent.cpp',\n \t'XMLDbFile.cpp',\n \t'CommandInstance.cpp',\n \t'Exception.cpp',\n \t'IpHeader.cpp',\n \t'LoadProfileConfig.cpp',\n \t'Protocol.cpp',\n \t'ProtocolsManager.cpp',\n \t'SimUser.cpp',\n \t'SimUserConf.cpp',\n \t'TcpStackConfig.cpp',\n \t'TimeWheelEventProcessor.cpp',\n \t'CommandSequenceInstance.cpp',\n \t'Checksum.cpp',\n \t'IpSettingsConfig.cpp',\n \t'LoopBeginCommandInstance.cpp',\n \t'ProtocolServer.cpp',\n \t'TeleCycle.cpp',\n \t'UdpConnection.cpp',\n \t'XMLParser.cpp',\n \t'Connection.cpp',\n \t'Interface.cpp',\n \t'IpStackConfig.cpp',\n \t'LoopEndCommandInstance.cpp',\n \t'Session.cpp',\n \t'TelePortConfig.cpp',\n \t'UdpHeader.cpp',\n \t'XMLTag.cpp',\n \t'EthAddress.cpp',\n \t'Ip4Address.cpp',\n \t'L4PortManagement.cpp',\n \t'Network.cpp',\n \t'Statistics.cpp',\n \t'ThinkCommandInstance.cpp',\n \t'UdpListen.cpp',\n \t'XMLTagIterator.cpp',\n 'XMLTagProcessor.cpp',\n \t'EthHeader.cpp',\n \t'Ip6Address.cpp',\n \t'Listen.cpp',\n \t'Packet.cpp',\n \t'StringTransform.cpp',\n \t'Timer.cpp',\n \t'VLANTag.cpp',\n\t\t'Uint24.cpp',\n ])\nelif config['platform'] == 'x86' :\n sources.append([\n 'EthAddress.cpp',\n 'EthHeader.cpp',\n 'Interface.cpp',\n 'Checksum.cpp',\n 'Exception.cpp',\n 'Ip4Address.cpp',\n 'Ip6Address.cpp',\n 'IpAddress.cpp',\n 'Ip6Header.cpp',\n 'IpAddress.cpp',\n 'StringTransform.cpp',\n 'EventLog.cpp',\n 'VLANTag.cpp',\n 'XMLDb.cpp',\n 'XMLDbFile.cpp',\n 'XMLParser.cpp',\n 'XMLTag.cpp',\n 'XMLTagIterator.cpp',\n 'XMLTagProcessor.cpp',\n 'TimeWheelEventProcessor.cpp',\n 'TimeWheel.cpp',\n 'TimeWheelEvent.cpp',\n 'TeleCycle.cpp',\n\t\t'Uint24.cpp',\n ])\n\n# TL releated files\n# Always used files (tele-tester, tele-manager for both BigTao and TestStorm)\nsources.append([\n 'TLDispatcher.cpp',\n 'TLEngine.cpp',\n 'TLLogger.cpp',\n 'TLProcessor.cpp',\n 'TLWrapper.cpp',\n 'TLMsg.cpp',\n 'TLCounterClientFactory.cpp',\n 'TLCounter.cpp',\n 'TLCounterInProcessBroker.cpp',\n 'TLCounterInProcessClient.cpp',\n 'TLStatDBSqlite.cpp',\n 'TLStatDBStatement.cpp',\n 'TLStatDBTabSchema.cpp',\n 'TLStatData.cpp',\n 'TLTester.cpp',\n 'TLStatSubscriber.cpp',\n 'TLSubscribeStatMsg.cpp',\n 'TLProtoProcessor.cpp',\n\t'hash_32.c',\n\t'TLRoutine.cpp',\n 'utils/PathUtils.cpp',\n 'utils/Wildcard.cpp',\n 'utils/Location.cpp',\n 'TLChassis.cpp',\n])\n\n# tele-tester or tele-manager for BigTao\nif is_l47 or config['platform'] == 'x86' and (config['target'] == 'bigtao' or config['target'] == 'utest'):\n sources.append([\n 'LoadGen.cpp',\n 'l23/L23Processor.cpp',\n 'l23/L23Session.cpp',\n 'l23/L23Stack.cpp',\n 'l23/L23StackEventMsg.cpp',\n 'l23/L23StackMgr.cpp',\n ])\n\n if is_l47:\n \tsources.append([\n \t\t'l47/L47CtrlProcessor.cpp'\n \t])\n\ne = env.Clone()\n\n# TODO\n\n# Generate Library\nlib = e.Library(config['outputlibs'] + '/framework', sources)\n\n","sub_path":"TL/tile/framework/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"635128079","text":"from distutils.extension import Extension\nfrom 
Cython.Distutils import build_ext\nimport os\nfrom os.path import join as pjoin\nfrom distutils.errors import DistutilsExecError, DistutilsPlatformError, \\\n\t\t\t\t\t\t\t CompileError, LibError, LinkError\nfrom distutils.ccompiler import gen_lib_options\nfrom distutils import log\n\ndef get_cuda_support():\n\tdef find_in_path(name, path):\n\t\t\"Find a file in a search path\"\n\t\tfor dir in path.split(os.pathsep):\n\t\t\tbinpath = pjoin(dir, name)\n\t\t\tif os.path.exists(binpath):\n\t\t\t\treturn os.path.abspath(binpath)\n\t\treturn None\n\n\tdef locate_cuda():\n\t\tif 'CUDA_PATH' in os.environ:\n\t\t\thome = os.environ['CUDA_PATH']\n\t\t\tnvcc = pjoin(home, 'bin', 'nvcc')\n\t\telse:\n\t\t\tnvcc = find_in_path('nvcc', os.environ['PATH'])\n\t\t\tif nvcc is None:\n\t\t\t\traise EnvironmentError('The nvcc binary could not be '\n\t\t\t\t\t'located in your $PATH. Either add it to your path, or set $CUDA_PATH')\n\t\t\thome = os.path.dirname(os.path.dirname(nvcc))\n\n\t\tcudaconfig = {'home':home, 'nvcc':nvcc,\n\t\t\t\t\t 'include': pjoin(home, 'include'),\n\t\t\t\t\t 'lib64': pjoin(home, 'lib', 'x64')}\n\t\tfor k, v in cudaconfig.items():\n\t\t\tif not os.path.exists(v) and not os.path.exists(v + \".exe\"):\n\t\t\t\traise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))\n\n\t\treturn cudaconfig\n\n\tdef rename_cuda_ext(cuda_files, ext = \".c\", Cdir = \"\", cudadir = \"cuda\"):\n\t\tif isinstance(cuda_files, str):\n\t\t\tcuda_files = [cuda_files]\n\t\tif not cudadir.endswith(\"/\"):\n\t\t\tcudadir += \"/\"\n\t\tif not os.path.exists(cudadir):\n\t\t\tos.mkdir(cudadir)\n\t\tfor cuda_file in cuda_files:\n\t\t\treal_cuda_file = os.path.splitext(cuda_file)[0] + ext\n\t\t\tif os.path.exists(real_cuda_file):\n\t\t\t\tcufile = open(cudadir + cuda_file, 'w')\n\t\t\t\tcufile.write(open(Cdir + real_cuda_file).read())\n\t\t\t\tcufile.close()\n\n\n\tdef customize_compiler_for_nvcc_unix(self):\n\t\tself.src_extensions.append('.cu')\n\t\t# save references to the default compiler_so and _comple methods\n\t\tdefault_compiler_so = self.compiler_so\n\t\tsuper = self._compile\n\n\t\tdef _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n\t\t\tif os.path.splitext(src)[1] == '.cu':\n\t\t\t\tself.set_executable('compiler_so', CUDA['nvcc'])\n\t\t\t\tpostargs = extra_postargs['nvcc']\n\t\t\telse:\n\t\t\t\tpostargs = extra_postargs['cc']\n\n\t\t\tsuper(obj, src, ext, cc_args, postargs, pp_opts)\n\t\t\t# reset the default compiler_so, which we might have changed for cuda\n\t\t\tself.compiler_so = default_compiler_so\n\n\t\t# inject our redefined _compile method into the class\n\t\tself._compile = _compile\n\n\tdef customize_compiler_for_nvcc_win(self):\n\t\tself._cuda_extensions = ['.cu']\n\t\tself.src_extensions += self._cuda_extensions\n\t\tdef compile(sources,\n\t\t\t\toutput_dir=None, macros=None, include_dirs=None, debug=0,\n\t\t\t\textra_preargs=None, extra_postargs=None, depends=None):\n\n\t\t\tif not self.initialized:\n\t\t\t\tself.initialize()\n\n\t\t\tcompile_info = self._setup_compile(output_dir, macros, include_dirs,\n\t\t\t\t\t\t\t\t\t\t\t sources, depends, extra_postargs)\n\t\t\tmacros, objects, extra_postargs, pp_opts, build = compile_info\n\n\t\t\tcompile_opts = extra_preargs or []\n\t\t\tcompile_opts.append('/c')\n\t\t\tif debug:\n\t\t\t\tcompile_opts.extend(self.compile_options_debug)\n\t\t\telse:\n\t\t\t\tcompile_opts.extend(self.compile_options)\n\n\n\t\t\tadd_cpp_opts = False\n\n\t\t\tfor obj in objects:\n\t\t\t\ttry:\n\t\t\t\t\tsrc, ext = 
build[obj]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tcontinue\n\t\t\t\tif debug:\n\t\t\t\t\t# pass the full pathname to MSVC in debug mode,\n\t\t\t\t\t# this allows the debugger to find the source file\n\t\t\t\t\t# without asking the user to browse for it\n\t\t\t\t\tsrc = os.path.abspath(src)\n\n\t\t\t\tif ext in self._c_extensions:\n\t\t\t\t\tinput_opt = \"/Tc\" + src\n\t\t\t\telif ext in self._cpp_extensions:\n\t\t\t\t\tinput_opt = \"/Tp\" + src\n\t\t\t\t\tadd_cpp_opts = True\n\t\t\t\telif ext in self._rc_extensions:\n\t\t\t\t\t# compile .RC to .RES file\n\t\t\t\t\tinput_opt = src\n\t\t\t\t\toutput_opt = \"/fo\" + obj\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.spawn([self.rc] + pp_opts + [output_opt, input_opt])\n\t\t\t\t\texcept DistutilsExecError as msg:\n\t\t\t\t\t\traise CompileError(msg)\n\t\t\t\t\tcontinue\n\t\t\t\telif ext in self._mc_extensions:\n\t\t\t\t\t# Compile .MC to .RC file to .RES file.\n\t\t\t\t\t# * '-h dir' specifies the directory for the\n\t\t\t\t\t#\t generated include file\n\t\t\t\t\t# * '-r dir' specifies the target directory of the\n\t\t\t\t\t#\t generated RC file and the binary message resource\n\t\t\t\t\t#\t it includes\n\t\t\t\t\t#\n\t\t\t\t\t# For now (since there are no options to change this),\n\t\t\t\t\t# we use the source-directory for the include file and\n\t\t\t\t\t# the build directory for the RC file and message\n\t\t\t\t\t# resources. This works at least for win32all.\n\t\t\t\t\th_dir = os.path.dirname(src)\n\t\t\t\t\trc_dir = os.path.dirname(obj)\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# first compile .MC to .RC and .H file\n\t\t\t\t\t\tself.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])\n\t\t\t\t\t\tbase, _ = os.path.splitext(os.path.basename (src))\n\t\t\t\t\t\trc_file = os.path.join(rc_dir, base + '.rc')\n\t\t\t\t\t\t# then compile .RC to .RES file\n\t\t\t\t\t\tself.spawn([self.rc, \"/fo\" + obj, rc_file])\n\n\t\t\t\t\texcept DistutilsExecError as msg:\n\t\t\t\t\t\traise CompileError(msg)\n\t\t\t\t\tcontinue\n\t\t\t\telif ext in self._cuda_extensions:\n\t\t\t\t\tinput_opt = src\n\t\t\t\telse:\n\t\t\t\t\t# how to handle this file?\n\t\t\t\t\traise CompileError(\"Don't know how to compile {} to {}, ext {}\"\n\t\t\t\t\t\t\t\t\t .format(src, obj, ext))\n\n\t\t\t\t# release MT\n\t\t\t\tif \"/MD\" in compile_opts:\n\t\t\t\t\tpass#compile_opts[compile_opts.index(\"/MD\")] = \"/MT\"\n\t\t\t\t# for cuda compiler\n\t\t\t\tif ext in self._cuda_extensions:\n\t\t\t\t\targs = [CUDA['nvcc']]\n\t\t\t\t\targs.append(input_opt)\n\t\t\t\t\targs.append(\"-o=\" + obj)\n\t\t\t\t\t# suppress annoying unicode warnings\n\t\t\t\t\targs.extend([\"-Xcompiler\", \"/wd 4819\"])\n\t\t\t\t\targs.extend([_arg for _arg in pp_opts if _arg.startswith('-I')])\n\t\t\t\t\tif isinstance(extra_postargs, dict):\n\t\t\t\t\t\targs.extend(extra_postargs[\"nvcc\"])\n\t\t\t\t\telse:\n\t\t\t\t\t\targs.extend(extra_postargs)\n\t\t\t\telse:\n\t\t\t\t\targs = [self.cc] + compile_opts + pp_opts\n\t\t\t\t\tif add_cpp_opts:\n\t\t\t\t\t\targs.append('/EHsc')\n\t\t\t\t\targs.append(input_opt)\n\t\t\t\t\targs.append(\"/Fo\" + obj)\n\t\t\t\t\tif isinstance(extra_postargs, dict):\n\t\t\t\t\t\targs.extend(extra_postargs[\"cc\"])\n\t\t\t\t\telse:\n\t\t\t\t\t\targs.extend(extra_postargs)\n\n\t\t\t\ttry:\n\t\t\t\t\t# print('-----', args)\n\t\t\t\t\tself.spawn(args)\n\t\t\t\texcept DistutilsExecError as msg:\n\t\t\t\t\tprint(\"-----\", args)\n\t\t\t\t\traise CompileError(msg)\n\n\t\t\treturn objects\n\n\t\tself.compile = compile\n\n\tdef customize_linker_for_nvcc_win(self):\n\t\tdef link(\n\t\t\t target_desc,\n\t\t\t 
objects,\n\t\t\t output_filename,\n\t\t\t output_dir=None,\n\t\t\t libraries=None,\n\t\t\t library_dirs=None,\n\t\t\t runtime_library_dirs=None,\n\t\t\t export_symbols=None,\n\t\t\t debug=0,\n\t\t\t extra_preargs=None,\n\t\t\t extra_postargs=None,\n\t\t\t build_temp=None,\n\t\t\t target_lang=None):\n\n\t\t\tif not self.initialized:\n\t\t\t\tself.initialize()\n\t\t\tobjects, output_dir = self._fix_object_args(objects, output_dir)\n\t\t\tfixed_args = self._fix_lib_args(libraries, library_dirs,\n\t\t\t\t\t\t\t\t\t\t\truntime_library_dirs)\n\t\t\tlibraries, library_dirs, runtime_library_dirs = fixed_args\n\n\t\t\tif runtime_library_dirs:\n\t\t\t\tself.warn(\"I don't know what to do with 'runtime_library_dirs': \"\n\t\t\t\t\t\t + str(runtime_library_dirs))\n\n\t\t\tlib_opts = gen_lib_options(self,\n\t\t\t\t\t\t\t\t\t library_dirs, runtime_library_dirs,\n\t\t\t\t\t\t\t\t\t libraries)\n\t\t\tif output_dir is not None:\n\t\t\t\toutput_filename = os.path.join(output_dir, output_filename)\n\n\t\t\tif self._need_link(objects, output_filename):\n\t\t\t\tldflags = self._ldflags[target_desc, debug]\n\n\t\t\t\texport_opts = [\"/EXPORT:\" + sym for sym in (export_symbols or [])]\n\n\t\t\t\tld_args = (ldflags + lib_opts + export_opts +\n\t\t\t\t\t\t objects + ['/OUT:' + output_filename])\n\n\t\t\t\t# The MSVC linker generates .lib and .exp files, which cannot be\n\t\t\t\t# suppressed by any linker switches. The .lib files may even be\n\t\t\t\t# needed! Make sure they are generated in the temporary build\n\t\t\t\t# directory. Since they have different names for debug and release\n\t\t\t\t# builds, they can go into the same directory.\n\t\t\t\tbuild_temp = os.path.dirname(objects[0])\n\t\t\t\tif export_symbols is not None:\n\t\t\t\t\t(dll_name, dll_ext) = os.path.splitext(\n\t\t\t\t\t\tos.path.basename(output_filename))\n\t\t\t\t\timplib_file = os.path.join(\n\t\t\t\t\t\tbuild_temp,\n\t\t\t\t\t\tself.library_filename(dll_name))\n\t\t\t\t\tld_args.append ('/IMPLIB:' + implib_file)\n\n\t\t\t\tif extra_preargs:\n\t\t\t\t\tld_args[:0] = extra_preargs\n\t\t\t\tif extra_postargs:\n\t\t\t\t\tld_args.extend(extra_postargs)\n\n\t\t\t\toutput_dir = os.path.dirname(os.path.abspath(output_filename))\n\t\t\t\tself.mkpath(output_dir)\n\t\t\t\ttry:\n\t\t\t\t\tld_args.append(\"/NODEFAULTLIB:LIBCMT\")\n\t\t\t\t\tlog.debug('Executing \"%s\" %s', self.linker, ' '.join(ld_args))\n\t\t\t\t\tself.spawn([self.linker] + ld_args)\n\t\t\t\t\tself._copy_vcruntime(output_dir)\n\t\t\t\texcept DistutilsExecError as msg:\n\t\t\t\t\tprint('-----', ld_args)\n\t\t\t\t\traise LinkError(msg)\n\t\t\telse:\n\t\t\t\tlog.debug(\"skipping %s (up-to-date)\", output_filename)\n\n\t\tself.link = link\n\n\t# run the customize_compiler\n\tclass custom_build_ext(build_ext):\n\t\tdef build_extensions(self):\n\t\t\tcustomize_compiler_for_nvcc_win(self.compiler)\n\t\t\tcustomize_linker_for_nvcc_win(self.compiler)\n\t\t\tbuild_ext.build_extensions(self)\n\n\tCUDA = locate_cuda()\n\n\treturn CUDA, custom_build_ext","sub_path":"sklgpu/_build_utils/cuda_support.py","file_name":"cuda_support.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"67934592","text":"from django.shortcuts import render,redirect\nfrom .forms import UserCreationForm,LoginForm\nfrom django.contrib import messages\nfrom django.contrib.auth.models import auth\nfrom django.contrib.auth import login,authenticate,logout\nfrom blog.models import Post\nfrom django.contrib.auth.decorators import 
login_required\nfrom django.core.paginator import Paginator,PageNotAnInteger,EmptyPage\n\n# Create your views here.\n\ndef register(request):\n if request.method =='POST':\n form=UserCreationForm(request.POST)\n if form.is_valid():\n new_user=form.save(commit=False)\n new_user.set_password(form.cleaned_data['password1'])\n new_user.save()\n #username=form.cleaned_data['username']\n messages.success(request,f'تهانينا{new_user} لقد تمت عملية التسجيل')\n return redirect('login')\n else:\n form=UserCreationForm()\n return render(request,'user/register.html',{\n 'title':'التسجيل',\n 'form':form,\n })\n\n\ndef Login_user(request):\n if request.method =='POST':\n \n form=LoginForm(request.POST)\n username=request.POST['username']\n password=request.POST['password']\n \n uu=authenticate(request,username=username,password=password) \n if uu is not None: \n login(request,uu) \n return redirect('profile') \n else:\n messages.warning(request,' هناك خطأ في كلمة المرور أو الاسم') \n \n else:\n form=LoginForm() \n \n return render(request,'user\\login.html',{\n 'title':'تسجيل خروج',\n 'form':form,\n })\n\n\ndef Logout_user(request):\n logout(request)\n return render(request,'user/logout.html',{\n 'title':'تسجيل الخروج',\n })\n\n\n\n@login_required(login_url='login')\ndef profile(request):\n posts=Post.objects.filter(author=request.user)\n post_list=Post.objects.filter(author=request.user)\n paginator=Paginator( post_list,2)\n page=request.GET.get('page')\n try:\n post_list=paginator.page(page)\n except PageNotAnInteger:\n post_list=paginator.page(1)\n except EmptyPage:\n post_list=paginator.page(paginator.num_page)\n return render(request,'user/profile.html',{\n 'title':'الملف الشخصي',\n 'posts':posts,\n 'post_list':post_list,\n 'page':page,\n\n }) ","sub_path":"src/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"528761465","text":"from getpass import getpass\r\nimport os\r\nimport sys\r\nimport pickle\r\nimport subprocess\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport datetime\r\nimport wikipedia\r\nimport webbrowser\r\nimport paramiko\r\nwikipedia.set_lang('es')\r\nengine= pyttsx3.init('sapi5')\r\nvoices= engine.getProperty('voices')\r\nprint(voices[2].id)\r\nengine.setProperty('voices', voices[0].id)\r\nr= sr.Recognizer()\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\ndef wishMe():\r\n hour= int(datetime.datetime.now().hour)\r\n if hour>=0 and hour<12:\r\n speak(\"Buenos días\")\r\n elif hour>=12 and hour<18:\r\n speak(\"Buenas tardes\")\r\n else:\r\n speak(\"Buenas noches\")\r\nwishMe()\r\nprint(' Morgan v0.3 ')\r\nprint()\r\n# AQUI SE AUTENTICA EL USUARIO Y BASE DE DATOS\r\n# DEBE SER EDITADO CON EL ARCHIVO MORGAN 2.0.PY\r\n# PARA ASI PODER ACCEDER AL REGISTRO DE ESTE\r\n# DEBE EDITAR EL URL DE LOS ARCHIVOS PARA ENTRAR AL PROGRAMA\r\nwith open(\"user.pckl\",'r') as usera:\r\n user=usera.read()\r\n usera.close()\r\nwith open(\"pass.pckl\",'r') as passa:\r\n pas=passa.read()\r\n passa.close()\r\nprint(\"Por favor, Ingrese el usuario\")\r\nspeak('Por favor,Ingrese el usuario')\r\na=input()\r\n#autenticacion de usuario y pass\r\nwhile a!=user:\r\n print(\"Usuario no registrado o incorrecto\")\r\n speak('Usuario no registrado o incorrecto')\r\n print(\"Ingrese nuevamente por favor.\")\r\n speak('Ingrese nuevamente por favor')\r\n a=input()\r\n if a!=user:\r\n print(\"Usuario incorrecto. 
el programa se cerrará\")\r\n speak('Usuario incorrecto. el programa se cerrará')\r\n x=input()\r\n os.system('cls')\r\n sys.exit()\r\nprint(\"Ingrese contraseña:\")\r\nspeak('Ingrese contraseña')\r\npasx=getpass()\r\nwhile pasx!=pas:\r\n print(\"Contraseña incorrecta, por favor intente nuevamente\")\r\n speak('Contraseña incorrecta.. por favor intente nuevamente')\r\n pasx=input()\r\n if pasx!=pas:\r\n print(\"Contraseña incorrecta. El programa se cerrará\")\r\n speak('Contraseña incorecta. El programa se cerrará')\r\n input()\r\n os.system('cls')\r\n sys.exit()\r\nos.system('cls')\r\nos.system('C:/Users/dk34p/AppData/Local/Programs/Python/Python38-32/pythonw.exe')\r\n#inicio del programa\r\nprint (\"-----------------------------------------------\")\r\nprint (f\" Bienvenido {user} \")\r\nprint (\"-----------------------------------------------\")\r\nspeak(f'Bienvenido{user}')\r\nprint(\"Dime, ¿qué puedo hacer por tí?\")\r\nspeak('Dime,¿Qué puedo hacer por ti?')\r\ndef tomarcomando():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n r.pause_threshold = 1\r\n audio = r.listen(source)\r\n try:\r\n query = r.recognize_google(audio, language='es-mx')\r\n print(query)\r\n except Exception as e: #U KNOW PRINT E WAS A REPLY?\r\n print('Por favor dilo de nuevo')\r\n return \"none\"\r\n return query\r\nif __name__ == \"__main__\":\r\n while True:\r\n query=tomarcomando().lower()\r\n if 'qué sabes sobre' in query:\r\n try: #NUEVO METODO DE ERROR PARA MORGAN, ASÍ EL PROGRAMA NO SE CIERRA.\r\n print (query)\r\n speak(f'Buscando en Wikipedia... por favor espere')\r\n query= query.replace('qué sabes sobre', '')\r\n results= wikipedia.summary(query, sentences=2)\r\n speak(\"Según Wikipedia\")\r\n print(results)\r\n speak(results)\r\n except Exception as e:\r\n speak(\"Ha ocurrido un error intentando conectar a Wikipedia\")\r\n \r\n elif 'abre youtube' in query:\r\n print (query)\r\n webbrowser.open('www.youtube.com')\r\n speak(\"Accediendo a Youtube\")\r\n elif 'abre facebook' in query:\r\n webbrowser.open('www.facebook.com')\r\n speak(\"Accediendo a Facebook\")\r\n elif 'abre spotify' in query:\r\n print (query)\r\n subprocess.Popen(['Spotify'])\r\n print(\"Accediendo a Spotify\")\r\n speak('Accediendo a Spotify')\r\n elif 'abre league of legends' in query:\r\n print (query)\r\n subprocess.Popen(['C:/Riot Games/League of Legends/LeagueClient.exe'])\r\n print(\"Accediendo a League of legends\")\r\n speak('Accediendo a Lig of legends')\r\n elif 'qué hora es' in query:\r\n print (query)\r\n strTime= datetime.datetime.now().strftime(\"%H:%M\")\r\n print(strTime)\r\n speak(\"La hora es {}\".format(strTime))\r\n elif 'dime la hora' in query:\r\n print (query)\r\n strTime= datetime.datetime.now().strftime(\"%H:%M\")\r\n print(strTime)\r\n speak(f\"La hora es {strTime}\")\r\n elif 'gracias' in query:\r\n print (query)\r\n speak('No hay de qué')\r\n elif 'abre visual code' in query:\r\n print (query)\r\n subprocess.Popen(['C:/Users/dk34p/AppData/Local/Programs/Microsoft VS Code/code.exe'])\r\n print(\"Accediendo a Visual Code\")\r\n speak('Accediendo a visual code')\r\n elif 'abre whatsapp' in query:\r\n print(query)\r\n webbrowser.open('https://web.whatsapp.com/')\r\n print(\"Accediendo a WhatsApp\")\r\n speak('Accediendo a WhatsApp')\r\n elif 'abre netflix' in query:\r\n print(query)\r\n webbrowser.open('www.netflix.com')\r\n print('Accediendo a Netflix')\r\n speak('Accediendo a Netflix')\r\n elif 'cierra sesión' in query:\r\n speak('Cerrando sesión')\r\n os.system('shutdown -l')\r\n elif 
'limpia el terminal' in query:\r\n speak('Terminal Limpiado.')\r\n os.system('cls')\r\n elif 'quién soy' in query:\r\n try:\r\n with open('/Users/dk34p/desktop/data/nc.pckl', 'r') as nc:\r\n nca=pickle.load(nc)\r\n nc.close()\r\n with open('/Users/dk34p/desktop/data/ed.pckl','r') as ed:\r\n eda=pickle.load(ed)\r\n ed.close()\r\n with open('/Users/dk34p/desktop/data/fdn.pckl','r') as fn:\r\n fdn=pickle.load(fn)\r\n fn.close()\r\n with open('/Users/dk34p/desktop/data/rut.pckl','r') as ru:\r\n rut=pickle.load(ru)\r\n ru.close()\r\n with open('/Users/dk34p/desktop/data/city.pckl','r') as cit:\r\n city=pickle.load(cit)\r\n cit.close()\r\n with open('/Users/dk34p/desktop/data/celn.pckl','r') as celn:\r\n celna=pickle.load(celn)\r\n celn.close()\r\n print('--------------------------------')\r\n print(' Tus datos son: ')\r\n print('--------------------------------')\r\n print()\r\n print(\"Nombre : \",nca)\r\n speak(f'Su nombre es {nca}')\r\n print(\"Edad:\",eda)\r\n speak(f'Tiene {eda} años')\r\n print(\"Fecha de nacimiento: \" ,fdn)\r\n speak(f'Nació el {fdn}')\r\n print('Rut:',rut)\r\n speak(f'Su rut es {rut}')\r\n print('Ciudad:',city)\r\n speak(f'Vive en la ciudad de {city} ')\r\n print(f'Número de teléfono:',celna)\r\n speak(f'Su número de teléfono es {celna}')\r\n except Exception as e:\r\n speak(\"No hay datos ingresados, lo siento {}\".format(user))\r\n \r\n elif 'apaga el equipo' in query:\r\n speak('Apagando el equipo')\r\n os.system('shutdown')\r\n elif 'apaga el computador' in query:\r\n speak('Apagando el computador')\r\n os.system('shutdown ') \r\n elif 'reinicia el computador' in query:\r\n speak('Reiniciando el computador')\r\n os.system('shutdown -r -t 1')\r\n elif 'suspende el equipo' in query:\r\n speak('Suspendiendo el equipo')\r\n os.system('rundll32.exe powrprof.dll, SetSuspendState 0,1,0')\r\n elif 'suspender equipo' in query:\r\n speak('Suspendiendo el equipo')\r\n os.system('rundll32.exe powrprof.dll, SetSuspendState 0,1,0')\r\n \r\n elif 'te odio' in query:\r\n speak('No.. yo, yo, yo te amo no te vaaayaaaaaas. 
-muere-')\r\n elif 'establece alarma' in query:\r\n speak('A que hora quiere establecer alarma?')\r\n tomarcomando()\r\n elif 'cierra Spotify' in query:\r\n os.system('TASKKILL /F /IM Spotify.exe')\r\n speak('Cerrando Spotify')\r\n elif 'cierre visual code' in query:\r\n os.system('TASKKILL /F /IM code.exe')\r\n speak('Cerrando Visual Code')\r\n elif 'close visual code' in query:\r\n os.system('TASKKILL /F /IM code.exe')\r\n speak('Cerrando Visual Code')\r\n elif 'cierra torrent' in query:\r\n os.system('TASKKILL /F /IM uTorrent.exe')\r\n elif '+' in query:\r\n query=query.replace(\"+\", ' ')\r\n query=query.split()\r\n a=int(query[0])\r\n b=int(query[1])\r\n re=a+b\r\n print(re)\r\n speak(f'El resultado es {re}')\r\n elif 'por' in query:\r\n query=query.replace(\"por\", ' ')\r\n query=query.split()\r\n a=int(query[0])\r\n b=int(query[1])\r\n re=a*b\r\n print(f'El resultado es {re}')\r\n speak(f'El resultado es {re}')\r\n elif '*' in query:\r\n query=query.replace(\"*\", ' ')\r\n query=query.split()\r\n a=int(query[0])\r\n b=int(query[1])\r\n re=a*b\r\n print(f'El resultado es {re}')\r\n speak(f'El resultado es {re}')\r\n elif 'menos' in query:\r\n query=query.replace(\"menos\", ' ')\r\n query=query.split()\r\n a=int(query[0])\r\n b=int(query[1])\r\n re=a-b\r\n print(f'El resultado es {re}')\r\n speak(f'El resultado es {re}')\r\n elif 'dividido' in query:\r\n query=query.replace(\"dividido\", ' ')\r\n query=query.split()\r\n a=int(query[0])\r\n b=int(query[1])\r\n re=a/b\r\n int(re)\r\n print(f'El resultado es {re}')\r\n speak(f'El resultado es {re}')\r\n elif 'raíz cuadrada de' in query:\r\n import math\r\n query=query.replace('raíz cuadrada de', '')\r\n query=query.split()\r\n a=int(query[0])\r\n b=math.sqrt(a)\r\n print('El resultado es : ' ,b)\r\n speak(f'El resultado es: {b}')\r\n elif 'cuál es la raiz cuadrada de' in query:\r\n import math\r\n query=query.replace('cuál es la raiz cuadrada de','')\r\n query.query.split()\r\n a=int(query[0])\r\n b=math.sqrt(a)\r\n print(\"El resultado es :\",b)\r\n speak(f\"El resultado es: {b}\")\r\n elif 'Abre Telegram' in query:\r\n subprocess.Popen(['C:/Users/dk34p/AppData/Roaming/Telegram Desktop/Telegram.exe'])\r\n print(\"Abriendo Telegram\")\r\n speak(\"Abriendo Telegram\")\r\n \r\n\r\n\r\n \r\n","sub_path":"Morgan3.0.py","file_name":"Morgan3.0.py","file_ext":"py","file_size_in_byte":11380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"14063426","text":"#!/usr/bin/python\n\ntry:\n real_types = (int, long, float)\nexcept NameError: # Python 3\n real_types = (int, float)\n xrange = range\n\nimport math\n\n\nclass Quat:\n \"\"\"The class defining a quaternion.\"\"\"\n\n def __init__(self, x=0, y=0, z=0, t=0):\n \"\"\"Create a Quat instance.\"\"\"\n self.q = [float(x), float(y), float(z), float(t)]\n\n def __str__(self):\n \"\"\"Compute the string (informal) representation of the quaternion.\"\"\"\n #return str(self.q) # brzydkie\n words = []\n labels = [\"1\", \"i\", \"j\", \"k\"]\n words.append(str(self.q[0]))\n for i in range(1, 4):\n if self.q[i] >= 0:\n words.append(\"+\")\n words.append(str(self.q[i]))\n words.append(labels[i])\n return \"\".join(words)\n\n def __repr__(self):\n \"\"\"Compute the string (formal) representation of the quaternion.\"\"\"\n return \"Quat({}, {}, {}, {})\".format(*self.q)\n\n def __getitem__(self, key):\n \"\"\"Implement quat[key].\"\"\"\n return self.q[key]\n\n def _normalize(self, other):\n \"\"\"Transformation an object to a quaternion.\"\"\"\n if 
isinstance(other, real_types):\n other = Quat(other)\n elif isinstance(other, complex):\n other = Quat(other.real, other.imag)\n return other\n\n def __eq__(self, other):\n \"\"\"Test if the quaternions are equal.\"\"\"\n other = self._normalize(other)\n return all(self.q[i] == other.q[i] for i in range(4))\n\n def __ne__(self, other):\n \"\"\"Test if the quaternions are not equal.\"\"\"\n return not self == other\n\n def __nonzero__(self):\n \"\"\"Test if the quaternion is not equal to zero.\"\"\"\n return any(self.q[i] != 0 for i in range(4))\n\n __bool__ = __nonzero__ # Python 3\n\n def __pos__(self):\n \"\"\"Implementation of +q.\"\"\"\n return self\n\n def __neg__(self):\n \"\"\"Implementation of -q.\"\"\"\n #alist = [-item for item in self.q]\n #return Quat(*alist)\n return Quat(-self.q[0], -self.q[1], -self.q[2], -self.q[3])\n\n def __add__(self, other):\n \"\"\"Addition of quaternions.\"\"\"\n other = self._normalize(other)\n alist = [self.q[i] + other.q[i] for i in range(4)]\n return Quat(*alist)\n\n __radd__ = __add__\n\n def __sub__(self, other):\n \"\"\"Subtraction of quaternions.\"\"\"\n # return self + (-other)\n other = self._normalize(other)\n alist = [self.q[i] - other.q[i] for i in range(4)]\n return Quat(*alist)\n\n def __rsub__(self, other):\n \"\"\"Subtraction of quaternions.\"\"\"\n # return (-self) + other\n other = self._normalize(other)\n alist = [other.q[i] - self.q[i] for i in range(4)]\n return Quat(*alist)\n\n def __mul__(self, other):\n \"\"\"Quaternion product.\"\"\"\n other = self._normalize(other)\n a = (self.q[0] * other.q[0] - self.q[1] * other.q[1]\n - self.q[2] * other.q[2] - self.q[3] * other.q[3])\n b = (self.q[0] * other.q[1] + self.q[1] * other.q[0]\n + self.q[2] * other.q[3] - self.q[3] * other.q[2])\n c = (self.q[0] * other.q[2] - self.q[1] * other.q[3]\n + self.q[2] * other.q[0] + self.q[3] * other.q[1])\n d = (self.q[0] * other.q[3] + self.q[1] * other.q[2]\n - self.q[2] * other.q[1] + self.q[3] * other.q[0])\n return Quat(a, b, c, d)\n\n def __rmul__(self, other):\n \"\"\"Quaternion product.\"\"\"\n other = self._normalize(other)\n a = (other.q[0] * self.q[0] - other.q[1] * self.q[1]\n - other.q[2] * self.q[2] - other.q[3] * self.q[3])\n b = (other.q[0] * self.q[1] + other.q[1] * self.q[0]\n + other.q[2] * self.q[3] - other.q[3] * self.q[2])\n c = (other.q[0] * self.q[2] - other.q[1] * self.q[3]\n + other.q[2] * self.q[0] + other.q[3] * self.q[1])\n d = (other.q[0] * self.q[3] + other.q[1] * self.q[2]\n - other.q[2] * self.q[1] + other.q[3] * self.q[0])\n return Quat(a, b, c, d)\n\n def __abs__(self):\n \"\"\"Return the norm of a quaternion (a scalar).\"\"\"\n powers = sum(item * item for item in self.q)\n return math.sqrt(powers)\n\n def is_unit(self):\n \"\"\"Test a unit quaternion.\"\"\"\n return 1.0 == sum(item * item for item in self.q)\n\n def conjugate(self):\n \"\"\"Conjugate the quaternion.\"\"\"\n return Quat(self.q[0], -self.q[1], -self.q[2], -self.q[3])\n\n def __invert__(self): # ~p, return p^{-1}\n \"\"\"Reciprocal of the quaternion.\"\"\"\n powers = sum(item * item for item in self.q)\n return (1.0 / powers) * self.conjugate()\n\n def _pow1(self, n):\n \"\"\"Find powers of the quaternion (inefficient).\"\"\"\n if n < 0:\n return pow(~self, -n)\n quat = Quat(1)\n while n > 0:\n quat = quat * self\n n = n - 1\n return quat\n\n def _pow2(self, n):\n \"\"\"Find powers of the quaternion (binary exponentiation).\"\"\"\n if n == 0:\n return Quat(1)\n if n < 0:\n return pow(~self, -n)\n quat = self\n if n == 1:\n return self\n elif n == 
2:\n return self * self\n else: # binary exponentiation\n result = Quat(1)\n while True:\n if n % 2 == 1:\n result = result * quat\n n = n - 1 # przez ile pomnozyc\n if n == 0:\n break\n if n % 2 == 0:\n quat = quat * quat\n n = n // 2\n return result\n\n __pow__ = _pow2\n\n def __hash__(self):\n \"\"\"Hashable quaternions.\"\"\"\n return hash(tuple(self.q))\n\n def __int__(self):\n \"\"\"Conversion to int is not possible.\"\"\"\n raise TypeError(\"can't convert quat to int\")\n\n def __long__(self):\n \"\"\"Conversion to long is not possible.\"\"\"\n raise TypeError(\"can't convert quat to long\")\n\n def __float__(self):\n \"\"\"Conversion to float is not possible.\"\"\"\n raise TypeError(\"can't convert quat to float\")\n\n def __complex__(self):\n \"\"\"Conversion to complex is not possible.\"\"\"\n raise TypeError(\"can't convert quat to complex\")\n\n # method used to create a rotation Quaternion to rotate\n # any vector defined as a Quaternion\n # with respect to the vector vect theta 'radians';\n # rot_vec ma dlugosc 1 w R^3, tworzymy kwaternion jednostkowy.\n # Chyba lepiej zrobic metode klasy.\n @classmethod\n def rot_quat(cls, axis, angle):\n \"\"\"From the axis-angle representation to the quat.\n The angle is in radians. The axis is a unit 3D vector.\"\"\"\n if sum(x * x for x in axis) != 1.0:\n raise ValueError(\"not a unit vector\")\n a = math.cos(angle / 2.0)\n sinus = math.sin(angle / 2.0)\n b = axis[0] * sinus\n c = axis[1] * sinus\n d = axis[2] * sinus\n return cls(a, b, c, d)\n\nQuaternion = Quat\n\n# EOF\n","sub_path":"pyquats/quats.py","file_name":"quats.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"207181485","text":"\"\"\" Data Visualizations\n\nvisualize_data.py: Library code to create data visualizations,\nsuch as KDE plots, histograms, and scatter plots.\n\n\"\"\"\n\n# Importing libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg', warn=False)\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Libraries for plotting curves\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import roc_auc_score\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nfrom itertools import cycle\n\n# Suppress warnings\nimport warnings\nwarnings.simplefilter(action='ignore', category=UserWarning)\n\n\n\n\ndef plot_polygenic_risk_scores(fp):\n \"\"\"\n Creates a histogram of the polygenic risk scores.\n \n :param fp: Filepath to csv file\n \"\"\"\n \n simulated_df = pd.read_csv(fp)\n \n plt.hist(simulated_df['PRS'], bins=20)\n plt.title('Polygenic Risk Scores for Unbiased Population')\n plt.xlabel('Polygenic Risk Score')\n plt.ylabel('Frequency');\n \n \n\ndef plot_risk_across_classes(fp):\n \"\"\"\n Creates a histogram of the polygenic risk scores.\n \n :param fp: Filepath to csv file\n \"\"\"\n \n simulated_bias_df = pd.read_csv(fp)\n \n sns.kdeplot(simulated_bias_df[simulated_bias_df['Class'] == 0]['PRS'], label=\"Low Risk\")\n sns.kdeplot(simulated_bias_df[simulated_bias_df['Class'] == 1]['PRS'], label=\"Medium Risk\")\n sns.kdeplot(simulated_bias_df[simulated_bias_df['Class'] == 2]['PRS'], label=\"High Risk\")\n plt.title('Distribution of PRS Across Classes')\n plt.xlabel('Polygenic Risk Score')\n plt.ylabel('Normalized Frequency')\n \n\n \ndef plot_multiclass_roc(clf_name, clf, X_test, y_test, n_classes, ax):\n \"\"\"\n Plots the 
multiclass version of the Receiver Operating Characteristic (ROC) \n    curve, which shows the connection/trade-off between \n    the true positive rate and false positive rate\n    \n    :param clf_name: String of the model's name\n    :param clf: The trained model\n    :param X_test: Test data of the features\n    :param y_test: Test data of the labels\n    :param n_classes: Number of classes\n    :param figsize: Size of the ROC curve plot\n    :param ax: Axis of subplot to save plot to, otherwise will create new figure\n    \"\"\"\n    \n    # try to run decision_function(), which is contained in \n    # all classifiers we used except for KNN\n    try:\n        y_score = clf.decision_function(X_test)\n        \n    # except: run predict_proba() for KNN\n    except:\n        y_score = clf.predict_proba(X_test)\n\n    # dictionaries to hold false positive rate (fpr), \n    # true positive rate (tpr), ROC area under the curve (roc_auc),\n    # and the classes\n    fpr = dict()\n    tpr = dict()\n    roc_auc = dict()\n    classes_dict = {0: 'Low Risk', 1: 'Medium Risk', 2: 'High Risk'}\n    classes_color = {0: 'Green', 1: 'Yellow', 2: 'Red'}\n\n    # one-hot encode labels to determine ROC curve\n    y_test_dummies = pd.get_dummies(y_test, drop_first=False).values\n    for i in range(n_classes):\n        fpr[i], tpr[i], _ = roc_curve(y_test_dummies[:, i], y_score[:, i])\n        roc_auc[i] = auc(fpr[i], tpr[i])\n\n    # plot of the ROC for each class\n    ax.plot([0, 1], [0, 1], 'k--')\n    ax.lines[0].set_linestyle('--')\n    ax.set_xlim([0.0, 1.0])\n    ax.set_ylim([0.0, 1.05])\n    ax.tick_params(axis=\"x\")\n    ax.tick_params(axis=\"y\")\n    ax.set_xlabel('False Positive Rate', fontweight='bold')\n    ax.set_ylabel('True Positive Rate', fontweight='bold')\n    ax.set_title(clf_name, fontweight='bold')\n    for i in range(n_classes):\n        sns.lineplot(fpr[i], tpr[i], label='%s (Area = %0.2f)' % (classes_dict[i], roc_auc[i]),\n                     ax=ax, color=classes_color[i])\n    ax.legend(loc='lower right')\n    ax.grid(alpha=.4)\n    sns.despine(ax=ax)\n\n    \n    \ndef plot_precision_recall(clf_name, clf, X_test, y_test, n_classes, ax):\n    \"\"\"\n    Plots the multiclass version of the Precision-Recall (P-R) \n    curve, which shows the tradeoff between precision and recall \n    for different threshold and is a useful measure of success \n    of prediction when the classes are very imbalanced.\n    \n    :param clf_name: String of the model's name\n    :param clf: The trained model\n    :param X_test: Test data of the features\n    :param y_test: Test data of the labels\n    :param n_classes: Number of classes\n    :param figsize: Size of the ROC curve plot\n    :param ax: Axis of subplot to save plot to, otherwise will create new figure\n    \"\"\"\n    \n    # try to run decision_function(), which is contained in \n    # all classifiers we used except for KNN\n    try:\n        y_score = clf.decision_function(X_test)\n        \n    # except: run predict_proba() for KNN\n    except:\n        y_score = clf.predict_proba(X_test)\n\n    # dictionaries to hold precision, \n    # recall, average precision,\n    # and the classes\n    precision = dict()\n    recall = dict()\n    average_precision = dict()\n    classes_dict = {0: 'Low Risk', 1: 'Medium Risk', 2: 'High Risk'}\n    classes_color = {0: 'Green', 1: 'Orange', 2: 'Red'}\n\n    # one-hot encode labels to determine ROC curve\n    y_test_dummies = pd.get_dummies(y_test, drop_first=False).values\n    for i in range(n_classes):\n        precision[i], recall[i], _ = precision_recall_curve(y_test_dummies[:, i], y_score[:, i])\n        average_precision[i] = average_precision_score(y_test_dummies[:, i], y_score[:, i])\n    \n    # A \"micro-average\": quantifying score on all classes jointly\n    # Micro-averaging is plotting a precision-recall curve by considering \n    # each element of the label indicator matrix as a binary prediction\n    precision[\"micro\"], recall[\"micro\"], _ = precision_recall_curve(y_test_dummies.ravel(),\n                                                                    y_score.ravel())\n    average_precision[\"micro\"] = average_precision_score(y_test_dummies, y_score,\n                                                         average=\"micro\")\n    \n    # plot of the P-R curve for each class\n    ax.plot([0, 1], [1, 0], 'k--')\n    ax.lines[0].set_linestyle('--')\n    ax.set_xlim([0.0, 1.0])\n    ax.set_ylim([0.0, 1.05])\n    ax.tick_params(axis=\"x\")\n    ax.tick_params(axis=\"y\")\n    ax.set_xlabel('Recall', fontweight='bold')\n    ax.set_ylabel('Precision', fontweight='bold')\n    ax.set_title(clf_name, fontweight='bold')\n    for i in range(n_classes):\n        sns.lineplot(recall[i], precision[i], label='%s (Area = %0.2f)' % (classes_dict[i], average_precision[i]),\n                     ax=ax, color=classes_color[i])\n    ax.legend(loc='lower right')\n    ax.grid(alpha=.4)\n    sns.despine(ax=ax)\n","sub_path":"project_16/src/visualize_data.py","file_name":"visualize_data.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"612098226","text":"'''\n\nhttp://archive.ics.uci.edu/ml/datasets/Ecoli\n'''\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport warnings\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom collections import Counter\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import r2_score\nwarnings.filterwarnings('ignore')\n\ndef R2_score(y_true, y_pred):\n    assert y_pred.shape == y_true.shape, '输入的{}与{}不相等'.format(y_pred.shape, y_true.shape)\n    return np.sum(np.square(y_pred - y_true)) / np.sum(np.square(y_pred - np.mean(y_true)))\n\n# 数据加载\nnames = ['mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'classtic']\ndf = pd.read_csv('./datas/ecoli.data', sep=' ', names=names)\nle = LabelEncoder()\nX = df.iloc[:, 0:-1]\nY = pd.DataFrame(le.fit_transform(df.iloc[:, -1]))\n\n# 数据分割\nx_train, x_test, y_train, y_test = train_test_split(X, Y)\n\nx_train = np.mat(x_train)\ny_train = np.mat(y_train).reshape(-1, 1)\n\n# 特征工程\npipeline = Pipeline(steps=[\n    ('poly', PolynomialFeatures()),  # 指定第一步做什么操作\n    ('algo', ElasticNet(random_state=0))  # 指定最后一步做什么操作,最后一步一般为模型对象\n])\n\nparams = {\n    \"poly__degree\": [1, 2, 3, 4, 5],\n    \"algo__alpha\": [0.1, 0.01, 1.0, 10.0, 100.0, 1000.0],\n    \"algo__l1_ratio\": [0.1, 0.3, 0.5, 0.9, 0.95, 1.0],\n    \"algo__fit_intercept\": [True, False]\n}\n\nalgo = GridSearchCV(estimator=pipeline, cv=3, param_grid=params)\n\n# 6. 模型的训练\nalgo.fit(x_train, y_train)\n\n\n\n# 7. 模型效果评估\nprint(\"最优参数:{}\".format(algo.best_params_))\nprint(\"最优参数对应的最优模型:{}\".format(algo.best_estimator_))\nprint(\"最优模型对应的这个评估值:{}\".format(algo.best_score_))\n\nbest_pipeline = algo.best_estimator_\nbest_lr = best_pipeline.get_params()['algo']\n# print(\"各个特征属性的权重系数,也就是ppt上的theta值:{}\".format(best_lr.coef_))\nprint(\"截距项值:{}\".format(best_lr.intercept_))\n\npred_train = algo.predict(x_train)\npred_test = algo.predict(x_test)\n# b. 
直接通过评估相关的API查看效果\nprint(\"模型在训练数据上的效果(R2):{}\".format(r2_score(pred_train, y_train)))\nprint(\"模型在测试数据上的效果(R2):{}\".format(r2_score(pred_test, y_test)))\n\n# 模型展示\nt = np.arange(len(x_test))\nplt.plot(t, y_test, 'r', label=u'实际值')\nplt.plot(t, pred_test, 'b', label=u'预估值')\nplt.legend(loc='lower right')\nplt.show()\n","sub_path":"项目实战-蛋白质定位点分析/基于网格交叉验证对蛋白质定位点分析(准确性0.23).py","file_name":"基于网格交叉验证对蛋白质定位点分析(准确性0.23).py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"60100952","text":"from django.urls import path\nfrom .views import BlogIndex, PostDetail, AddPostView, UpdatePostView, DeletePostView, AddCategoryView\nfrom .import views\n\n#from .feeds import LatestPostsFeed\n\n\n\napp_name= 'blog'\n\nurlpatterns = [\n path('blog', views.BlogIndex, name=\"blogIndex\"),\n \n #path('post/', PostDetail.as_view(), name=\"postDetail\"),\n\n path('////', views.PostDetail, name=\"postDetail\" ),\n path('/share/',views.post_share, name='post_share'),\n \n path('add_post/', AddPostView.as_view(), name=\"add_post\"),\n\n #path('add_category/', AddCategoryView.as_view(), name=\"add_category\"),\n\n path('post/edit/', UpdatePostView.as_view(), name=\"update_post\"),\n \n \n path('post//remove', DeletePostView.as_view(), name=\"deletepost\"),\n \n # path('category//', CategoryView, name='category'),\n \n # path('like/', LikeView, name='like_post'),\n path('tag//' ,views.BlogIndex, name='blogIndex_by_tag'),\n\n #path('feed/', LatestPostsFeed(), name='post_feed'),\n \n path('search/',views.postSearch, name='search'),\n \n\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"124577231","text":"#!/Users/lyjsmac/opt/anaconda3/bin/python3.8\n# -*- encoding: utf-8 -*-\n'''\n@File : 最大子序和.py\n@Time : 2021/1/17 11:05 下午\n@Author : fancycarp\n@Contact : woshiliyujian@gmail.com\n@Desc : None\n'''\n\n# here put the import lib\n'''\n给定一个整数数组 nums,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。\n\n示例:\n\n输入: [-2,1,-3,4,-1,2,1,-5,4]\n输出: 6\n解释:连续子数组[4,-1,2,1] 的和最大,为6。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/maximum-subarray\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\nclass Solution(object):\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n dp = nums\n for i in range(1,len(nums)):\n dp[i] = max(dp[i-1]+nums[i],nums[i])\n return max(dp)","sub_path":"Week_07/最大子序和.py","file_name":"最大子序和.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"566635618","text":"import sys\nfrom boxsdk import JWTAuth, Client\n\n## AUTHENTICATION \n\ndef authenticate_as_user(config, user):\n \"\"\"\n Using the JWT auth configuration file, get an authenticated client that can act as the specified user. \n \"\"\"\n auth = JWTAuth.from_settings_file(config)\n auth.authenticate_user(user)\n return Client(auth);\n\ndef authenticate_as_service_account(config):\n \"\"\"\n Using the JWT auth configuration file, get an authenticated client that can act as the app service account. 
\n \"\"\"\n auth = JWTAuth.from_settings_file(config)\n auth.authenticate_instance()\n return Client(auth);\n\n\n## STRING FUNCTIONS\n\ndef path_str(folder):\n \"\"\"\n If 'path_collection' is present, returns the folder's full path.\n Otherwise, return the folder name.\n \"\"\"\n path = \"\"\n if hasattr(folder, 'path_collection'):\n path = \"/\" + \"/\".join(map(lambda p: p['name'], folder.path_collection['entries'])) + \"/\"\n return path + folder.name\n\n## TREE TRAVERSAL\n\ndef is_folder(item):\n \"\"\"\n Check the item's 'type' to determine whether it's a folder.\n \"\"\"\n return item['type'] == \"folder\"\n\ndef get_subfolders(client, folder):\n \"\"\"\n Fetch all subfolders of a given folder\n \"\"\"\n offset = 0\n lastFetchedCount = -1\n result = []\n while (lastFetchedCount != 0):\n # fetch folder items and add subfolders to list\n items = client.folder(folder_id=folder['id']).get_items(limit=1000, offset=offset, fields=[\"id\", \"name\",\"path_collection\"])\n result.extend(filter(is_folder, items))\n # update offset and counts for terminating conditions.\n offset += len(items)\n lastFetchedCount = len(items)\n return result\n\ndef walk_folder_tree(client, folder, folder_action):\n \"\"\"\n Perform some folder_action against a folder, then do the same for every subfolder.\n \"\"\"\n folder_action(folder)\n for subfolder in get_subfolders(client, folder):\n walk_folder_tree(client, subfolder, folder_action)","sub_path":"Entrusted Toolkit/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"568072303","text":"def main(lines):\n lines = lines.split('\\n\\n')\n ruleLines = lines[0].split('\\n')\n ticketLine = lines[1].split('\\n')[1]\n otherTickets = lines[2].split('\\n')[1:-1]\n\n rules = []\n nums = []\n anyValid = set()\n for line in ruleLines:\n sect = line.strip().split(': ')\n category = sect[0]\n ranges = sect[1].split(' or ')\n valid = set()\n for r in ranges:\n low = int(r.split('-')[0])\n high = int(r.split('-')[1])\n for num in range(low, high+1):\n valid.add(num)\n anyValid.add(num)\n rules.append((category, valid))\n\n errorRate = 0\n validTickets = []\n for ticket in otherTickets:\n isValid = True\n nums = ticket.split(',')\n ticketInt = []\n for num in nums:\n ticketInt.append(int(num))\n if int(num) not in anyValid:\n errorRate += int(num)\n isValid = False\n if isValid:\n validTickets.append(ticketInt)\n print(errorRate)\n\n numFields = len(rules)\n possibilities = []\n for i in range(numFields):\n possibilities.append([])\n for j in range(numFields):\n possibilities[-1].append(j)\n\n for place in range(numFields):\n for field in range(numFields):\n for ticket in validTickets:\n if ticket[place] not in rules[field][1]:\n possibilities[place].remove(field)\n break\n\n fields = []\n i = 0\n while i < numFields:\n if len(possibilities[i]) == 1:\n field = possibilities[i][0]\n fields.append((field, i))\n for place in range(numFields):\n if field in possibilities[place]:\n possibilities[place].remove(field)\n i = -1\n i += 1\n\n yourTicket = [int(a) for a in ticketLine.split(',')]\n product = 1\n for field, position in fields:\n if rules[field][0].startswith(\"departure\"):\n product *= yourTicket[position]\n print(product)\n\ndef run(function, input_file):\n try:\n with open(input_file, \"r\") as fh:\n lines = fh.read()\n except:\n print(f\"{input_file} not found in current directory. 
Skipping...\")\n        return\n    function(lines)\n\nprint(\"TEST:\")\nrun(main, \"test.txt\")\nprint(\"\\nMAIN:\")\nrun(main, \"input.txt\")\n","sub_path":"16-ticket-translation/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"561390355","text":"import sys\nsys.path.append('..')\nfrom ...utils import *\n\nimport argparse\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\n\n\nclass OthelloNNet:\n    def __init__(self, game, args):\n        # game params\n        self.board_x, self.board_y = game.getBoardSize()\n        self.action_size = game.getActionSize()\n        self.args = args\n\n        # Neural Net\n        self.input_boards = Input(shape=(self.board_x, self.board_y))\n\n        x = Reshape((self.board_x, self.board_y, 1))(self.input_boards)\n        x = self.conv_block(x, 256)\n        for _ in range(4):\n            x = self.res_block(x, 256)\n        self.pi = self.policy_head(x)\n        self.v = self.value_head(x)\n\n        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])\n        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(args.lr))\n\n    def conv_block(self, x, kernel):\n        x = Conv2D(kernel, 3, padding='same', use_bias=False, trainable=self.args.train)(x)\n        x = BatchNormalization()(x)\n        x = Activation('relu')(x)\n        return x\n\n    def res_block(self, inp, kernel):\n        x = Conv2D(kernel, 3, padding='same', use_bias=False, trainable=self.args.train)(inp)\n        x = BatchNormalization()(x)\n        x = Activation('relu')(x)\n        x = Conv2D(kernel, 3, padding='same', use_bias=False, trainable=self.args.train)(x)\n        x = BatchNormalization()(x)\n        x = Add()([inp, x])\n        x = Activation('relu')(x)\n        return x\n\n    def policy_head(self, x):\n        x = Conv2D(2, 1, use_bias=False)(x)\n        x = BatchNormalization()(x)\n        x = Activation('relu')(x)\n        x = Flatten()(x)\n        x = Dropout(0.5)(x)\n        x = Dense(self.action_size, activation='softmax', name='pi')(x)\n        return x\n\n    @staticmethod\n    def value_head(x):\n        x = Conv2D(1, 1, use_bias=False)(x)\n        x = BatchNormalization()(x)\n        x = Activation('relu')(x)\n        x = Flatten()(x)\n        x = Dropout(0.5)(x)\n        x = Dense(256, use_bias=False)(x)\n        x = BatchNormalization()(x)\n        x = Activation('relu')(x)\n        x = Dropout(0.5)(x)\n        x = Dense(1, activation='tanh', name='v')(x)\n        return x\n","sub_path":"ai/othello/keras/OthelloNNet.py","file_name":"OthelloNNet.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"620308342","text":"# linkedList: fundamentally a data structure of linked nodes\n# key points 1) create a Node class and a LinkedList class\n# 2) store the head value separately. ex) cur = self.head\n\nclass Node:\n    def __init__(self,item):\n        self.val = item\n        self.next = None\n\nclass LinkedList:\n    def __init__(self,item):\n        self.head=Node(item)\n\n    def add(self,item):\n        cur=self.head\n        while cur.next is not None:\n            cur=cur.next\n        # walk to the end of the linked list via the while loop.\n        cur.next=Node(item)\n\n    def remove(self,item):\n        # 3 cases: the remove target is the head / it is in the middle / it is the very last Node\n        if self.head.val == item: # 1) when the remove target is the head\n            self.head = self.head.next\n        else : # 2) when it is in the middle\n            cur=self.head\n            while cur.next is not None:\n                if cur.val ==item:\n                    self.removeItem(item)\n                    return\n\n                if (cur.next.next is None) and cur.next.val == item: # 3) when it is the last Node (we need to know the previous one)\n                    cur.next=None\n                    return\n                cur=cur.next\n\n            print(\"item does not exist in linked list\")\n\n    def removeItem(self,item):\n        cur=self.head\n        while cur.next.next is not None:\n            if cur.next.val == item:\n                nextnode = cur.next.next\n                cur.next = nextnode\n                break\n            cur=cur.next\n\n\n    def reverse(self):\n        cur = self.head\n        prev = None\n        while cur is not None:\n            next = cur.next\n            cur.next = prev\n            prev = cur\n            cur = next\n        self.head = prev # reset head\n\n    def printlist(self):\n        cur =self.head\n        while cur is not None:\n            print(cur.val)\n            cur=cur.next\n\n# linkedlist = LinkedList() # must not be constructed like this\nlinkedlist = LinkedList(1)\nlinkedlist.add(2)\nlinkedlist.add(3)\nlinkedlist.add(4)\nlinkedlist.remove(3)\nlinkedlist.printlist()\nprint('********************')\nlinkedlist.reverse()\nlinkedlist.printlist()\n\n# reference site\n#https://github.com/minsuk-heo/problemsolving/blob/master/data_structure/Stack.py","sub_path":"dataStructure/linkedList.py","file_name":"linkedList.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
+{"seq_id":"448827517","text":"import xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pylab\nfrom matplotlib.ticker import StrMethodFormatter\nfrom sys import exit\n\n\n\ndef rmse(predict, actual):\n    \n    #predict = np.array(predict)\n    #actual = np.array(actual)\n\n    difference = actual - predict \n    square_diff = np.square(difference)\n    mean_square_diff = square_diff.mean()\n    score = np.sqrt(mean_square_diff)\n    return score\n\ndef data(actual):\n    scoredata = actual.mean()\n    return scoredata\n\nprev = '24'\nvar= 'prec'\n\n\nconfig = {\n    'Regiao'  :['SUL'         ,'SUDESTE'     ,'CENTRO-OESTE' ,'NORDESTE'    ,'NORTE'       ,'SUDESTE'     ,'NORTE'       ],\n    'Setor'   :['B1'          ,'B2'          ,'B3'           ,'B4'          ,'B5'          ,'B6'          ,'B7'          ],\n    'latNorte':[ 75           ,130           ,130            ,195           ,195           ,130           ,195           ],\n    'latSul'  :[-182          ,-119          ,-119           ,-45           ,-45           ,-139          ,-45           ],\n    'lonOeste':[-157          ,-78           ,-157           ,-79           ,-157          ,-78           ,-210          ],\n    'lonLest' :[-69           ,-1            ,-79            ,-1            ,-79           ,-1            ,-153          ]\n}\n\n#ind = 6\nfor ind in range(0,7,1):\n    print('ind',ind)\n\n    for indVars in range(0,4,1):\n        print('indVars',indVars)\n\n        lista_umidadeGL = []\n        lista_umidadeNova = []\n        lista_previsao = []\n\n        lista_data_umidadeGL = []\n        lista_data_umidadeNova = []\n\n\n        for previsao in range(24,192,24):\n            prev = str(previsao)\n            print(prev)\n            path_1 = \"/dados/dmdpesq/Experimento_umidade_do_solo/umidade_GL/\"\n            name_file_1 = 'JAN2014_'+ prev +'Z_12Z_interp.nc'\n\n            path_3 = \"/dados/dmdpesq/Experimento_umidade_do_solo/umidade_Nova/\"\n            name_file_3 = 'JAN2014_'+ prev +'Z_12Z_interp.nc'\n            path_4 = \"/dados/dmdpesq/Experimento_umidade_do_solo/GFS/\" \n            name_file_4 = 'prev.2014.jan_12z_'+ prev +'h_interp.nc'\n            path_out =\"/dados/dmdpesq/Experimento_umidade_do_solo/out/\" \n\n            DS_NCEP = xr.open_dataset(path_1 + name_file_1)\n            
DS_NCEP_umidade_nova = xr.open_dataset(path_3 + name_file_3)\n GFS = xr.open_dataset(path_4 + name_file_4)\n\n vars = {\n 'ds':[ \n DS_NCEP.cssf \n ,DS_NCEP.clsf \n ,DS_NCEP.t2mt \n ,DS_NCEP.q2mt],\n 'ds_nova_umidade':[\n DS_NCEP_umidade_nova.cssf \n ,DS_NCEP_umidade_nova.clsf \n ,DS_NCEP_umidade_nova.t2mt \n ,DS_NCEP_umidade_nova.q2mt],\n 'ds_GFS': [\n GFS.SHTFL_surface\n ,GFS.LHTFL_surface\n ,GFS.TMP_2maboveground\n ,GFS.SPFH_2maboveground],\n 'variavel':[ \n 'CSSF' \n ,'CLSF' \n ,'T2MT' \n ,'Q2MT']\n }\n\n latNorte = config['latNorte'][ind]\n latSul = config['latSul'][ind]\n lonOeste = config['lonOeste'][ind]\n lonLest = config['lonLest'][ind]\n\n print(\"INICIO\")\n print(ind)\n print(indVars)\n print(vars['variavel'][indVars])\n \n \n\n longName = vars['ds_GFS'][indVars].attrs['long_name']\n units = vars['ds_GFS'][indVars].attrs['units']\n \n #DS_NCEP = xr.open_dataset(path_1 + name_file_1)\n xTickTime = DS_NCEP.prec['time'].isel(time=slice(None, 31))\n \n umidadeGL = vars['ds'][indVars].isel(time=slice(None, 31), lat=slice(latNorte,latSul), lon=slice(lonOeste,lonLest)).mean(dim=\"lat\").mean(dim=\"lon\")\n\n #DS_NCEP_umidade_nova = xr.open_dataset(path_3 + name_file_3)\n umidadeNova = vars['ds_nova_umidade'][indVars].isel(time=slice(None, 31), lat=slice(latNorte,latSul), lon=slice(lonOeste,lonLest)).mean(dim=\"lat\").mean(dim=\"lon\")\n\n #GFS = xr.open_dataset(path_4 + name_file_4)\n modeloGFS = vars['ds_GFS'][indVars].isel(time=slice(None, 31), lat=slice(latNorte,latSul), lon=slice(lonOeste,lonLest)).mean(dim=\"lat\").mean(dim=\"lon\")\n\n \n previsao = prev\n ###Copiar aqui\n \t###\n \n rmse_umidadeGL = rmse(modeloGFS, umidadeGL)\n #print(modeloGFS , umidadeGL)\n #print('prev ', prev,'rmse umidade GL: ', rmse_umidadeGL)\n rmse_umidadeNova = rmse(modeloGFS, umidadeNova)\n #print('prev ',prev,'umidade nova: ', rmse_umidadeNova)\n\n lista_previsao.append(previsao)\n lista_umidadeGL.append(rmse_umidadeGL)\n lista_umidadeNova.append(rmse_umidadeNova)\n \n\n ###até aqui\n\n ############################################################\n #teste = valorOri(umidadeGL)\n #print(teste)\n #exit(0)\n data_umidadeGL = data(umidadeGL)\n #print('value umidade GL: ', data_umidadeGL)\n data_umidadeNova = data(umidadeNova)\n #print('value umidade nova: ', data_umidadeNova)\n\n\n #lista_previsao.append(previsao)\n lista_data_umidadeGL.append(data_umidadeGL)\n lista_data_umidadeNova.append(data_umidadeNova)\n ############################################################\n\n\n #print(lista_umidadeGL)\n #exit (0)\n pylab.rcParams['figure.figsize'] = (30,10)\n fig = plt.figure(figsize=(30,10))\n fig, ax1 = plt.subplots()\n sns.set_style(\"darkgrid\", {\"axes.facecolor\": \".9\"})\n sns.set_context(\"notebook\", font_scale=1.5, rc={\"lines.linewidth\": 2.5})\n plt.tight_layout()\n\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.3f}'))\n\n plt.figtext(.5,.99,'Root Mean Squared Error (RMSE) ' + longName + ' modelRef: GFS', fontsize=30, ha='center')\n plt.figtext(.5,.95,'201401 12z' + prev +'h' ,fontsize=20,ha='center')\n plt.figtext(.86,.95,'Região: '+ config['Regiao'][ind] +' Setor: '+ config['Setor'][ind] ,fontsize=20,ha='right')\n \n ###copiar aqui\n #Eixo 1\n ax1.set_ylabel('RMSE')\n lns1 = plt.plot(lista_previsao,lista_umidadeGL,color='orange', label='OPER')\n lns2 = plt.plot(lista_previsao,lista_umidadeNova,color='r', label='LDAS')\n\n #Eixo 2\n ax2 = ax1.twinx()\n ax2.set_ylabel('orig', color='b')\n lns3 = plt.plot(lista_previsao,lista_data_umidadeGL, marker='o', 
linestyle='',color='orange', label='OPER')\n lns4 = plt.plot(lista_previsao,lista_data_umidadeNova, marker='o', linestyle='',color='r', label='LDAS')\n\n plt.xticks(rotation=45) \n plt.xlabel('Previsão', labelpad=30)\n title = 'regiao_'+ config['Regiao'][ind] +'_'+ config['Setor'][ind]+'_rmse_'+vars['variavel'][indVars] +'_withGFS_2eixo.png'\n plt.legend(fontsize=17, frameon=True)\n ax1.legend(loc='upper left', bbox_to_anchor=(0.1, -0.15), shadow=True, ncol=5)\n ax2.legend(loc='upper left', bbox_to_anchor=(0.6, -0.15), shadow=True, ncol=5)\n plt.savefig(path_out + title, bbox_inches='tight', pad_inches=.2, dpi=300)\n print('Saved: {}'.format(title))\n plt.cla() #means clear current axis\n plt.clf() #means clear current figure\n plt.close('all')\n fig.clf()\n DS_NCEP.close()\n DS_NCEP_umidade_nova.close()\n GFS.close()\n\n print(config['Regiao'][ind])\n print(config['Setor'][ind])\n print(ind)\n print(indVars)\n print(vars['variavel'][indVars])\n print(\"FIM\")\n \n","sub_path":"precAnalise/rmse_dois_eixo_withGFS.py","file_name":"rmse_dois_eixo_withGFS.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"312722191","text":"import collections\nimport itertools\nimport math\nimport time\nimport random\n\nimport colors\nimport weather\nimport presets\n\n\ndef time_memoized(check_interval):\n next_update_time = collections.defaultdict(int)\n last_result = {}\n def time_memoizer(func):\n def time_memoized(*args):\n if time.time() >= next_update_time[args]:\n last_result[args] = func(*args)\n next_update_time[args] = time.time() + check_interval\n return last_result[args]\n return time_memoized\n return time_memoizer\n\n\nclass Sun(object):\n def __init__(self):\n self.pos = None\n self.width = None\n\n @time_memoized(3600)\n def get_sunrise_sunset(self):\n return weather.get_sun_info()\n\n def _iterate(self, seconds_past):\n sunrise, sunset = self.get_sunrise_sunset()\n pos = weather.get_sun_position(weather.get_seconds(), sunrise, sunset)\n if pos is None:\n self.pos = 0.0 # arbitrary value\n self.width = 0.0\n else:\n self.pos = pos\n # Width starts and ends at 5, and is 10 at noon.\n self.width = 5.0 + abs(pos - 0.5) * 10\n\n def draw(self, pixels, seconds_past):\n self._iterate(seconds_past)\n pos = len(pixels) * (0.9 + self.pos * 0.35)\n pixels.draw_line(pos - self.width / 2, pos + self.width / 2, colors.RGB(250, 180, 0))\n\n\nclass Moon(object):\n def __init__(self):\n self.pos = None\n self.width = None\n\n @time_memoized(3600)\n def get_moonrise_moonset(self):\n return weather.get_moon_info()\n\n def _iterate(self, seconds_past):\n moonrise, moonset = self.get_moonrise_moonset()\n pos = weather.get_moon_position(weather.get_seconds(), moonrise, moonset)\n if pos is None:\n self.pos = 0.0 # arbitrary value\n self.width = 0.0\n else:\n self.pos = pos\n self.width = 5.0\n\n def draw(self, pixels, seconds_past):\n self._iterate(seconds_past)\n pos = len(pixels) * (0.35 + self.pos * 0.45)\n pixels.draw_line(pos - self.width / 2, pos + self.width / 2, colors.RGB(230, 230, 230))\n\nclass Cloud(object):\n def __init__(self):\n self.pos = random.random()\n # Velocity should make it take 1-3 minutes to cycle around led strip\n # given 20fps\n self.velocity = 1.0 / 120 + 1.0 / 240 * random.random()\n self.width = 6 + 18 * random.random()\n\n def draw(self, pixels, seconds_past):\n self.pos = (self.pos + self.velocity * seconds_past) % 1.0\n pos = len(pixels) * self.pos\n pixels.draw_line(pos - 
self.width / 2, pos + self.width / 2, colors.RGB(80, 80, 80, 0.5))\n\n\nclass RainDrop(object):\n def __init__(self):\n self.pos = random.random()\n self.width = random.random()\n self.fade_rate = 0.2 + 0.1 * random.random()\n\n def _iterate(self, seconds_past):\n self.width *= self.fade_rate**seconds_past\n if self.width < 0.2:\n self.pos = random.random()\n self.width = 0.8\n\n def draw(self, pixels, seconds_past):\n self._iterate(seconds_past)\n pos = len(pixels) * self.pos\n pixels.draw_line(pos - self.width / 2, pos + self.width / 2, colors.RGB(0, 0, 255))\n\nclass WeatherPreset(presets.Preset):\n def __init__(self, name='Weather'):\n super(WeatherPreset, self).__init__(name)\n self.sun = Sun()\n self.moon = Moon()\n self.clouds = [Cloud() for _ in xrange(10)]\n self.rain_drops = [RainDrop() for _ in xrange(100)]\n\n # TODO: Update this asynchronously.\n @time_memoized(30 * 60)\n def get_weather(self):\n return weather.get_weather()\n\n def draw(self, pixels, seconds_past):\n weather = self.get_weather()\n self.sun.draw(pixels, seconds_past)\n self.moon.draw(pixels, seconds_past)\n if weather.is_cloudy:\n for cloud in itertools.islice(self.clouds, 0, int(weather.is_cloudy * len(self.clouds))):\n cloud.draw(pixels, seconds_past)\n if weather.is_rainy:\n for rain_drop in itertools.islice(self.rain_drops, 0, int(weather.is_rainy * len(self.rain_drops))):\n rain_drop.draw(pixels, seconds_past)\n\n\npresets.PRESETS.append(WeatherPreset())\n\n","sub_path":"server/presets/weather_presets.py","file_name":"weather_presets.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"201698674","text":"from app.routes import *\n\n#REGISTER CONCEPTS\n@app.route('/api/register_concept', methods=[\"POST\"])\ndef register_concept():\n if True and True:\n if request.method == \"POST\":\n check_concept = Conceptos.query.filter_by(concepto=request.form[\"concepto\"]).all()\n if(check_concept):\n return \"The concept already exists dude.\", 409\n else:\n new_concept = Conceptos(concepto=request.form[\"concepto\"],\\\n estatus= \"1\")\n db.session.add(new_concept)\n db.session.commit()\n result = Conceptos.query.filter_by(concepto=request.form[\"concepto\"]).first()\n response = {\n 'idConcepto': result.idConcepto,\n 'concepto': result.concepto\n }\n return jsonify(response), 201\n else:\n return \"you don't have authorization to view this module\", 401\n\n#UPDATE CONCEPTS\n@app.route('/api/update_concept', methods=[\"PUT\"])\ndef update_concept():\n if True and True:\n if request.method == \"PUT\":\n requestValues = EmptyStringToNull()\n check_concept = Conceptos.query.filter_by(concepto=requestValues[\"concepto\"], estatus=\"1\").first()\n if check_concept and str(check_concept.idConcepto) != str(requestValues[\"id\"]):\n return \"The concept already exists dude.\", 409\n else:\n updating_concept = Conceptos.query.filter_by(idConcepto=requestValues[\"id\"]).first()\n updating_concept.concepto=requestValues[\"concepto\"]\n db.session.add(updating_concept)\n db.session.commit()\n return \"The concept has been updated successfully!.\", 201\n else:\n return \"Method not allowed\"\n else:\n return \"you don't have athorization to view this module\", 401\n\n#DELETE CONCEPT\n@app.route('/api/delete_concept/', methods=[\"PUT\"])\ndef delete_concept(id):\n if True and True:\n if request.method == \"PUT\":\n deleting_concept = Conceptos.query.filter_by(idConcepto=id).first()\n deleting_concept.estatus = \"0\"\n 
db.session.add(deleting_concept)\n db.session.commit()\n return \"The concept has been logically deleted successfully!\", 201\n else:\n return \"Method not allowed\"\n else:\n return \"you don't have authorization to view this module\", 401\n","sub_path":"app/routes/crud_concepts.py","file_name":"crud_concepts.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"155413669","text":"'''\nflask main code for the configuration loader utility\nprovides both a get api key based on device - Panorama or firewall\noption to load the day 1 template with select variable options\n'''\n\nimport os.path\nimport csv\nfrom IPy import IP\nfrom forms import SimpleRequestForm, APIRequestForm\nfrom flask import Flask, render_template, request\nfrom scripts.bin.show_security_rules import show_security_rules\nfrom scripts.bin.generate_api_key import generate_api_key\nfrom scripts.bin.start_service import process_svc\nfrom scripts.bin.save_config import save_config\n\napp = Flask(__name__)\napp.config.from_object('config')\n\n\n@app.route('/loadtemplate', methods=['GET', 'POST'])\ndef loadtemplate():\n ''' get web form data and load configuration with api '''\n hostname = '1.1.1.1'\n devicetype = 'unknown'\n api_file = 'scripts/api-key.csv'\n if os.path.isfile(api_file) and os.path.getsize(api_file) > 0:\n with open(str(api_file), 'r') as keyFile:\n apiRead = csv.reader(keyFile)\n for row in apiRead:\n hostname = IP(row[0])\n apikey = row[1]\n devicetype = row[2]\n form = SimpleRequestForm()\n if request.method == 'POST':\n xmlVar = process_svc()\n if xmlVar['apierror'] != None:\n return render_template('error.html', xmlVar=xmlVar)\n rules_dict = show_security_rules(xmlVar)\n return render_template(\"output.html\", xmlVar=xmlVar, rules_dict=rules_dict)\n return render_template('loadtemplate.html', form=form, devicetype=devicetype, hostname=hostname)\n\n\n@app.route('/tbd')\ndef tbd():\n ''' tbd placeholder page'''\n return render_template('tbd.html')\n\n@app.route('/', methods=['GET', 'POST'])\ndef getapikey():\n ''' get api key from device using web form credentials'''\n hostname = '1.1.1.1'\n devicetype = 'unknown'\n api_file = 'scripts/api-key.csv'\n form = APIRequestForm()\n\n# The key file may exist but be empty and it blows up. 
Check to\n# make sure it's not empty.\n if os.path.isfile(api_file) and os.path.getsize(api_file) > 0:\n with open(str(api_file), 'r') as keyfile:\n apiread = csv.reader(keyfile)\n for row in apiread:\n hostname = IP(row[0])\n devicetype = row[2]\n\n if request.method == 'POST' and form.validate():\n hostname = generate_api_key()\n print(hostname)\n if hostname is None:\n return render_template('getapikey.html',\n form=form, hostname=hostname, devicetype=devicetype)\n else:\n return render_template(\"gotapikey.html\", hostname=hostname)\n else:\n return render_template('getapikey.html',\n form=form, hostname=hostname, devicetype=devicetype)\n\n@app.route('/saverunning', methods=['GET', 'POST'])\ndef saverunning():\n saveoutput = \"show command output to be displayed here\"\n showcommand = \"show_running_config\"\n hostname = '1.1.1.1'\n devicetype = 'unknown'\n filename = 'TBD'\n api_file = 'scripts/api-key.csv'\n if os.path.isfile(api_file) and os.path.getsize(api_file) > 0:\n with open(str(api_file), 'r') as keyFile:\n apiRead = csv.reader(keyFile)\n for row in apiRead:\n hostname = IP(row[0])\n apikey = row[1]\n devicetype = row[2]\n\n if request.method == 'POST':\n print('getting and saving config...')\n saveoutput = save_config(hostname, apikey, devicetype)\n return render_template(\"saverunning.html\", saveoutput=saveoutput, hostname=hostname, devicetype=devicetype, filename=filename)\n return render_template('saverunning.html', saveoutput=saveoutput, hostname=hostname, devicetype=devicetype, filename=filename)\n\nif __name__ == \"__main__\":\n app.secret_key = os.urandom(12)\n app.run(host='0.0.0.0', port=5001, debug=True)\n","sub_path":"iron_skillet.py","file_name":"iron_skillet.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"579473611","text":"from random import randint\nfrom operator import itemgetter\nimport winsound\n\nConfig = {\n \"RED\": '\\033[31m',\n \"GREEN\": '\\033[32m',\n \"BLUE\": '\\033[34m',\n \"CIANO\": '\\033[36m',\n \"MAGETA\": '\\033[35m',\n \"YELLOW\": '\\033[33m',\n \"BLACK\": '\\033[30m',\n \"WHITE\": '\\033[37m',\n 'NEGRITO': '\\033[1m',\n 'RESET': '\\033[0;0m'\n}\n\n\nclass System:\n @staticmethod\n def listPlayer():\n return [\"Ichigo Kurosaki\", \"Killer Bee\", \"Xena\", \"Roronoa Zoro\", \"Gohan\"]\n\n @staticmethod\n def printPlayers():\n player = System.listPlayer()\n for index in range(len(player)):\n print(\"%i - %s \\n\" % (index, player[index]))\n\n @staticmethod\n def printPlayer(player, flagConfig):\n print(Config[flagConfig], end=\"\")\n print('''\n\n Nome : {}\n Vida : {}\n Mana : {}\n Sword : {}\n Shield : {}\n\n '''.format(player.name, player.hp, player.mana, player.sword.name, player.shield.name))\n print(Config['RESET'], end=\"\")\n\n @staticmethod\n def print(st, flagConfig):\n print(Config[flagConfig])\n print(st)\n print(Config['RESET'])\n\n @staticmethod\n def printKnock(danoVerdadeiro, mana, attack, player, advPlayer):\n print(Config[\"BLACK\"], Config['NEGRITO'], end='')\n print(''' {} \n Utiliza {} para atacar {} '''.format(player.name.upper(), attack.name.upper(), advPlayer.name.upper()))\n\n print(Config[\"BLACK\"], Config['NEGRITO'], end='')\n print(\"Dano verdadeiro :\", end='')\n print(Config[\"RED\"], end='')\n print(danoVerdadeiro)\n\n print(Config[\"CIANO\"])\n print('''\n HP {} : {}\n '''.format(advPlayer.name.upper(), advPlayer.hp))\n\n print(Config[\"BLUE\"], end='')\n print(\"MANA RESTAURADA : {}\".format(mana))\n 
print(Config['RESET'])\n\n @staticmethod\n def choosePlayer(indexPlayer):\n nameChamp = System.listPlayer()[indexPlayer]\n if nameChamp == 'Ichigo Kurosaki':\n at1 = Attack(name=\"Getsuga Tenshou\", mana=400, latencia=6, danoFisico=1000, danoMagico=6000)\n at2 = Attack(name=\"Piercer of Heaven\", mana=200, latencia=3, danoFisico=600, danoMagico=2000)\n at3 = Attack(name=\"Getsuga Jūjishō\", mana=350, latencia=3, danoFisico=2500, danoMagico=1500)\n\n sword = Sword(\"Zanpakutō\", at1, at2, at3)\n\n shield = Shield(name=\"Armor Berserker\", latencia=2, defesaFisica=1000, defesaMagica=3000)\n player = Player(name=\"Ichigo Kurosaki\", hp=10000, mana=1000, sword=sword, shield=shield)\n\n player.setWAVShow(\"DirWAV/ICHIGOPERSONAGEM.wav\")\n player.setWAVSlang(\"DirWAV/go.wav\")\n\n return player\n elif nameChamp == 'Killer Bee':\n at1 = Attack(name=\"Crumbling the skin\", mana=200, latencia=3, danoFisico=3000, danoMagico=600)\n at2 = Attack(name=\"Life Theft\", mana=10, latencia=7, danoFisico=1000, danoMagico=5500)\n at3 = Attack(name=\"Samehada and the seven lightning swords\", mana=500, latencia=5, danoFisico=3500,\n danoMagico=4500)\n\n sword = Sword(\"Samehada\", at1, at2, at3)\n\n shield = Shield(name=\"Armor of Gemini\", latencia=4, defesaFisica=2000, defesaMagica=1000)\n player = Player(name=\"Killer Bee\", hp=10000, mana=1500, sword=sword, shield=shield)\n\n player.setWAVShow(\"DirWAV/KILLERBEEPERSONAGEM.wav\")\n player.setWAVSlang(\"DirWAV/go.wav\")\n\n return player\n elif nameChamp == 'Xena':\n at1 = Attack(name=\"Full Counter\", mana=200, latencia=3, danoFisico=3000, danoMagico=500)\n at2 = Attack(name=\"Counter Vanish\", mana=100, latencia=1, danoFisico=2500, danoMagico=100)\n at3 = Attack(name=\"Hellblaze\", mana=400, latencia=5, danoFisico=4000, danoMagico=3000)\n\n sword = Sword(\"Excalibur\", at1, at2, at3)\n\n shield = Shield(name=\"Armor Knight\", latencia=3, defesaFisica=1000, defesaMagica=1000)\n\n player = Player(name=\"Xena\", hp=7000, mana=1000, sword=sword, shield=shield)\n\n player.setWAVShow(\"DirWAV/XENAPERSONAGEM.wav\")\n player.setWAVSlang(\"DirWAV/go.wav\")\n\n return player\n elif nameChamp == 'Roronoa Zoro':\n at1 = Attack(name=\"Shishi SonSon\", mana=100, latencia=1, danoFisico=1500, danoMagico=0)\n at2 = Attack(name=\"Sanjuroku Pound Ho\", mana=200, latencia=4, danoFisico=3500, danoMagico=500)\n at3 = Attack(name=\"Yakkodori\", mana=500, latencia=3, danoFisico=3000, danoMagico=4000)\n\n sword = Sword(\"Sandai Kitetsu\", at1, at2, at3)\n\n shield = Shield(name=\"Black Armor\", latencia=4, defesaFisica=1000, defesaMagica=1000)\n\n player = Player(name=\"Roronoa Zoro\", hp=7000, mana=1000, sword=sword, shield=shield)\n\n player.setWAVShow(\"DirWAV/ZOROPERSONAGEM.wav\")\n player.setWAVSlang(\"DirWAV/go.wav\")\n\n return player\n elif nameChamp == 'Gohan':\n at1 = Attack(name=\"Fulminant Strike\", mana=400, latencia=3, danoFisico=1000, danoMagico=3000)\n at2 = Attack(name=\"Blade Combo\", mana=300, latencia=1, danoFisico=1500, danoMagico=1500)\n at3 = Attack(name=\"Rage of the Blade\", mana=600, latencia=7, danoFisico=6500, danoMagico=1000)\n\n sword = Sword(\"Espada Z\", at1, at2, at3)\n\n shield = Shield(name=\"Metal Tech\", latencia=3, defesaFisica=700, defesaMagica=500)\n\n player = Player(name=\"Gohan\", hp=15000, mana=2000, sword=sword, shield=shield)\n\n player.setWAVShow(\"DirWAV/GOHANPERSONAGEM.wav\")\n player.setWAVSlang(\"DirWAV/go.wav\")\n\n return player\n\n @staticmethod\n def calculeteDamageShield(player, attack):\n '''\n Funcao responsavel por 
CALCULAR o dano com base na ARMADURA do adversario\n player : Type (Player)\n attack : Type (Attack)\n return: INT\n '''\n # obter valores de armadura do jogador adversario\n defesaMagica = player.shield.defesaMagica\n defesaFisica = player.shield.defesaFisica\n # obter valores do dano causado pelo ataque\n danoMagico = attack.danoMagico\n danoFisico = attack.danoFisico\n # calculando valores de dano ao hp adversario pela subtracoo (dano - armadura)\n danoRealMagico = danoMagico - defesaMagica\n danoRealFisico = danoFisico - defesaFisica\n\n # validando os valores de retorno da funcao\n if danoRealMagico >= 0 and danoRealFisico >= 0:\n # significa que o ataque foi efetivo tanto de modo magico como fisico\n danoReal = danoRealFisico + danoRealMagico\n return danoReal\n elif danoRealMagico < 0 and danoRealFisico < 0:\n # significa que o ataque foi totalmente defendido\n return 0\n else:\n # significa que apenas uma das formas foi efetiva ao realizar o dano\n if danoRealMagico < 0:\n # Dano Fisico foi efetivo\n return danoRealFisico\n else:\n # Dano Magico foi efetivo\n return danoRealMagico\n\n @staticmethod\n def calculeteDamage(attack):\n '''\n Funcao responsavel por CALCULAR o dano sem a ARMADURA do adversario\n attack : Type (Attack)\n return: INT\n '''\n # obter valores do dano causado pelo ataque\n danoMagico = attack.danoMagico\n danoFisico = attack.danoFisico\n\n danoReal = danoFisico + danoMagico\n\n return danoReal\n\n\nclass Player:\n def __init__(self, name, hp, mana, sword, shield):\n self.name = name\n self.hp = hp\n self.sword = sword\n self.shield = shield\n self.mana = mana\n self.arqWAV = [\"\", \"\"]\n\n def __str__(self, ):\n return (\"Nome: \" + str(self.name) + \"\\nHP: \" + str(self.hp) + \"\\nSword: \" + str(\n self.sword.name) + \"\\nShield: \" + str(self.shield.name) + \"\\nMana: \" + str(self.mana))\n\n def sufferDamage(self, damage):\n '''\n Funcao responsavel subtrair o dano sofrido no HP\n damage : Type (INT)\n return: VOID\n '''\n\n self.hp -= damage\n\n def setWAVSlang(self, nomeArquivoWAV):\n '''\n Atualizar o arquivo inicializado como ('') vazio\n :param nomeArquivoWAV:\n :return: void\n '''\n self.arqWAV[0] = nomeArquivoWAV\n\n def PlayWAVSlang(self):\n winsound.PlaySound(self.arqWAV[0], winsound.SND_NOSTOP)\n\n def setWAVShow(self, nomeArquivoWAV):\n '''\n Atualizar o arquivo inicializado como ('') vazio\n :param nomeArquivoWAV:\n :return: void\n '''\n self.arqWAV[1] = nomeArquivoWAV\n\n def PlayWAVShow(self):\n winsound.PlaySound(self.arqWAV[1], winsound.SND_NOSTOP)\n\n def userMana(self, mana):\n '''\n Funcao responsavel subtrair a MANA utlizada para realizar o ataque\n :param Type (INT)\n :return: VOID\n '''\n\n self.mana -= mana\n\n def knock(self, playerAdversario, attack):\n '''\n Realizar o Ataque. Calcular latência.\n :param playerAdversario: Player\n :param attack: Attack\n :return: void\n '''\n # o numero randomico para a latencia\n randomLatenciaAtaque = randint(0, 9)\n # ESTRUTURA DA LATENCIA\n '''\n A latencia e a possibilidade de erro do ataque. Entao, e jagado um random e comparado esse o valor do random com a latencia do Player\n Se a latencia do individuo for 5, significa que ele tem uma margem de erro [0,5[. \n Quando comparado com o valor randomico, se o valor estiver fora dessa margem o ataque foi efetivo\n QUANTO MENOR A LATENCIA, MAIS MAIOR E A POSSIBILIDADE DE ATQUE EFETIVO\n #ESTRUTURA DA LATENCIA\n A latencia e a possibilidade de erro do ataque. 
Entao, e jagado um random e comparado esse o valor do random com a latencia do Player\n Se a latencia do individuo for 5, significa que ele tem uma margem de erro [0,5[. \n Quando comparado com o valor randomico, se o valor estiver fora dessa margem o ataque foi efetivo\n QUANTO MENOR A LATENCIA, MAIS MAIOR E A POSSIBILIDADE DE ATQUE EFETIVO\n '''\n if (attack.latencia <= randomLatenciaAtaque):\n # ATAQUE EFETIVO\n System.print('''SEU ATAQUE FOI EFETIVO\n -------------RANDOM LATENCIA: {}'''.format(str(randomLatenciaAtaque)), \"GREEN\")\n randomLatenciaDefesa = randint(0, 9)\n\n if (playerAdversario.shield.latencia <= randomLatenciaDefesa):\n # DEFESA EFETIVA\n dano = System.calculeteDamageShield(playerAdversario, attack)\n playerAdversario.sufferDamage(dano)\n self.userMana(attack.mana)\n manaRestore = self.restoreMana(dano)\n System.printKnock(dano, manaRestore, attack, self, playerAdversario)\n\n else:\n # DEFESA NAO EFETIVA\n System.print(\"DANO CRITICO\", 'NEGRITO')\n dano = System.calculeteDamage(attack)\n playerAdversario.sufferDamage(dano)\n self.userMana(attack.mana)\n manaRestore = self.restoreMana(dano)\n System.printKnock(dano, manaRestore, attack, self, playerAdversario)\n else:\n # ATAQUE NAO EFETIVO\n System.print('''SEU ATAQUE FALHOU\n -------------RANDOM LATENCIA: {}'''.format(str(randomLatenciaAtaque)), \"RED\")\n\n def restoreMana(self, dano):\n \"\"\"\n :param dano: int\n :return: int\n \"\"\"\n # quanto maior e o dano menos se recupera a mana\n if dano >= 400:\n mana = int(1000 * (100 / dano))\n self.mana += mana\n return mana\n else:\n self.mana += 300\n return 300\n\n @staticmethod\n def printAttacks(player):\n '''\n Metodo estático. Imprime os ataques.\n :param player: Player\n :return: void\n '''\n attacksUser = player.sword.getAttack()\n for attackIndice in range(4):\n if attackIndice == 3:\n print(str(attackIndice) + \" - Manter modo defensivo\")\n break\n print(str(attackIndice) + \" - \" + str(attacksUser[attackIndice]) + \"\\n\")\n\n\nclass InteligencePlayer:\n def __init__(self, player, baseDeMana):\n self.player = player\n self.baseDeMana = baseDeMana\n\n def rankAttack(self, PlayerAdv):\n '''\n Funcao responsavel por rankear os ataques do player\n :param PlayerAdv: Player\n :return: Matriz[3][3]\n '''\n\n ListAttack = self.player.sword.getAttack()\n ListRank = []\n for indexAttack in range(len(ListAttack)):\n Dano = System.calculeteDamageShield(PlayerAdv, ListAttack[indexAttack])\n ListRank.append([ListAttack[indexAttack], Dano, indexAttack])\n return sorted(ListRank, key=itemgetter(1))\n\n def resolverAttack(self, PlayerAdv):\n '''\n Funcao responsavel por retorna o index do ataque mais recomendado\n :param PlayerAdv: Player\n :return: int [0:3]\n '''\n if self.player.mana == 0:\n return 3\n if self.player.mana <= self.baseDeMana:\n ListRank = self.rankAttack(PlayerAdv)\n for linhaMatrizAttack in ListRank:\n if self.player.mana > linhaMatrizAttack[0].mana:\n return linhaMatrizAttack[2]\n return 3\n else:\n ListRank = self.rankAttack(PlayerAdv)\n for linhaMatrizAttack in ListRank[::-1]:\n if self.player.mana > linhaMatrizAttack[0].mana:\n return linhaMatrizAttack[2]\n return 3\n\n\nclass Sword:\n def __init__(self, name, attackI, attackII, attackIII):\n self.name = name\n self.attackI = attackI\n self.attackII = attackII\n self.attackIII = attackIII\n\n def __str__(self):\n ataques = self.getAttack()\n somaDanoMagico = 0\n somaDanoFisico = 0\n samaLatencia = 0\n\n for ataque in ataques:\n somaDanoMagico += ataque.danoMagico\n somaDanoFisico += 
ataque.danoFisico\n samaLatencia += ataque.latencia\n\n mediaDanoMagico = somaDanoMagico / 3\n mediaDanoFisico = somaDanoFisico / 3\n mediaLatencia = samaLatencia / 3\n\n return (\"NOME: \" + self.name + \"\\nLATENCIA (MEDIA): \" + str(mediaLatencia) + \"\\nDEFESA MAGICA (MEDIA) : \" + str(\n mediaDanoMagico) + \"\\nDEFESA FISICA (MEDIA): \" + str(mediaDanoFisico))\n\n def getAttack(self):\n '''\n Function responsible for returning the attacks required by the PLAYER\n :return: List\n '''\n return [self.attackI, self.attackII, self.attackIII]\n\n\nclass Attack:\n def __init__(self, name, mana, latencia, danoMagico, danoFisico):\n self.name = name\n self.mana = mana\n self.latencia = latencia\n self.danoMagico = danoMagico\n self.danoFisico = danoFisico\n\n def __str__(self):\n # convert to string\n return (\"Nome: \" + str(self.name) + \"\\nDano Magico: \" + str(self.danoMagico) + \"\\nDano Fisico: \" + str(\n self.danoFisico) + \"\\nMana: \" + str(self.mana) + \"\\nLatencia: \" + str(self.latencia))\n\n\nclass Shield:\n def __init__(self, name, latencia, defesaMagica, defesaFisica):\n self.name = name\n self.latencia = latencia\n self.defesaMagica = defesaMagica\n self.defesaFisica = defesaFisica\n\n def __str__(self):\n return (\n \"Nome: \" + self.name + \"\\nLatencia: \" + str(self.latencia) + \"\\nDefesa Magica: \" + str(self.defesaMagica) + \"\\nDefesa Fisica: \" + str(self.defesaFisica))","sub_path":"fileClass.py","file_name":"fileClass.py","file_ext":"py","file_size_in_byte":15828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"473625575","text":"import socket\n\nUDP_PORT = 5000\nPACKET_SIZE = 2 * 250 # 250 2-byte samples\nBYTES_COUNT = 0\nnewline = '\\r\\n'\n\nf = open('scan_file', 'wb')\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind(('', UDP_PORT))\ns.settimeout(5.0)\n\nwhile(1):\n data, addr = s.recvfrom(PACKET_SIZE)\n f.write(data)\n f.write(newline.encode('ascii'))\n BYTES_COUNT += len(data)\n if(data[0:3].decode('ascii') == 'END'):\n break\ns.close()\n\nprint (\"Total Bits Received: %d\" % (BYTES_COUNT*8))\n","sub_path":"UDP_client_test.py","file_name":"UDP_client_test.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"90184","text":"__all__ = [\n 'get_default_accounts_profile',\n 'get_default_deterministic_analysis_settings',\n 'get_default_exposure_profile',\n 'get_default_fm_aggregation_profile',\n 'get_default_unified_profile',\n 'KTOOLS_ALLOC_RULE',\n 'KTOOLS_FIFO_RELATIVE',\n 'KTOOLS_DEBUG',\n 'KTOOLS_MEM_LIMIT',\n 'KTOOLS_NUM_PROCESSES',\n 'OASIS_FILES_PREFIXES',\n 'SUMMARY_MAPPING',\n 'SUMMARY_GROUPING',\n 'SUMMARY_OUTPUT',\n 'SOURCE_IDX',\n 'SOURCE_FILENAMES',\n 'STATIC_DATA_FP'\n]\n\nimport os\n\nfrom collections import OrderedDict\n\nfrom .data import (\n get_json,\n)\n\n\nSOURCE_FILENAMES = OrderedDict({\n 'loc': 'location.csv',\n 'acc': 'account.csv',\n 'info': 'reinsinfo.csv',\n 'scope': 'reinsscope.csv'\n})\n\n# Store index from merged source files (for later slice & dice)\nSOURCE_IDX = OrderedDict({\n 'loc': 'loc_idx',\n 'acc': 'acc_idx',\n 'info': 'info_idx',\n 'scope': 'scope_idx'\n})\n\nSUMMARY_MAPPING = OrderedDict({\n 'gul_map_fn': 'gul_summary_map.csv',\n 'fm_map_fn': 'fm_summary_map.csv'\n})\n\nSUMMARY_OUTPUT = OrderedDict({\n 'gul': 'gulsummaryxref.csv',\n 'il': 'fmsummaryxref.csv'\n})\n\n# Update with load OED column names\n# NOTE: this should be removed once UI column picker feature has been 
added\nSUMMARY_GROUPING = OrderedDict({\n 'prog': None,\n 'state': ['countrycode'],\n 'county': ['geogname1'],\n 'location': ['locnumber'],\n 'lob': ['occupancycode'], # <-- \"Work around, this value should come from 'LOB' in the accounts file\"\n 'policy': ['polnumber'],\n})\n\n# Path for storing static data/metadata files used in the package\nSTATIC_DATA_FP = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)), '_data')\n\n# Default profiles that describe the financial terms in the OED acc. and loc.\n# (exposure) files, as well as how aggregation of FM input items is performed\n# in the different OED FM levels\n\n\ndef get_default_accounts_profile(path=False):\n fp = os.path.join(STATIC_DATA_FP, 'default_acc_profile.json')\n return get_json(src_fp=fp) if not path else fp\n\n\ndef get_default_exposure_profile(path=False):\n fp = os.path.join(STATIC_DATA_FP, 'default_loc_profile.json')\n return get_json(src_fp=fp) if not path else fp\n\n\ndef get_default_unified_profile(path=False):\n fp = os.path.join(STATIC_DATA_FP, 'default_unified_profile.json')\n return get_json(src_fp=fp) if not path else fp\n\n\ndef get_default_fm_aggregation_profile(path=False):\n fp = os.path.join(STATIC_DATA_FP, 'default_fm_agg_profile.json')\n return {int(k): v for k, v in get_json(src_fp=fp).items()} if not path else fp\n\n\n# Default name prefixes of the Oasis input files (GUL + IL)\nOASIS_FILES_PREFIXES = OrderedDict({\n 'gul': {\n 'complex_items': 'complex_items',\n 'items': 'items',\n 'coverages': 'coverages',\n },\n 'il': {\n 'fm_policytc': 'fm_policytc',\n 'fm_profile': 'fm_profile',\n 'fm_programme': 'fm_programme',\n 'fm_xref': 'fm_xref',\n }\n})\n\n\n# Default analysis settings for deterministic loss generation\ndef get_default_deterministic_analysis_settings(path=False):\n fp = os.path.join(STATIC_DATA_FP, 'analysis_settings.json')\n return get_json(src_fp=fp) if not path else fp\n\n\n# Defaults for Ktools runtime parameters\nKTOOLS_NUM_PROCESSES = 2\nKTOOLS_MEM_LIMIT = False\nKTOOLS_FIFO_RELATIVE = False\nKTOOLS_ALLOC_RULE = 2\nKTOOLS_DEBUG = False\n","sub_path":"src/flamingo_api_server/reinsurance_modules/utils/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"19055610","text":"# coding=utf-8\n# from sympy import symbols\n\n# x = symbols('x')\n\ndef isVariable(variable):\n # returns True, if the symbol is in list of variables\n list_of_variables = {'q', 'w', 'y', 'u', 'p', 'ü', 'õ', 'd', 'f', 'j', 'k', 'ö',\n 'ä', 'z', 'x', 'v', 'b', 'm', 'Q', 'W', 'Y', 'U', 'P', 'Ü',\n 'Õ', 'D', 'F', 'J', 'K', 'Ö', 'Ä', 'Z', 'X', 'V', 'B', 'M'}\n if variable in list_of_variables: return True\n return False\n\ndef getRidOfSpaces(argument):\n return argument.replace(' ', '')\n\ndef getMissingMultiplic(argument):\n # if there are any missing multiplication mark in the input, it adds them\n argument = list(getRidOfSpaces(argument))\n for i in range(len(argument)-1):\n if (argument[i]).strip().isdigit():\n if isVariable(argument[i+1]):\n argument.insert(i+1, \"*\")\n elif argument[i+1] == \"(\":\n argument.insert(i+1, \"*\")\n elif argument[i] == \")\":\n if argument[i+1] == \"(\":\n argument.insert(i+1, \"*\")\n elif (argument[i+1]).strip().isdigit():\n argument.insert(i+1, \"*\")\n elif isVariable(argument[i+1]):\n argument.insert(i+1, \"*\")\n elif isVariable(argument[i]):\n if (argument[i+1]).strip().isdigit():\n argument.insert(i+1, \"*\")\n elif 
argument[i+1] == \"(\":\n argument.insert(i+1, \"*\")\n return \"\".join(argument)\n\ndef getAllToLeftSide(expr):\n # takes an argument as string and returns the it with all arguments on the left side with operators changed\n expr = list(getRidOfSpaces(expr))\n if \"|\" in expr:\n counter = 0\n for i in range(len(expr)):\n if expr[i] == \"|\":\n counter += 1\n if counter % 2 == 0:\n expr[i] = \")\"\n expr = \"\".join(expr)\n expr = list(expr.replace(\"|\", \"abs(\"))\n if expr.count('(') != expr.count(')'):\n return -1\n if \"=\" in expr:\n sign_index = expr.index(\"=\")\n elif \"<\" in expr:\n sign_index = expr.index(\"<\")\n else:\n sign_index = expr.index(\">\")\n left_side = \"\".join(expr[:sign_index])\n right_side = expr[sign_index+1:]\n counter_parens = 0\n for i in range(len(right_side)):\n if right_side[i] == \"(\":\n counter_parens += 1\n elif right_side[i] == \")\":\n counter_parens -= 1\n if counter_parens == 0:\n if right_side[i] == \"+\":\n right_side[i] = \"-\"\n elif right_side[i] == \"-\":\n right_side[i] = \"+\"\n right_side = \"\".join(right_side)\n if right_side[0] != \"+\" and right_side[0] != \"-\":\n right_side = \"-\" + right_side\n return left_side + right_side\n\ndef replaceSqrt(expr):\n counter = 0\n sqrt_counter = expr.count(\"sqrt\")\n while sqrt_counter > counter:\n first_paren = expr.index(\"sqrt\")\n after_paren = expr[first_paren+4:]\n counter_parens = 0\n for i in range(len(after_paren)):\n if after_paren[i] == \"(\":\n counter_parens += 1\n elif after_paren[i] == \")\":\n counter_parens -= 1\n if counter_parens == 0:\n paren_index = i\n break\n expr = (\"\".join(expr[:first_paren]) +\n \"\".join(after_paren[:paren_index+1]) +\n \"**0.5\" +\n \"\".join(after_paren[paren_index+1:]))\n counter += 1\n return expr\n\ndef optimizeEquationForSympy(equation):\n equation = getMissingMultiplic(equation)\n if \"=\" in equation:\n equation = getAllToLeftSide(equation)\n elif \"<\" in equation:\n equation = getAllToLeftSide(equation) + \" < 0\"\n elif \">\" in equation:\n equation = getAllToLeftSide(equation) + \" > 0\"\n if \"sqrt\" in equation:\n equation = replaceSqrt(equation)\n return equation\n\n\n\nif __name__ == '__main__':\n print(optimizeEquationForSympy('3x + 7 = 6y - p**(-4)'))\n print(optimizeEquationForSympy(\"4-7=abs(3-2)+abs(4-7)-2(+3(4-z))\"))\n print(optimizeEquationForSympy(\"x-7+|x-2+2|=+|4-7| + |x+6|\"))\n print(optimizeEquationForSympy(\"(x-4)(5+x)=0\"))\n","sub_path":"Equation_manipulation.py","file_name":"Equation_manipulation.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"541124232","text":"# -*- coding: utf-8 -*-\n# BSD 3-Clause License\n#\n# Copyright (c) 2017\n# All rights reserved.\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS 
SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ==========================================================================\n\n# Copyright (c) OpenMMLab. All rights reserved.\nimport cv2\nimport numpy as np\nimport pyclipper\nfrom mmdet.core import BitmapMasks\nfrom mmdet.datasets.builder import PIPELINES\nfrom shapely.geometry import Polygon\n\nfrom . import BaseTextDetTargets\n\n\n@PIPELINES.register_module()\nclass DBNetTargets(BaseTextDetTargets):\n \"\"\"Generate gt shrunk text, gt threshold map, and their effective region\n masks to learn DBNet: Real-time Scene Text Detection with Differentiable\n Binarization [https://arxiv.org/abs/1911.08947]. This was partially adapted\n from https://github.com/MhLiao/DB.\n\n Args:\n shrink_ratio (float): The area shrunk ratio between text\n kernels and their text masks.\n thr_min (float): The minimum value of the threshold map.\n thr_max (float): The maximum value of the threshold map.\n min_short_size (int): The minimum size of polygon below which\n the polygon is invalid.\n \"\"\"\n\n def __init__(self,\n shrink_ratio=0.4,\n thr_min=0.3,\n thr_max=0.7,\n min_short_size=8):\n super().__init__()\n self.shrink_ratio = shrink_ratio\n self.thr_min = thr_min\n self.thr_max = thr_max\n self.min_short_size = min_short_size\n\n def find_invalid(self, results):\n \"\"\"Find invalid polygons.\n\n Args:\n results (dict): The dict containing gt_mask.\n\n Returns:\n ignore_tags (list[bool]): The indicators for ignoring polygons.\n \"\"\"\n texts = results['gt_masks'].masks\n ignore_tags = [False] * len(texts)\n\n for idx, text in enumerate(texts):\n if self.invalid_polygon(text[0]):\n ignore_tags[idx] = True\n return ignore_tags\n\n def invalid_polygon(self, poly):\n \"\"\"Judge the input polygon is invalid or not. 
It is invalid if its area\n smaller than 1 or the shorter side of its minimum bounding box smaller\n than min_short_size.\n\n Args:\n poly (ndarray): The polygon boundary point sequence.\n\n Returns:\n True/False (bool): Whether the polygon is invalid.\n \"\"\"\n area = self.polygon_area(poly)\n if abs(area) < 1:\n return True\n short_size = min(self.polygon_size(poly))\n if short_size < self.min_short_size:\n return True\n\n return False\n\n def ignore_texts(self, results, ignore_tags):\n \"\"\"Ignore gt masks and gt_labels while padding gt_masks_ignore in\n results given ignore_tags.\n\n Args:\n results (dict): Result for one image.\n ignore_tags (list[int]): Indicate whether to ignore its\n corresponding ground truth text.\n\n Returns:\n results (dict): Results after filtering.\n \"\"\"\n flag_len = len(ignore_tags)\n assert flag_len == len(results['gt_masks'].masks)\n assert flag_len == len(results['gt_labels'])\n\n results['gt_masks_ignore'].masks += [\n mask for i, mask in enumerate(results['gt_masks'].masks)\n if ignore_tags[i]\n ]\n results['gt_masks'].masks = [\n mask for i, mask in enumerate(results['gt_masks'].masks)\n if not ignore_tags[i]\n ]\n results['gt_labels'] = np.array([\n mask for i, mask in enumerate(results['gt_labels'])\n if not ignore_tags[i]\n ])\n new_ignore_tags = [ignore for ignore in ignore_tags if not ignore]\n\n return results, new_ignore_tags\n\n def generate_thr_map(self, img_size, polygons):\n \"\"\"Generate threshold map.\n\n Args:\n img_size (tuple(int)): The image size (h,w)\n polygons (list(ndarray)): The polygon list.\n\n Returns:\n thr_map (ndarray): The generated threshold map.\n thr_mask (ndarray): The effective mask of threshold map.\n \"\"\"\n thr_map = np.zeros(img_size, dtype=np.float32)\n thr_mask = np.zeros(img_size, dtype=np.uint8)\n\n for polygon in polygons:\n self.draw_border_map(polygon[0], thr_map, mask=thr_mask)\n thr_map = thr_map * (self.thr_max - self.thr_min) + self.thr_min\n\n return thr_map, thr_mask\n\n def draw_border_map(self, polygon, canvas, mask):\n \"\"\"Generate threshold map for one polygon.\n\n Args:\n polygon(ndarray): The polygon boundary ndarray.\n canvas(ndarray): The generated threshold map.\n mask(ndarray): The generated threshold mask.\n \"\"\"\n polygon = polygon.reshape(-1, 2)\n assert polygon.ndim == 2\n assert polygon.shape[1] == 2\n\n polygon_shape = Polygon(polygon)\n distance = (\n polygon_shape.area * (1 - np.power(self.shrink_ratio, 2)) /\n polygon_shape.length)\n subject = [tuple(p) for p in polygon]\n padding = pyclipper.PyclipperOffset()\n padding.AddPath(subject, pyclipper.JT_ROUND,\n pyclipper.ET_CLOSEDPOLYGON)\n padded_polygon = padding.Execute(distance)\n if len(padded_polygon) > 0:\n padded_polygon = np.array(padded_polygon[0])\n else:\n print(f'padding {polygon} with {distance} gets {padded_polygon}')\n padded_polygon = polygon.copy().astype(np.int32)\n\n x_min = padded_polygon[:, 0].min()\n x_max = padded_polygon[:, 0].max()\n y_min = padded_polygon[:, 1].min()\n y_max = padded_polygon[:, 1].max()\n\n width = x_max - x_min + 1\n height = y_max - y_min + 1\n\n polygon[:, 0] = polygon[:, 0] - x_min\n polygon[:, 1] = polygon[:, 1] - y_min\n\n xs = np.broadcast_to(\n np.linspace(0, width - 1, num=width).reshape(1, width),\n (height, width))\n ys = np.broadcast_to(\n np.linspace(0, height - 1, num=height).reshape(height, 1),\n (height, width))\n\n distance_map = np.zeros((polygon.shape[0], height, width),\n dtype=np.float32)\n for i in range(polygon.shape[0]):\n j = (i + 1) % polygon.shape[0]\n 
absolute_distance = self.point2line(xs, ys, polygon[i], polygon[j])\n distance_map[i] = np.clip(absolute_distance / distance, 0, 1)\n distance_map = distance_map.min(axis=0)\n\n x_min_valid = min(max(0, x_min), canvas.shape[1] - 1)\n x_max_valid = min(max(0, x_max), canvas.shape[1] - 1)\n y_min_valid = min(max(0, y_min), canvas.shape[0] - 1)\n y_max_valid = min(max(0, y_max), canvas.shape[0] - 1)\n\n if x_min_valid - x_min >= width or y_min_valid - y_min >= height:\n return\n\n cv2.fillPoly(mask, [padded_polygon.astype(np.int32)], 1.0)\n canvas[y_min_valid:y_max_valid + 1,\n x_min_valid:x_max_valid + 1] = np.fmax(\n 1 - distance_map[y_min_valid - y_min:y_max_valid - y_max +\n height, x_min_valid - x_min:x_max_valid -\n x_max + width],\n canvas[y_min_valid:y_max_valid + 1,\n x_min_valid:x_max_valid + 1])\n\n def generate_targets(self, results):\n \"\"\"Generate the gt targets for DBNet.\n\n Args:\n results (dict): The input result dictionary.\n\n Returns:\n results (dict): The output result dictionary.\n \"\"\"\n assert isinstance(results, dict)\n\n if 'bbox_fields' in results:\n results['bbox_fields'].clear()\n\n ignore_tags = self.find_invalid(results)\n results, ignore_tags = self.ignore_texts(results, ignore_tags)\n\n h, w, _ = results['img_shape']\n polygons = results['gt_masks'].masks\n\n # generate gt_shrink_kernel\n gt_shrink, ignore_tags = self.generate_kernels((h, w),\n polygons,\n self.shrink_ratio,\n ignore_tags=ignore_tags)\n\n results, ignore_tags = self.ignore_texts(results, ignore_tags)\n # genenrate gt_shrink_mask\n polygons_ignore = results['gt_masks_ignore'].masks\n gt_shrink_mask = self.generate_effective_mask((h, w), polygons_ignore)\n\n # generate gt_threshold and gt_threshold_mask\n polygons = results['gt_masks'].masks\n gt_thr, gt_thr_mask = self.generate_thr_map((h, w), polygons)\n\n results['mask_fields'].clear() # rm gt_masks encoded by polygons\n results.pop('gt_labels', None)\n results.pop('gt_masks', None)\n results.pop('gt_bboxes', None)\n results.pop('gt_bboxes_ignore', None)\n\n mapping = {\n 'gt_shrink': gt_shrink,\n 'gt_shrink_mask': gt_shrink_mask,\n 'gt_thr': gt_thr,\n 'gt_thr_mask': gt_thr_mask\n }\n for key, value in mapping.items():\n value = value if isinstance(value, list) else [value]\n results[key] = BitmapMasks(value, h, w)\n results['mask_fields'].append(key)\n\n return results\n","sub_path":"PyTorch/built-in/cv/detection/DBpp_ID4145_for_PyTorch/mmocr/datasets/pipelines/textdet_targets/dbnet_targets.py","file_name":"dbnet_targets.py","file_ext":"py","file_size_in_byte":10811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"276481354","text":"import file as f\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\n#import webbrowser\r\nimport speech as s\r\n\r\nengine = pyttsx3.init('sapi5')\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice',voices[0].id)\r\nengine.setProperty('rate', 120) # Speed percent (can go over 100)\r\nengine.setProperty('volume', 0.9) # Volume 0-1\r\n\r\n\r\ndef speak(audio):\r\n engine.say(audio)\r\n engine.runAndWait()\r\n\r\n \r\n\r\n'''\r\ndef Take_Command():\r\n query = input('you: ')\r\n return (query)\r\n'''\r\n\r\nif __name__ == \"__main__\":\r\n # f.Wish_Me()\r\n\r\n while True:\r\n query=s.Take_Command()\r\n print(query)\r\n \r\n # logic for executing task based on query\r\n \r\n if 'wikipedia' in query:\r\n query = query.replace('wikipedia ','')\r\n f.wiki(query)\r\n \r\n elif 'open youtube' in query:\r\n f.youtube()\r\n \r\n elif 'open 
google' in query:\r\n f.google()\r\n \r\n \r\n elif 'play music' in query:\r\n f.music()\r\n \r\n elif ' time' in query:\r\n f.time()\r\n \r\n elif 'reminder' in query:\r\n f.reminder()\r\n \r\n elif 'bye nancy' in query:\r\n f.nancy() \r\n elif 'hello' in query:\r\n speak('hello, how can i help you')\r\n \r\n elif 'what is your name' in query:\r\n speak('i am nancy')\r\n \r\n# else 'ok' in query:\r\n# speak('hmmm')\r\n \r\n \r\n elif 'hello nancy' in query:\r\n f.nancy_f(query)\r\n \r\n elif 'conversation' in query:\r\n speak('what type of conversation you would like')\r\n a = s.Take_Command()\r\n speak('wait sir')\r\n import bot1 as c\r\n \r\n # elif ' prediction' or 'predict'in query:\r\n # from stock import stock\r\n \r\n \r\n elif 'video series' in query:\r\n f.videoseries()\r\n \r\n elif 'ms word' in query:\r\n f.msword()\r\n \r\n elif 'ms excel' in query:\r\n f.msexcel()\r\n \r\n elif 'chrom' in query:\r\n f.chrome()\r\n \r\n elif 'shutdown' in query:\r\n f.shutdown()\r\n \r\n elif 'restart' in query:\r\n f.restart()\r\n \r\n elif 'sleep' in query:\r\n f.sleep()\r\n \r\n elif 'keyboard' in query:\r\n f.keyboard()\r\n \r\n elif 'calculator' in query:\r\n f.calculator() \r\n \r\n elif 'search' in query:\r\n f.search()\r\n \r\n \r\n else:\r\n speak(\"i can't understand\")\r\n \r\n \r\n","sub_path":"Desktop Assistant/Code/nancy.py","file_name":"nancy.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"182328907","text":"import turtle\r\nturtle = turtle.Turtle()\r\n\r\nscreen = turtle.getscreen()\r\n\r\n# turtle. textinput(title, prompt)\r\n# Parameters: title – string\r\n# prompt – string\r\n# Pop up a dialog window for input of a string.\r\n# Parameter title is the title of the dialog window, prompt is a text mostly describing what information to input.\r\n# Return the string input. 
\r\n# If the dialog is canceled, return None.\r\n\r\nname = screen.textinput('Name','Enter your name:')\r\nprint(name)","sub_path":"Turtle/textInput.py","file_name":"textInput.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"305701808","text":"# This script accepts a data file containing data tables in CSV format and performs an SQL join using MapReduce\nimport MapReduce\nimport sys\n\n\"\"\"\nSQL Join Example in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\ndef mapper(record):\n table = record[1]\n mr.emit_intermediate(table, record)\n\ndef reducer(key, list_of_values):\n order = list_of_values[0]\n for i in range(1,len(list_of_values)):\n mr.emit((order + list_of_values[i]))\n\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","sub_path":"data_science_course/src/assignment3/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"382905287","text":"# add stratified\n# do cross-validation\n\nimport pandas as pd\nimport lightgbm as lgb\nimport json\n\n\npath_data = '../tables/train_df_5.csv'\npath_save_model = '../models/lgb_test.model'\n\n\ndef run_lgb(train_X, train_y, test_X, test_y, val_X=None, val_y=None):\n params = {\n \"objective\" : \"multiclass\",\n \"num_classes\": 80,\n \"num_threads\": 4,\n \"max_depth\": 25, # 6\n \"learning_rate\" : 0.03,\n# \"num_leaves\" : 30,\n \"early_stopping_round\": 50,\n \"metric\" : \"multi_logloss\",\n# \"min_child_samples\" : 100,\n \"verbosity\" : -1,\n \"bagging_seed\" : 2018,\n \"bagging_fraction\" : 0.7,\n \"feature_fraction\" : 0.7,\n \"num_iterations\": 150\n }\n \n train = lgb.Dataset(train_X, label=train_y)\n test = lgb.Dataset(test_X, label=test_y)\n \n verb_eval=5\n model = lgb.train(params, train, valid_sets=[test], # valid_sets=[train, val], \n valid_names = ['test'],\n verbose_eval=verb_eval)\n return model\n\n\ndata = pd.read_csv(path_data)\ndata = data.sample(frac=1)\nprint('Data has been loaded...')\ndata.drop(['hash_inn'], 1, inplace=True)\ndata = data[data['target'] != -1]\ntrain_cols = [x for x in list(data.columns) if x != 'target']\n\nX = data[train_cols]\ny = data['target']\n\np = 0.9\ntrain_portion = int(X.shape[0] * p)\nX_train, y_train = X.iloc[:train_portion], y.iloc[:train_portion]\nX_test, y_test = X.iloc[train_portion:], y.iloc[train_portion:]\n\nX_test.to_csv('X_test.csv')\ny_test.to_csv('y_test.csv')\n\nmodel = run_lgb(X_train, y_train, X_test, y_test)\nmodel.save_model(path_save_model)\n","sub_path":"code/train_lgb.py","file_name":"train_lgb.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"324589718","text":"### Import libraries\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport pickle\nfrom scipy.signal import find_peaks_cwt\nimport time\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n\ndist_pickle = pickle.load( open( \"dist_pickle.p\", \"rb\" ) )\ncamera_mtx = dist_pickle[\"mtx\"]\ncamera_dist = dist_pickle[\"dist\"]\nobjpoints = dist_pickle[\"objpoints\"]\nimgpoints = 
dist_pickle[\"imgpoints\"]\n\n\n\n\nright_fit_count = 0\nframe_count = 0\n\n\n'''\ndef detect_lines(img_canny_masked):\n \n # Apply HoughLines to extract lines\n rho_res = 2 \n theta_res = np.pi/180. \n threshold = 15 \n min_line_length = 40 \n max_line_gap = 20 \n lines = cv2.HoughLinesP(img_canny_masked, rho_res, theta_res, threshold, np.array([]), \n minLineLength=min_line_length, maxLineGap=max_line_gap)\n return lines\n\n'''\n\n\n# Display Function\ndef disp_img(original_image, augmented_image, aug_title = \"\"):\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))\n f.tight_layout()\n ax1.imshow(original_image)\n ax2.imshow(augmented_image)\n ax1.set_title('Original Image', fontsize=30)\n ax2.set_title('Augmented Image: ' + aug_title, fontsize=30)\n plt.show()\n \n'''\n \ndef color_threshold(img, color_thresh=(150, 255), sx_thresh=(20, 100)):\n img = np.copy(img)\n # Convert to HLS color space and separate the S channel\n HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)\n l_channel = HLS[:,:,1]\n s_channel = HLS[:,:,2]\n # Sobel x\n sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x\n abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n \n # Threshold x gradient\n sxbinary = np.zeros_like(scaled_sobel)\n sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1\n \n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= color_thresh[0]) & (s_channel <= color_thresh[1])] = 1\n # Stack each channel\n # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might\n # be beneficial to replace this channel with something else.\n color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary))\n \n combined_color_binary = np.zeros_like(s_binary)\n combined_color_binary[(sxbinary == 1) | (s_binary == 1)] = 1\n \n return combined_color_binary\n \n'''\n\ndef color_threshold(img):\n \n l_channel = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)[:,:,0]\n b_channel = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:,:,2]\n #s_channel = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:,:,2]\n \n b_thresh_min = 150\n b_thresh_max = 200\n b_binary = np.zeros_like(b_channel)\n b_binary[(b_channel >= b_thresh_min) & (b_channel <= b_thresh_max)] = 1\n \n l_thresh_min = 225 \n l_thresh_max = 255\n l_binary = np.zeros_like(l_channel)\n l_binary[(l_channel >= l_thresh_min) & (l_channel <= l_thresh_max)] = 1\n \n '''\n s_thres_min = 180\n s_thres_max = 255\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thres_min) & (s_channel <= s_thres_max)] = 1\n '''\n \n combined_binary = np.zeros_like(b_binary)\n combined_binary[(l_binary == 1) | (b_binary == 1)] = 1\n \n \n return combined_binary\n\n\n#_______________________________________________________________________________\n'''\ndef abs_sobel_thresh(image, orient='x', sobel_kernel=3, thresh=(150, 255)):\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))\n elif orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))\n # Rescale back to 8 bit integer\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n # Create a copy and apply the threshold\n grad_binary = np.zeros_like(scaled_sobel)\n # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n\n return 
grad_binary\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n # Convert to grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n # Take both Sobel x and y gradients\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # Calculate the gradient magnitude\n gradmag = np.sqrt(sobelx**2 + sobely**2)\n # Rescale to 8 bit\n scale_factor = np.max(gradmag)/255 \n gradmag = (gradmag/scale_factor).astype(np.uint8) \n # Create a binary image of ones where threshold is met, zeros otherwise\n mag_binary = np.zeros_like(gradmag)\n mag_binary[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1\n\n # Return the binary image\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):\n # Grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n # Calculate the x and y gradients\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # Take the absolute value before extracting the gradient direction, \n # apply a threshold, and create a binary image result\n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n dir_binary = np.zeros_like(absgraddir)\n dir_binary[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1\n\n return dir_binary\n\n\ndef hls_select(img, thresh=(150, 255)):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n hls_binary = np.zeros_like(s_channel)\n hls_binary[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n return hls_binary\n\n\ndef color_threshold(img, ksize = 3):\n # Apply each of the thresholding functions\n gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(30, 100))\n grady = abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(30, 100))\n mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(30, 100))\n dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0, np.pi/2))\n hls_binary = hls_select(img, thresh=(180,255))\n combined = np.zeros_like(dir_binary)\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))|(hls_binary==1)] = 1\n \n return combined\n \n\n\n#_______________________________________________________________________________\n'''\ndef undistort_img(image):\n # Undistort test image\n undist_image = cv2.undistort(image, camera_mtx, camera_dist, None, camera_mtx)\n\n img_size = (undist_image.shape[1], undist_image.shape[0])\n \n # Lane line vertices\n # Upper and low are based on visual locations, not grid locations\n center_x = img_size[0]//2\n upper_y = img_size[1]//1.5 \n low_y = img_size[1]\n upper_left_x = center_x//1.33 #1.33\n upper_right_x = center_x//0.8 #0.8\n low_left_x = 0\n low_right_x = img_size[0] # 2*center_x\n \n '''\n center_x = img_size[0]//2\n upper_y = img_size[1]//1.5\n low_y = img_size[1]\n upper_left_x = center_x//1.33\n upper_right_x = center_x//0.80\n low_left_x = 0\n low_right_x = 2*center_x\n \n '''\n\n \n \n # Calculate source points based on fractions of imade dimensions\n src_corners = np.float32([[low_left_x, low_y], \n [upper_left_x, upper_y], \n [upper_right_x, upper_y],\n [low_right_x, low_y]])\n \n \n # Calculate destination points based on entire image's dimensions.\n \n dst_corners = np.float32([[0, img_size[1]],\n [0, 0],\n [img_size[0],0],\n [img_size[0], img_size[1]]])\n \n \n return undist_image, src_corners, dst_corners\n\n\n\ndef perspective_transform(image):\n # Calculate perspective transform\n 
\n undist_image, src_corners, dst_corners = undistort_img(image) \n \n img_size = (undist_image.shape[1], undist_image.shape[0])\n \n M = cv2.getPerspectiveTransform(src_corners, dst_corners)\n\n warped = cv2.warpPerspective(undist_image, M, img_size)\n \n M_inv = cv2.getPerspectiveTransform(dst_corners, src_corners)\n\n # Draw points and lines to mark region for transform\n \n '''\n for i in range(4):\n \n cv2.circle(undist_image, (src_corners[i,0], src_corners[i,1]), 6, (255, 0, 0), 6)\n for i in range(4):\n \n cv2.line(undist_image, \n (src_corners[i-1,0], src_corners[i-1,1]), \n (src_corners[i,0], src_corners[i,1]), \n (0,255,0), 2)\n '''\n \n return warped, M_inv\n \n \n \n### Color Thresholding on Bird-Eye View\n\ndef thresholded_img(image):\n # Image thresholding\n warped, M_inv = perspective_transform(image)\n blurred_warped = cv2.GaussianBlur(warped,(5,5),0)\n\n # Color Thresholding\n combined_color_binary = color_threshold(blurred_warped)\n \n return combined_color_binary, M_inv \n \n\n### Lane Finding\ndef lane_coordinates(image):\n \n # Define image height and window size\n img_dim_y = image.shape[0]\n img_dim_x = image.shape[1]\n img_slices = 10\n win_size = img_dim_y//img_slices\n \n # Histogram\n indexes = []\n #hist_val = []\n \n for i in range(img_slices):\n histogram = np.mean(image[(img_slices-i-1)*win_size:(img_slices-i)*win_size,:], axis=0)\n \n # Histogram search area\n left_half_min = 0\n left_half_max = img_dim_x//2.5\n right_half = img_dim_x-(img_dim_x//2.2)\n \n # Histogram peaks\n hist_peaksl = np.argmax(histogram[int(left_half_min):int(left_half_max)])\n hist_peaksr = np.argmax(histogram[int(right_half):])\n \n # Reduce noise\n if histogram[int(hist_peaksl+left_half_min)] > 0.01:\n hist_peaksl = int(hist_peaksl+left_half_min)\n else:\n hist_peaksl = 0\n \n if histogram[int(hist_peaksr + right_half)] > 0.01:\n hist_peaksr = int(hist_peaksr + right_half)\n else:\n hist_peaksr = 0\n \n # Append indices\n indexes.append([hist_peaksl, hist_peaksr])\n \n \n left_lane_idx = []\n right_lane_idx = []\n \n \n for i in range(img_slices):\n # Define window y positions\n win_y1 = (img_slices-i)*win_size\n win_y2 = (img_slices-i)*win_size - win_size \n\n # Draw boxes and get indices of thresholded lane points\n # Left Lane\n if (indexes[i][0] != 0):\n # Define window x positions\n win_x1l = indexes[i][0] - (win_size//2)\n win_x2l = indexes[i][0] + (win_size//2)\n \n # Identify lane points where line was detected\n left_lane_idx_local = np.argwhere(image[win_y2:win_y1, win_x1l:win_x2l] > 0)\n # Append to list of lane indices and apply frame of reference transformation\n left_lane_idx.append(left_lane_idx_local + [win_y2,win_x1l])\n\n # Right lane\n if(indexes[i][1] != 0):\n # Define window x positions\n win_x1r = indexes[i][1] - (win_size//2)\n win_x2r = indexes[i][1] + (win_size//2)\n \n # Identify lane points where line was detected\n right_lane_idx_local = np.argwhere(image[win_y2:win_y1, win_x1r:win_x2r] > 0)\n \n # Append to list of lane indices and apply frame of reference transformation\n right_lane_idx.append(right_lane_idx_local + [win_y2,win_x1r]) \n \n # Concatenate all lane points to respective lane variables\n left_lane_idx = np.concatenate(left_lane_idx)\n right_lane_idx = np.concatenate(right_lane_idx)\n\n \n return left_lane_idx, right_lane_idx\n \n \n### Draw Lane Points on new image\ndef draw_lane_points(img, left_lane_idx, right_lane_idx):\n \n new_img = np.zeros_like(img)\n for i in range(len(left_lane_idx)):\n for j in range(len(left_lane_idx[i])):\n 
cv2.circle(new_img, (left_lane_idx[i][1],left_lane_idx[i][0]), 1, (255, 255, 0), 1)\n\n for i in range(len(right_lane_idx)):\n cv2.circle(new_img, (right_lane_idx[i][1],right_lane_idx[i][0]), 1, (255, 0, 0), 1)\n \n return new_img\n \n### Fit lane lines\n\ndef identify_lane(left_lane_idx, right_lane_idx, img_size):\n \n global prev_right_fit, right_fit_count\n \n # Obtain individual set of coordinates for each detected lane \n left_lane_y = np.array([item[0] for item in left_lane_idx])\n left_lane_x = np.array([item[1] for item in left_lane_idx])\n right_lane_y = np.array([item[0] for item in right_lane_idx])\n right_lane_x = np.array([item[1] for item in right_lane_idx])\n\n # Fit a second order polynomial to lane lines\n left_fit = np.polyfit(left_lane_y , left_lane_x, 2)\n left_fit_x = left_fit[0]*left_lane_y **2 + left_fit[1]*left_lane_y + left_fit[2]\n \n right_fit = np.polyfit(right_lane_y, right_lane_x, 2)\n right_fit_x = right_fit[0]*right_lane_y**2 + right_fit[1]*right_lane_y + right_fit[2] \n\n \n # Extrapolation of left lane\n top_left_y = top_right_y = 0\n bottom_left_y = bottom_right_y = img_size[1]\n \n top_left_x = left_fit[0]*top_left_y**2 + left_fit[1]*top_left_y + left_fit[2]\n bottom_left_x = left_fit[0]*bottom_left_y**2 + left_fit[1]*bottom_left_y + left_fit[2]\n \n left_fit_x = np.append(np.flipud(left_fit_x), top_left_x)\n left_lane_y = np.append(np.flipud(left_lane_y), top_left_y)\n \n left_fit_x = np.append(np.flipud(left_fit_x), bottom_left_x)\n left_lane_y = np.append(np.flipud(left_lane_y), bottom_left_y)\n \n \n # Use previous frames to keep track of right lane\n if len(right_lane_x)<1000 and right_fit_count==20:\n right_fit = prev_right_fit/20\n right_fit_count = 0\n \n elif right_fit_count == 0:\n prev_right_fit = right_fit\n \n elif 0 < right_fit_count < 20:\n right_fit_count += 1\n prev_right_fit += right_fit\n \n # Extrapolation of right lane\n top_right_x = right_fit[0]*top_right_y**2 + right_fit[1]*top_right_y + right_fit[2]\n bottom_right_x = right_fit[0]*bottom_right_y**2 + right_fit[1]*bottom_right_y + right_fit[2]\n \n right_fit_x = np.append(np.flipud(right_fit_x), top_right_x)\n right_lane_y = np.append(np.flipud(right_lane_y), top_right_y)\n\n right_lane_y = np.append(np.flipud(right_lane_y), bottom_right_y)\n right_fit_x = np.append(np.flipud(right_fit_x), bottom_right_x)\n \n return left_lane_y, right_lane_y, left_fit_x, right_fit_x, left_fit, right_fit\n \n\n# Draw curved lines on image\ndef draw_curved_line(img, line_fit):\n p = np.poly1d(line_fit)\n x = list(range(0, img.shape[0]))\n y = list(map(int, p(x)))\n points = np.array([[y1,x1] for x1, y1 in zip(x, y)])\n points = points.reshape((-1,1,2))\n \n out_img = cv2.polylines(img, np.int32([points]), False, color=(255,255,255), thickness=10)\n \n \n return out_img\n\n\n# Calculate lane curvature\ndef lane_curvature(lane_fit_x, lane_fit_y):\n \n # Define conversions in x and y from pixels space to meters\n ym_per_pix = 30/720 # meters per pixel in y dimension\n xm_per_pix = 3.7/700 # meteres per pixel in x dimension\n\n new_fit = np.polyfit(lane_fit_y*ym_per_pix, lane_fit_x*xm_per_pix, 2)\n \n y_eval = np.max(lane_fit_y)\n \n rad_curvature = ((1 + (2*new_fit[0]*y_eval*xm_per_pix + new_fit[1])**2)**1.5)/np.absolute(2*new_fit[0])\n \n return rad_curvature\n\n\n\n# Calculate offset from lane center\ndef distance_from_lane(img, left_lane, right_lane):\n \n xm_per_pix = 3.7/700 # meteres per pixel in x dimension\n \n img_center = (img.shape[1]//2, img.shape[0])\n \n car_pos = ((left_lane[-1] + 
right_lane[-1])//2 - img_center[0]) * xm_per_pix\n \n return car_pos\n\n# Plot lane over original image\n\ndef draw_lane_line(image, left_lane_y, right_lane_y, left_fit_x, right_fit_x, M_inv):\n \n global frame_count\n frame_count += 1\n\n # Concatenate lane points\n combined_lane_left = np.array([np.flipud((np.transpose(np.vstack((left_fit_x,left_lane_y)))))])\n combined_lane_right = np.array([np.transpose(np.vstack((right_fit_x,right_lane_y)))])\n combined_lane_idx = np.hstack((combined_lane_left,combined_lane_right))\n \n # Draw lane lines and fill lane area\n img_draw = np.zeros_like(image)\n cv2.polylines(img_draw, np.int_([combined_lane_idx]), isClosed=False, color=(255,0,0), thickness = 40)\n cv2.fillPoly(img_draw, np.int_([combined_lane_idx]), (0,255,0))\n \n # Unwarp transformed image\n new_warp = cv2.warpPerspective(img_draw, M_inv, (image.shape[1], image.shape[0]))\n new_img = cv2.addWeighted(image, 1, new_warp, 0.5, 0)\n \n \n '''\n \n # Get Radius of Curvature\n left_lane_rad = lane_curvature(left_fit_x, left_lane_y)\n right_lane_rad = lane_curvature(right_fit_x, right_lane_y)\n \n \n average_lane_rad = (left_lane_rad+right_lane_rad)/2\n \n \n # Overlay Radius of Curvature (text) \n \n \n cv2.putText(new_img, \"Left Lane Radius: \" + str(\"%5d\" % left_lane_rad) + \" metres\", (100, 100), \n cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0,255,0))\n \n \n \n cv2.putText(new_img, \"Right Lane Radius: \" + str(\"%5d\" % right_lane_rad) + \" metres\", (100, 140), \n cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255))\n \n cv2.putText(new_img, \"Lane Radius: \" + str(\"%5d\" % average_lane_rad) + \" metres\", (100, 100), \n cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255))\n '''\n \n \n # Get car position\n vehicle_pos = distance_from_lane(new_img, left_fit_x, right_fit_x)\n \n # Overlay car position (text)\n\n \n cv2.putText(new_img, \"Distance from Road Centre: \" + str(\"%.3f\" % vehicle_pos) + \" metres\", (100, 200), \n cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0,0,255))\n \n return new_img\n\n### Video pipeline\ndef pipeline(image):\n \n # Threshold Image\n thresholded_image, M_inv = thresholded_img(image)\n img_size = [image.shape[1], image.shape[0]]\n \n # Obtain lane coordinates\n left_lane_idx, right_lane_idx = lane_coordinates(thresholded_image)\n \n # Identify Lane Lines\n left_lane_y, right_lane_y, left_fit_x, right_fit_x, left_fit, right_fit = identify_lane(left_lane_idx, \n right_lane_idx, img_size)\n \n # Draw lane lines\n final_img = draw_lane_line(image, left_lane_y, right_lane_y, left_fit_x, right_fit_x, M_inv)\n\n \n return final_img\n\n\n\ndef save():\n # Save the camera calibration result\n dist_pickle = {}\n dist_pickle[\"mtx\"] = camera_mtx\n dist_pickle[\"dist\"] = camera_dist\n dist_pickle[\"objpoints\"] = objpoints\n dist_pickle[\"imgpoints\"] = imgpoints\n pickle.dump( dist_pickle, open( \"dist_pickle.p\", \"wb\" ) )\n \n return 0\n \n \ndef load():\n \n # Read in the saved objpoints and imgpoints\n dist_pickle = pickle.load( open( \"dist_pickle.p\", \"rb\" ) )\n camera_mtx = dist_pickle[\"mtx\"]\n camera_dist = dist_pickle[\"dist\"]\n objpoints = dist_pickle[\"objpoints\"]\n imgpoints = dist_pickle[\"imgpoints\"]\n \n return 0\n\n'''\ndef make_video(video_path, file_out, ending):\n\n output = file_out\n length = ending\n clip1 = VideoFileClip(video_path).subclip(0,length)\n # NOTE: this function expects color images!!\n clip = clip1.fl_image(pipeline)\n clip.write_videofile(output, audio=False)\n\n'''\n\ndef make_video(video_path, file_out):\n\n output = file_out\n clip1 = 
VideoFileClip(video_path)\n clip = clip1.fl_image(pipeline)\n clip.write_videofile(output, audio=False)\n\n \n\n\n\n\n","sub_path":"Term_1/Project_4_Advanced_Lane_Finding/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":19947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"331535115","text":"import math\n\ndef euler1():\n\tz = 0\n\tfor a in range(1,1000):\n\t\tif (a % 3) == 0 or (a % 5) == 0:\n\t\t\tz += a\n\treturn z\n\t\ndef euler2():\n\tz = 0\n\tfor x in range(33):\n\t\tif fibonacci(x) % 2 == 0:\n\t\t\tz += fibonacci(x)\n\treturn z\t\n\t\nfib = [1,1]\n\ndef even():\n\tfor x in range(10):\n\t\tif x % 2 == 0:\n\t\t\tprint(x)\n\ndef fibonacci_gen(i):\n\tglobal fib\n\tfor x in range(i):\n\t\tf1 = fib[x]\n\t\tf2 = fib[x+1]\n\t\tfib.append(f1 + f2)\n\treturn fib\n\t\nfibonacci_gen(31)\n\ndef fibonacci(n):\n\tif n < 2:\n\t\treturn n\n\treturn fib[n-2] + fib[n-1]\n\t\n#print(math.e ** (math.sqrt(163) * math.pi))\n\nprint('1. ' + str(euler1()))\nprint('2. ' + str(euler2()))","sub_path":"python/euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"105181039","text":"from django.urls import path\nfrom.views import HomeView, PostView, Add_BlogEntry, Edit_BlogEntry, Delete_Entry, Add_Category, View_By_Category, LikeView, Add_Comment\n\n\nurlpatterns = [\n path('', HomeView.as_view(), name='home'),\n path('article/', PostView.as_view(), name='blogpost'),\n path('add-blogentry/', Add_BlogEntry.as_view(), name='add-blogentry'),\n path('add-category/', Add_Category.as_view(), name='add-category'),\n path('article/edit/', Edit_BlogEntry.as_view(), name='edit-blogentry'),\n path('article//delete', Delete_Entry.as_view(), name='delete-entry'),\n path('category//', View_By_Category, name='category'),\n path('like/', LikeView, name='post_like'),\n path('article//comment', Add_Comment.as_view(), name='add-comment'),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"196807987","text":"from flask import Blueprint\r\nfrom flask import jsonify, render_template, request\r\n\r\nimport os\r\nimport sys\r\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\r\n\r\nfrom .logger import SystemLogger\r\n\r\nfrom mail.mailer import Mailer\r\n\r\nlog = Blueprint('log', __name__)\r\nmail = Mailer()\r\nsystemLogger = SystemLogger()\r\n\r\n\r\n@log.route(\"/api/log/error\", methods=['GET'])\r\ndef send_err_file():\r\n attachment_file = systemLogger.zip_log_file()\r\n status_code = mail.send_email_with_attachment(\r\n {},\r\n attachment_file\r\n )\r\n print(status_code)\r\n\r\n if status_code == 202:\r\n msg = str(\"The error log has communicated to the developer! 
Please wait for a reply.\")\r\n return jsonify(msg=msg), 202\r\n else:\r\n msg = str(\"Unable to communicate error log!\")\r\n return jsonify(msg=msg), 500\r\n","sub_path":"src/Server/log/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"582846045","text":"# *-* coding: utf-8 *-*\n\nfrom settings import *\nfrom rendering import *\n\n\n\n# Проходит лабиринт зная массив dist\ndef solve(dist, fin_i, fin_j):\n global a, n, m, dx1, dy1\n \n # Прямой порядок изменения для анимации\n to_change = [(n + 2, m + 1), (n + 3, m + 1)]\n \n i = fin_i\n j = fin_j\n while not (i == 2 and j == 2):\n for k in range(4):\n ii, jj = i + dx1[k], j + dy1[k]\n if dist[ii][jj] + 1 == dist[i][j]:\n to_change.append((i, j))\n i, j = ii, jj\n break\n to_change += [(0, 2), (1, 2), (2, 2)]\n \n # Отрисовка\n for cell in to_change[::-1]:\n i, j = cell[0], cell[1]\n a[i][j] = 3\n if rendering_solve: update_one(i, j) # <<<---\n\n\n# Находит самую удалённую от входа клетку\ndef max_cell(dist):\n global n, m\n \n max_i = max_j = 2\n for i in range(n + 4):\n for j in range(m + 4):\n if dist[i][j] > dist[max_i][max_j]:\n max_i, max_j, = i, j\n if max_i == n + 1 and max_j == m + 1:\n max_i = n + 3\n \n return (max_i, max_j)","sub_path":"src/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"10502857","text":"import keras\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import Sequential, Model, Input\nfrom keras.layers import Input, ConvLSTM2D, Lambda, MaxPooling3D\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda, Reshape, Permute\nfrom keras.layers.convolutional import Convolution1D, Convolution2D, MaxPooling1D, MaxPooling2D\nfrom keras.layers.recurrent import GRU, LSTM\nfrom keras.layers.advanced_activations import ELU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import np_utils\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.constraints import max_norm\nfrom numpy import newaxis\nfrom keras.optimizers import SGD,Adam,Adagrad,Adadelta,RMSprop\nfrom utils.customCallbacks import MyEarlyStopping, MyModelCheckpoint\nfrom datetime import datetime\nimport tensorflow as tf\n\ndef outer_product(x):\n\t\"\"\"\n\tx list of 2 tensors, assuming each of which has shape = (size_minibatch, total_pixels, size_filter)\n\t\"\"\"\n\treturn keras.backend.batch_dot(x[0], x[1], axes=[1,1]) / x[0].get_shape().as_list()[1] \n\t#return tf.einsum('bom,bpm->bmop', x[0], x[1])\n\n# loading cnn model\ndef load_cnn_model(weights_path, X_train_shape, nb_classes):\n\tfrom .create_cnn_model import create_cnn_model\n\tmodel = create_cnn_model(X_train_shape, nb_classes)\n\tmodel.load_weights(weights_path)\n\treturn model\n\n# Create BCNN\ndef create_bcnn_model(X_train_shape, fold, nb_classes, ds, pp='specific'):\n\tcnn_weights = './{}_{}cls_{}_saved_models/stft_cnn_{}.hdf5'.format(ds, int(nb_classes), pp, fold)\n\tcnn_model = load_cnn_model(cnn_weights, X_train_shape, nb_classes)\n\n\tx_detector = cnn_model.layers[-8].output\n\tshape_detector = cnn_model.layers[-8].output_shape\n\tx_detector = Reshape((shape_detector[1]*shape_detector[2], shape_detector[3]))(x_detector)\n\tx_extractor = x_detector\n\t\n\tx = keras.layers.Lambda(outer_product)([x_detector, x_extractor])\n\tx = Flatten()(x)\n\t##\n\tx = 
BatchNormalization(axis=-1)(x)\n\t##\n\tx = Dropout(0.5)(x)\n\tx = Dense(256, activation='sigmoid')(x)\n\tx = Dropout(0.5)(x)\n\tpreds = Dense(nb_classes, activation = 'softmax')(x) \n\tmodel = Model(inputs=cnn_model.input,outputs=preds)\n\t\n\t# make sure none of the CNN or RNN layers are trained, only final FC layer is trained\n\tfor layer in cnn_model.layers:\n\t\tlayer.trainable = False\n\t\n\treturn model\n\ndef train_bcnn_model(n_folds, nb_classes, ds, pp):\n\tsummary = []\n\tfold = 1\n\n\tfor X_train, Y_train, X_val, Y_val, X_test, Y_test in n_folds:\n\t\tbcnn_model = create_bcnn_model(X_train.shape, fold, nb_classes, ds, pp)\n\t\tbcnn_model.summary()\n\t\tadam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\t\tbcnn_model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n\t\tclass_weights = {}\n\t\tfor i in range(nb_classes):\n\t\t\tclass_weights[i] = X_train.shape[0] / (np.sum(Y_train==i) + 1e-6)\n\n\t\tY_train = Y_train.astype('uint8')\n\t\tY_train = np_utils.to_categorical(Y_train, nb_classes)\n\t\tY_val = np_utils.to_categorical(Y_val, nb_classes)\n\t\tprint('Shape: x_train, y_train, X_val, y_val, X_test, y_test')\n\t\tprint(X_train.shape, Y_train.shape, X_val.shape, Y_val.shape, X_test.shape, Y_test.shape)\n\n\t\tfilename = \"./{}_{}cls_{}_saved_models/stft_bcnn_{}.hdf5\".format(ds, int(nb_classes), pp, fold)\n\n\t\tearly_stop = MyEarlyStopping(patience=10, verbose=1)\n\t\tcheckpointer = MyModelCheckpoint(filename, verbose=1, save_best_only=True)\n\n\t\tbcnn_model.fit(X_train, Y_train, batch_size=32, epochs=200, class_weight=class_weights, \n\t\t\tvalidation_data=(X_val, Y_val), callbacks=[early_stop, checkpointer])\n\n\t\tbcnn_model.load_weights(filename)\n\t\tpredictions = bcnn_model.predict(X_test, verbose=1)\n\t\ty_pred = np_utils.to_categorical(np.argmax(predictions, axis=1), nb_classes)\n\t\ty_true = np_utils.to_categorical(Y_test, nb_classes)\n\n\t\tfrom sklearn.metrics import f1_score\n\t\tf1_test = f1_score(y_true, y_pred, average='weighted')\n\t\tprint('Test F1-weighted score is:', f1_test)\n\n\t\tsummary.append(f1_test)\n\t\tprint (summary)\n\n\t\tnow = datetime.now()\n\t\tdate_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n\t\tsummary.append(date_time) \n\t\twith open('./{}_{}cls_{}_training_history/bcnn_training_history.txt'.format(ds, int(nb_classes), pp), 'w') as f:\n\t\t\tfor item in summary:\n\t\t\t\tf.write(\"%s\\n\" % item)\n\t\t\t\t\n\t\tfold += 1","sub_path":"models/create_bcnn_model.py","file_name":"create_bcnn_model.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"209721467","text":"import jwt\n# from view.response import Response\n# from config.redis_connection import RedisService\nfrom werkzeug.wrappers import Response\n\n\ndef response(success=False, message='something went wrong', data=[]):\n response = {'success': success,\n \"message\": message,\n \"data\": data, }\n return response\n\ndef is_authenticated(method):\n def authenticate_user(self,request):\n try:\n print(request.path, type(request.path))\n if request.path in ['/api/note']:\n\n token = request.headers['token']\n print(token)\n payload = jwt.decode(token, \"secret\", algorithms='HS256')\n print(payload)\n id_key = payload['id']\n print(id_key)\n # redis_obj = RedisService()\n # token = redis_obj.get(id_key)\n print(token, '------->token====..........>')\n if token is None:\n raise ValueError(\"You Need To Login First\")\n return 
method(self,request)\n else:\n return method(self,request)\n except jwt.ExpiredSignatureError:\n res = response(message=\"Signature expired. Please log in again.\")\n # Response(self).jsonResponse(status=404, data=res)\n Response(res)\n except jwt.DecodeError:\n res = response(message=\"DecodeError\")\n # Response(self).jsonResponse(status=404, data=res)\n Response(res)\n\n except jwt.InvalidTokenError:\n res = response(message=\"InvalidTokenError\")\n # Response(self).jsonResponse(status=404, data=res)\n Response(res)\n\n return authenticate_user\n","sub_path":"gateway/auth/login_authentication.py","file_name":"login_authentication.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"644374242","text":"import torch\nimport torch.nn as nn\n\n\n\nclass SELayer1d(nn.Module):\n def __init__(self, channel, reduction=2):\n super(SELayer1d, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool1d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ELU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1)\n return x * y.expand_as(x)\n\n\nclass SimpleNet1d(nn.Module):\n def __init__(self,in_channel=2064, num_classes=6):\n super(SimpleNet1d, self).__init__()\n\n p = 0.3\n m = 4\n self.conv1 = nn.Conv1d(in_channels=in_channel, out_channels=100*m, kernel_size=3, stride=1, padding=1)\n self.elu1 = nn.ELU()\n\n self.conv2 = nn.Conv1d(in_channels=100*m, out_channels=100*m, kernel_size=3, stride=1, padding=1)\n self.elu2 = nn.ELU()\n\n self.conv3 = nn.Conv1d(in_channels=100*m, out_channels=200*m, kernel_size=3, stride=1, padding=1)\n self.elu3 = nn.ELU()\n\n self.conv4 = nn.Conv1d(in_channels=200*m, out_channels=200*m, kernel_size=3, stride=1, padding=1)\n self.se4 = SELayer1d(200*m)\n self.elu4 = nn.ELU()\n self.dropout = nn.Dropout(p)\n\n self.conv5 = nn.Conv1d(in_channels=200*m, out_channels=6, kernel_size=3, stride=1, padding=1)\n\n def forward(self, input):\n output = self.conv1(input)\n #print('input size: ' + str(input.size()))\n output = self.elu1(output)\n #print(output.size())\n\n output = self.conv2(output)\n output = self.elu2(output)\n #print(output.size())\n\n output = self.conv3(output)\n output = self.elu3(output)\n #print(output.size())\n\n output = self.conv4(output)\n output = self.se4(output)\n output = self.elu4(output)\n #print(output.size())\n output = self.dropout(output)\n\n output = self.conv5(output)\n\n #print(output.size())\n #output = output.view(-1,6,)\n #print(output.size())\n return output\n\n\n\nclass SELayer3d(nn.Module):\n def __init__(self, channel, reduction=2):\n super(SELayer3d, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool3d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ELU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1, 1)\n return x * y.expand_as(x)\n\nclass SimpleNet3d(nn.Module):\n def __init__(self,in_channel3d = 6, num_classes=6):\n super(SimpleNet3d, self).__init__()\n\n m3d = 1\n\n self.conv3d1 = nn.Conv3d(in_channels=in_channel3d, out_channels=100*m3d, kernel_size= (3,1,1), stride=1, padding=(1,0,0))\n self.relu3d1 = nn.ReLU()\n\n self.conv3d2 = 
nn.Conv3d(in_channels=100*m3d, out_channels=200*m3d, kernel_size= (1,3,3), stride=(1,2,2), padding=(0,1,1))\n self.relu3d2 = nn.ReLU()\n\n\n self.conv3d3 = nn.Conv3d(in_channels=200*m3d, out_channels=200*m3d, kernel_size= (3,3,3), stride=(1,2,2), padding=(1,1,1))\n self.se3d3 = SELayer3d(200*m3d)\n self.relu3d3 = nn.ReLU()\n\n self.pooling2d = nn.AdaptiveAvgPool3d((None, 1, 1))\n\n # last classifier\n self.dropout = nn.Dropout(0.3)\n self.lastconv = nn.Conv1d(in_channels=200*m3d, out_channels=6, kernel_size=3, stride=1, padding=1)\n\n def forward(self, input3d):\n\n # branch 3D\n output3d = self.conv3d1(input3d)\n output3d = self.relu3d1(output3d)\n\n output3d = self.conv3d2(output3d)\n output3d = self.relu3d2(output3d)\n\n output3d = self.conv3d3(output3d)\n output3d = self.se3d3(output3d)\n output3d = self.relu3d3(output3d)\n\n output3d = self.pooling2d(output3d)\n\n\n output3d = torch.squeeze(output3d)\n\n\n output = self.dropout(output3d)\n\n output = self.lastconv(output)\n\n return output\n\n\nclass SimpleNet(nn.Module):\n def __init__(self,in_channel3d = 6, num_classes=6):\n super(SimpleNet, self).__init__()\n self.net3d = SimpleNet3d()\n self.net1d = SimpleNet1d()\n\n def forward(self, input1d, input3d):\n b, c, k, _, _ = input3d.size()\n\n\n output1d = self.net1d(input1d)\n input3d = input3d + (output1d.view(b, c, k, 1, 1)).expand_as(input3d)\n output = self.net3d(input3d) + output1d\n\n return output\n\n\n\nif __name__ == \"__main__\":\n import torch\n import logging\n logging.getLogger().setLevel(logging.DEBUG)\n # ---------\n net = SimpleNet()\n print (net)\n data1 = torch.autograd.Variable(torch.randn(2, 2054, 32))\n data2 = torch.autograd.Variable(torch.randn(2, 6, 32, 14, 14))\n output = net(data1, data2)\n torch.save({'state_dict': net.state_dict()}, './tmp.pth')\n print (output.shape)\n\n","sub_path":"cls_3/src/cnn/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"219907073","text":"import os\nimport re\n\nfrom ixiacr.lib import IxiaLogger\n\nixiacrlogger = IxiaLogger(__name__)\n\n\n# ##\n# Utility functions / classes\n# ##\n\ndef enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n reverse = dict((value, key) for key, value in enums.iteritems())\n enums['reverse_mapping'] = reverse\n return type('Enum', (), enums)\n\n\ndef is_number(x):\n try:\n float(x)\n return True\n except ValueError:\n return False\n\n\ndef is_integer(x):\n return isinstance(x, (int, long))\n\ndef gramatically_correct_list_join(l):\n if not isinstance(l, list):\n try:\n l = list(l)\n except:\n raise TypeError('%s is not list like' % l)\n\n if not len(l):\n return ''\n elif len(l) == 1:\n return l[0]\n elif len(l) == 2:\n return ' and '.join(l)\n else:\n return (', and ').join([(', ').join(l[:-1]), l[-1]])\n\n\n# ##\n# Base classes\n# ##\n\nclass BaseList(list):\n \"\"\"\n Abstract instance of a type specific list\n \"\"\"\n\n def __init__(self, items=None):\n if items:\n for item in items:\n if self._is_valid(item):\n self.append(item)\n\n def append(self, value):\n if self._is_valid(value):\n list.append(self, value)\n\n def __setitem__(self, index, value):\n if self._is_valid(value):\n list.__setitem__(self, index, value)\n\n def _is_valid(self, item):\n raise NotImplementedError('You should implement this')\n\n\nclass BaseObject(object):\n \"\"\"\n Generic object used for test modeling\n \"\"\"\n\n def __init__(self, 
**kwargs):\n self.id = id(self)\n self.name = None\n\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n ixiacrlogger.debug('Unrecognized attribute in BaseObject engine '\n 'model FIXME: %s, for %r' % (key, self))\n\n @property\n def version(self):\n return 1\n\n @property\n def is_valid(self):\n errors = list()\n self._do_validation(errors, self.__class__.__name__)\n\n if len(errors):\n return False\n else:\n return True\n\n def _do_validation(self, errors, prefix=None):\n pass\n\n def __repr__(self):\n attrs = dict()\n for attr in dir(self):\n if attr.startswith('_'):\n continue\n\n if not callable(getattr(self, attr)):\n attrs[attr] = getattr(self, attr)\n\n return '%s: %s' % (type(self), attrs)\n\n def __str__(self):\n if self.name:\n return self.name\n else:\n return type(self).__name__\n\n\nclass BaseTest(BaseObject):\n \"\"\"\n Generic Test object\n \"\"\"\n\n def __init__(self, **kwargs):\n self.enable_qos_results = False\n super(BaseTest, self).__init__(**kwargs)\n\n @property\n def errors(self):\n # Use do_validation to get current errors\n errors = list()\n self._do_validation(errors, 'Test')\n return errors\n\n def _do_validation(self, errors, prefix=None):\n prefix = '%s:%s' % (prefix, 'Test') if prefix else 'Test'\n\n\n###\n# Test Objects\n###\n\nclass SimpleTest(BaseTest):\n def __init__(self, **kwargs):\n self.duration = 60\n super(SimpleTest, self).__init__(**kwargs)\n\n def actionize(self, duration):\n for playlist in self.playlists:\n # Empty any current actions\n while len(playlist.actions):\n playlist.actions.pop(0)","sub_path":"IxiaCR/ixiacr/lib/engines/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"370844131","text":"# 用创建子弹的方式,创建敌人\nimport tkinter as tk, time, threading, random\nfrom PIL import Image, ImageTk\n\nwindow = tk.Tk()\nwindow.title('飞机大战')\n\n# 创建一张画布\ncanvas = tk.Canvas(window, width=480, height=800)\ncanvas.pack()\n\n# 创建一个图片对象\nimg_bg = Image.open(\"Images/background.png\")\n# 重制图片大小\nimg_bg = img_bg.resize((480, 852), Image.ANTIALIAS)\n# 创建相框\npimg_bg = ImageTk.PhotoImage(img_bg)\n# 用画布,将pimg_bg画出来\n# 100,100是pimg_gb绘制时中心点位置\ncanvas.create_image(480 / 2, 852 / 2, image=pimg_bg)\n\n# 创建飞机\n# 创建一个图片对象\nimg_hero = Image.open(\"Images/hero1.png\")\n# 重制图片大小\nimg_hero = img_hero.resize((102, 126), Image.ANTIALIAS)\n# 创建相框\npimg_hero = ImageTk.PhotoImage(img_hero)\n# 用画布,将pimg_bg画出来\n# 100,100是pimg_gb绘制时中心点位置\nhero = canvas.create_image(480 / 2, 852 - 200, image=pimg_hero)\n\n# 飞行控制\njet_x = 480 / 2\ndef fly_control(event):\n global jet_x\n x, y = 0, 0\n speed = 2\n if event.char == \"a\":\n x = -1 * speed\n if event.char == \"d\":\n x = 1 * speed\n jet_x += x\n\n # pimg_hero = ImageTk.PhotoImage(img_hero)\n canvas.move(hero, x, y)\n\n\n# 设置键盘监听\nentry = tk.Frame(window)\nentry.bind('', fly_control)\nentry.focus_set()\nentry.pack()\n\n\nimg_b = Image.open(\"Images/bullet1.png\")\n# 重制图片大小\nimg_b = img_b.resize((10, 22), Image.ANTIALIAS)\n# 创建相框\npimg_b = ImageTk.PhotoImage(img_b)\n# 用画布,将pimg_bg画出来\n# 100,100是pimg_gb绘制时中心点位置\nb_list = []\nfor i in range(10):\n #将创建的子弹添加到b_list里\n b_list.append([canvas.create_image(480 / 2, -200, image=pimg_b), 480 / 2, -200])\n\n\nimg_e = Image.open(\"Images/enemy1.png\")\n# 重制图片大小\nimg_e = img_e.resize((57, 43), Image.ANTIALIAS)\n# 创建相框\npimg_e = ImageTk.PhotoImage(img_e)\n# 用画布,将pimg_bg画出来\n# 100,100是pimg_gb绘制时中心点位置\ne_list = []\nfor i in 
range(10):\n #将创建的子弹添加到b_list里\n e_list.append([canvas.create_image(480 / 2, 1000, image=pimg_e), 480 / 2, 1000])\n\n\nnum = 0\ndef do_action():\n global b_list, num\n while True:\n time.sleep(0.1)\n num += 1\n if num % 10 == 0:\n shoot()\n if num % 20 == 0:\n attack()\n\n for b in b_list:\n if b[-1] > -199:\n canvas.move(b[0], 0, -40)\n b[-1] += -40\n\n for e in e_list:\n if e[-1] < 999:\n canvas.move(e[0], 0, 10)\n e[-1] += 10\n\n\n# 启动线程\nt = threading.Thread(target=do_action)\nt.start()\n\n# 射击\ndef shoot():\n global b_list\n for b in b_list:\n if b[-1] < -100:\n canvas.move(b[0], jet_x - b[1], 600 - b[-1])\n b[-1] = 600\n b[1] = jet_x\n break\n\n# 发射一名敌人\ndef attack():\n global e_list\n for e in e_list:\n if e[-1] > 900:\n # 生成随机的左右坐标,这里会生成一个随机整数,范围20到460\n x = random.randint(20, 480-20)\n canvas.move(e[0], x - e[1], -200 - e[-1])\n e[-1] = -200\n e[1] = x\n break\n\n\nwindow.mainloop()\n","sub_path":"JetWar/JetWar-09.py","file_name":"JetWar-09.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"218504331","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Course',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'', max_length=255)),\n ('location', models.CharField(default=b'', max_length=255)),\n ('access_study', models.CharField(default=b'DP', max_length=2, choices=[(b'DP', b'Diploma'), (b'DGG', b'Degree')])),\n ('access_type', models.CharField(default=b'FR', max_length=2, choices=[(b'FR', b'Free'), (b'TS', b'Test'), (b'PG', b'Programmated')])),\n ('lenguage', models.CharField(default=b'', max_length=255)),\n ('description', models.TextField(default=b'')),\n ],\n ),\n migrations.CreateModel(\n name='Exam',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'', max_length=255)),\n ('year', models.IntegerField()),\n ('cfu', models.IntegerField()),\n ('course', models.ForeignKey(to='app_easyuniversity.Course')),\n ],\n ),\n migrations.CreateModel(\n name='Review',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255)),\n ('body', models.TextField()),\n ('created_time', models.DateTimeField(auto_now=True)),\n ('is_draft', models.BooleanField(default=True)),\n ('is_approved', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('picture', models.ImageField(default=b'man-avatar128x128.png', upload_to=b'profile_images', blank=True)),\n ('course', models.ForeignKey(to='app_easyuniversity.Course')),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='University',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'', max_length=255)),\n ('region', models.CharField(default=b'', max_length=255)),\n ('province', 
models.CharField(default=b'', max_length=2)),\n ('story', models.TextField(default=b'')),\n ('website', models.CharField(default=b'', max_length=255)),\n ('identity', models.TextField(default=b'')),\n ('logo_link_temp', models.CharField(default=b'', max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name='ExamReview',\n fields=[\n ('review_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='app_easyuniversity.Review')),\n ('difficult', models.IntegerField()),\n ('course_valutation', models.IntegerField()),\n ('date', models.DateField()),\n ('valutation', models.CharField(max_length=255)),\n ('days', models.IntegerField()),\n ],\n bases=('app_easyuniversity.review',),\n ),\n migrations.AddField(\n model_name='review',\n name='created_by',\n field=models.ForeignKey(to='app_easyuniversity.Student'),\n ),\n migrations.AddField(\n model_name='course',\n name='university',\n field=models.ForeignKey(related_name='courses', to='app_easyuniversity.University'),\n ),\n ]\n","sub_path":"app_easyuniversity/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"38991217","text":"from HuffmanTree import *\nfrom operator import itemgetter\nimport os.path\nimport codecs\n\ninput_file_name = \"input.txt\"\ncode_lookup_table_file_name = \"code_lookup_table.txt\"\nrestored_input_file_name = \"restored_input_file.txt\"\ncompressed_asci_text_file_name = \"compressed_asci_text_file.txt\"\n\ndef build_code_dict():\n\n fp = open(code_lookup_table_file_name, 'r')\n contents = fp.read()\n fp.close()\n\n code_dict = {}\n char_list = []\n temp_code = \"\"\n length = len(contents)\n i = 0\n while i < (length - 3):\n temp_code = contents[i]\n char_list.append(i)\n temp_write = \"\"\n i += 2\n #Now in the codeword part\n while contents[i] == '0' or contents[i] == '1':\n if i >= (length - 1): break \n temp_write += contents[i]\n i += 1\n i += 1\n code_dict[temp_code] = temp_write \n\n return char_list, code_dict\n\n\n\ndef build_tree_from_codes(char_list, code_dict):\n\n huffman_tree = Tree(0)\n void_root = Node(None, -1, None, None, None, \"\")\n void_left = Node(None, -1, None, None, void_root, void_root.code_word + \"0\")\n void_right = Node(None, -1, None, None, void_root, void_root.code_word + \"1\")\n void_root.left = void_left\n void_root.right = void_right \n huffman_tree.add_node(void_root)\n huffman_tree.root = void_root\n \n for key in code_dict.keys():\n code = code_dict[key]\n huffman_tree.rebuild_tree(huffman_tree.root, key, code, 0)\n\n #huffman_tree.print_tree(huffman_tree.root)\n return huffman_tree\n\n\ndef get_encoded_file_contents():\n\n fp_encoded = codecs.open(compressed_asci_text_file_name, 'r', encoding='utf8')\n contents = fp_encoded.read()\n #print(\"Encoded:\\n'''\\n\" + contents + \"\\n'''\\n\")\n fp_encoded.close()\n\n ascii_read = \"\"\n for char in contents:\n binary = \"{0:b}\".format(ord(char))\n binary = \"0\"*(8 - len(binary)) + binary\n ascii_read += binary\n\n return ascii_read\n\ndef Decode(code_tree):\n ''' Restore input file from compressed hex file.\n Return true for successful, false for unsuccesful.\n '''\n contents = get_encoded_file_contents()\n\n #Navigate tree, associating codes with chars and writing to a total_write str\n i = 0\n temp_write = \"\"\n total_write = \"\"\n while i < len(contents):\n temp_write += contents[i]\n search_result = 
code_tree.search_tree(code_tree.root, temp_write, 0)\n if search_result != None:\n total_write += search_result\n temp_write = \"\"\n i += 1\n \n #Write the decoded text to a reconstructed text file\n fp_restore = open(restored_input_file_name, 'w')\n fp_restore.write(total_write)\n fp_restore.close()\n\n #Compare reconstructed text to input text\n fp_input = open(input_file_name, 'r')\n contents = fp_input.read()\n fp_input.close()\n\n return contents == total_write\n\n\ndef main():\n\n if not os.path.exists(code_lookup_table_file_name) or not os.path.exists(compressed_asci_text_file_name):\n raise(\"Exception: One or more of the required files do not exist\")\n return\n\n print(\"Starting to encode \\\"\" + compressed_asci_text_file_name + \"\\\"...\")\n char_list, code_dict = build_code_dict()\n \n print(\"Reconstructing the Huffman Encoding tree...\")\n code_tree = build_tree_from_codes(char_list, code_dict)\n print(\"Writing decoded output to \\\"\" + restored_input_file_name + \"\\\"...\")\n\n if Decode(code_tree):\n print(\"Successfully decoded the input file!\")\n else:\n print(\"Did not successfully decoded the input file!\")\nmain()\n","sub_path":"Decoder.py","file_name":"Decoder.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"110769842","text":"\"\"\"\n\n 单向循环列表\n\"\"\"\n\n\nclass Node:\n def __init__(self, elem):\n self.elem = elem\n self.next = None\n\n\nclass SingleCycLinkList:\n def __init__(self):\n self._head = None\n\n def is_empty(self):\n return self._head is None\n\n def length(self):\n cur = self._head\n\n count = 0\n while cur is not None:\n count += 1\n cur = cur.next\n\n return count\n\n def travel(self):\n cur = self._head\n\n # 如果时循环链表则最后节点的next 是指向 head 的, 所以判断下一个节点不等于head 链表就没有到头\n while cur is not None:\n print(cur.elem)\n cur = cur.next\n\n if cur == self._head:\n break\n\n def add(self, elem):\n node = Node(elem)\n\n # 如果头节点为none\n if self.is_empty():\n self._head = node\n\n node.next = self._head\n\n else:\n node.next = self._head\n cur = self._head\n\n while cur.next != self._head:\n cur = cur.next\n\n # 将最后节点next 指向新加入的node ,形成环\n cur.next = node\n\n self._head = node\n\n def append(self, elem):\n node = Node(elem)\n\n if self.is_empty():\n self._head = node\n node.next = self._head\n\n else:\n cur = self._head\n\n while cur.next != self._head:\n cur = cur.next\n\n cur.next = node\n\n # 将新加入的尾节点指向头节点, 形成环\n node.next = self._head\n\n def insert(self, elem, pos):\n if pos <= 0:\n self.add(elem)\n\n elif pos > (self.length() - 1):\n self.append(elem)\n\n else:\n node = Node(elem)\n\n cur = self._head\n count = 0\n\n # 获取尾节点\n while count < (pos - 1):\n count += 1\n cur = cur.next\n\n # 将旧尾节点指向的节点交给新尾节点\n node.next = cur.next\n cur.next = node\n\n def remove(self, elem):\n if self.is_empty():\n return\n\n cur = self._head\n pre = None\n\n while cur.next != self._head:\n if cur.elem == elem:\n\n # 判断是否为头节点\n if cur == self._head:\n rear = self._head\n\n while rear.next != self._head:\n rear = rear.next\n\n self._head = cur.next\n rear.next = self._head\n\n else:\n pre.next = cur.next\n\n return\n\n else:\n pre = cur\n cur = cur.next\n\n if cur.elem == elem:\n if cur == self._head:\n self._head = None\n\n else:\n pre.next = self._head\n\n def search(self, elem):\n if self.is_empty():\n return False\n\n cur = self._head\n\n if cur.item == elem:\n return True\n\n while cur.next != self._head:\n cur = cur.next\n\n if cur.elem == elem:\n return True\n\n return 
False\n\n\nif __name__ == '__main__':\n ll = SingleCycLinkList()\n ll.add(1)\n ll.add(2)\n ll.append(3)\n ll.insert(2, 4)\n ll.insert(4, 5)\n ll.insert(0, 6)\n print(\"length:\", ll.length())\n\n ll.travel()\n print(ll.search(3))\n\n print(ll.search(7))\n\n ll.remove(1)\n print(\"length:\", ll.length())\n\n ll.travel()\n","sub_path":"05algstruct/structs/singlecyclinklist.py","file_name":"singlecyclinklist.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"438645345","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom fuelclient import objects\nfrom fuelclient.v1 import base_v1\n\n\nclass SequenceClient(base_v1.BaseV1Client):\n _entity_wrapper = objects.Sequence\n\n executor_path = _entity_wrapper.instance_api_path + 'execute/'\n\n def create(self, release_id, name, graph_types):\n \"\"\"Creates new sequence object.\n\n :param release_id: the release object id\n :param name: the sequence name\n :param graph_types: the types of graphs\n :returns: created object\n \"\"\"\n data = {'name': name}\n graphs = data['graphs'] = []\n for graph_type in graph_types:\n graphs.append({'type': graph_type})\n\n return self.upload(release_id, data)\n\n def upload(self, release_id, data):\n \"\"\"Creates new sequence object from data.\n\n :param release_id: release object id\n :param data: the sequence properties\n :returns: created object\n \"\"\"\n url = self._entity_wrapper.class_api_path\n data['release'] = release_id\n return self.connection.post_request(url, data)\n\n def download(self, sequence_id):\n \"\"\"Get raw content of sequence.\"\"\"\n return super(SequenceClient, self).get_by_id(sequence_id)\n\n def update(self, sequence_id, name=None, graph_types=None):\n \"\"\"Updates existing object.\n\n :param sequence_id: the sequence object id\n :param name: new name\n :param graph_types: new graph types\n :returns: updated object or False if nothing to update\n \"\"\"\n data = {}\n if name:\n data['name'] = name\n if graph_types:\n graphs = data['graphs'] = []\n for graph_type in graph_types:\n graphs.append({'type': graph_type})\n\n if not data:\n return False\n\n url = self._entity_wrapper.instance_api_path.format(sequence_id)\n return self.connection.put_request(url, data)\n\n def get_by_id(self, sequence_id):\n \"\"\"Gets formatted sequence data by id.\"\"\"\n data = super(SequenceClient, self).get_by_id(sequence_id)\n data['graphs'] = ', '.join(g['type'] for g in data['graphs'])\n return data\n\n def delete_by_id(self, sequence_id):\n \"\"\"Deletes existed object.\n\n :param sequence_id: the sequence object id\n \"\"\"\n url = self._entity_wrapper.instance_api_path.format(sequence_id)\n self.connection.delete_request(url)\n\n def execute(self, sequence_id, env_id, **kwargs):\n \"\"\"Executes sequence on cluster.\n\n :param sequence_id: the sequence object id\n :param env_id: the cluster id\n :param kwargs: options - force, 
dry_run and noop.\n \"\"\"\n data = {'cluster': env_id}\n data.update(kwargs)\n url = self.executor_path.format(sequence_id)\n deploy_data = self.connection.post_request(url, data)\n return objects.DeployTask.init_with_data(deploy_data)\n\n\ndef get_client(connection):\n return SequenceClient(connection)\n","sub_path":"fuelclient/v1/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"610989255","text":"#!/usr/bin/env python\nimport sys\nimport struct\n\n\ndef main():\n if len(sys.argv) < 3:\n print(\"Usage: tfrecord2idx \")\n exit()\n\n infile = open(sys.argv[1], \"rb\")\n outfile = open(sys.argv[2], \"w\")\n\n while True:\n current = infile.tell()\n try:\n byte_len = infile.read(8)\n if len(byte_len) == 0:\n break\n infile.read(4)\n proto_len = struct.unpack(\"q\", byte_len)[0]\n infile.read(proto_len)\n infile.read(4)\n outfile.write(str(current) + \" \" + str(infile.tell() - current) + \"\\n\")\n except:\n print(\"Failed to parse TFRecord.\")\n break\n\n infile.close()\n outfile.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tfrecord/tools/tfrecord2idx.py","file_name":"tfrecord2idx.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"424758001","text":"#!/usr/bin/env python\n\n# import reflex module\nimport reflex\n\nimport sys\n\nif __name__ == '__main__':\n\n # create an option parser\n parser = reflex.ReflexIOParser()\n\n # define inputs (Note: you must define at least long option)\n parser.add_input(\"-i\", \"--input1\")\n parser.add_input(\"-j\", \"--input2\")\n\n # define outputs (Note: you must define at least long option)\n parser.add_output(\"-o\", \"--output1\")\n parser.add_output(\"-p\", \"--output2\")\n\n # get inputs from the command line\n inputs = parser.get_inputs()\n # get output variables\n outputs = parser.get_outputs()\n\n # read inputs and assign outputs\n if hasattr(inputs, \"input1\"):\n outputs.output1 = inputs.input1\n else:\n outputs.output1 = 'test1'\n\n if hasattr(inputs, \"input2\"):\n outputs.output2 = inputs.input2\n else:\n outputs.output2 = 'test2'\n\n # print outputs\n parser.write_outputs()\n\n sys.exit()\n","sub_path":"reflexy/base/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"473806288","text":"'''\r\n WAP in python to accept a number from user and\r\n display table of that number.\r\n'''\r\n\r\nn=int(input(\"Enter a number\"))\r\n\r\ni=1\r\nwhile(i<=10):\r\n print(n*i) \r\n i=i+1\r\n","sub_path":"19.09.2020 python/while_demo5.py","file_name":"while_demo5.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"197700001","text":"from Database_connect import *\nfrom function_admin import *\nfrom user_display import *\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\ndict={}\nwhile True:\n print(\" .................................................. \")\n print(\" .................................................. 
\")\n\n print(color.BLUE+color.BOLD + ' PHONE BOOK ' + color.END)\n print(\" .................................................. \")\n print(\" .................................................. \")\n print(\"1.User\\n2.Admin\\n3.Exit\")\n choice = int(input(\"Identify Yourself first\"))\n if choice ==1:\n if len(dict.keys())==0:\n print(\"nothing to show\")\n else:\n select_records()\n display_records()\n name=input(\"Enter name to search required phone number : \")\n ph_no=dict.get(name,\"NO data found\")\n print(ph_no)\n elif choice ==2:\n admin_login()\n elif choice ==3:\n break\n else:\n print(\"Invalid choice\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"215514885","text":"from __future__ import print_function\nfrom array_methods import ArrayMethods as np1\nfrom logger import MyLogger\n\nimport cv2\nimport os\nimport pickle\n\n\nclass Calibrator:\n def calibrate_camera(self):\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n objp = np1.zeros(9 * 6, 3)\n objp[:, :2] = np1.mgrid(0, 9, 0, 6).T.reshape(-1, 2)\n\n objpoints = []\n imgpoints = []\n\n cap = cv2.VideoCapture(0)\n\n while (True):\n ret, frame = cap.read()\n\n img = frame\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n\n if ret:\n objpoints.append(objp)\n\n corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)\n imgpoints.append(corners2)\n\n img = cv2.drawChessboardCorners(img, (9, 6), corners2, ret)\n chess = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\n cv2.imshow('chess', chess)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n сap.release()\n cv2.destroyAllWindows()\n MyLogger.info(\"Run calibrateCamera:\")\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n np1.save('mtx.npy', mtx)\n np1.save('dist.npy', dist)\n MyLogger.info(\"mtx.npy and dist.npy saved\")\n self.save_calibr((ret, mtx, dist, rvecs, tvecs))\n\n @staticmethod\n def save_calibr(data):\n if os.path.exists(os.getcwd() + '/configs/camera.ini'):\n os.rename(os.getcwd() + '/configs/camera.ini', os.getcwd() + '/configs/camera.ini.bak')\n MyLogger.info(\"/configs/camera.ini backed up\")\n with open(os.getcwd() + '/configs/camera.ini', 'wb') as f:\n pickle.dump(data, f)\n MyLogger.info(\"/configs/camera.ini saved\")\n","sub_path":"swagger_server/calibrate_camera.py","file_name":"calibrate_camera.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"535327564","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 21 21:26:40 2020\n\n@author: Ahmed\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 22:37:15 2020\n\n@author: Ahmed\n\"\"\"\nfrom sklearn.datasets import load_iris\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import LabelEncoder\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.cluster import MiniBatchKMeans\n# #############################################################################\n#read the dataset\ndataset = pd.read_csv('E:\\Master_external/Data.csv')\ndataset.head()\nprint(\"feature shape: \", dataset.shape)\ndataset.describe()\nlbl=LabelEncoder()\n\n#convert 
each column to number\ndataset.gender=lbl.fit_transform(dataset.gender)\ndataset.Nationality=lbl.fit_transform(dataset.Nationality)\ndataset.PlaceofBirth=lbl.fit_transform(dataset.PlaceofBirth)\ndataset.StageID=lbl.fit_transform(dataset.StageID)\ndataset.GradeID=lbl.fit_transform(dataset.GradeID)\ndataset.SectionID=lbl.fit_transform(dataset.SectionID)\ndataset.Topic=lbl.fit_transform(dataset.Topic)\ndataset.Semester=lbl.fit_transform(dataset.Semester)\ndataset.Relation=lbl.fit_transform(dataset.Relation)\ndataset.ParentAnsweringSurvey=lbl.fit_transform(dataset.ParentAnsweringSurvey)\ndataset.ParentschoolSatisfaction=lbl.fit_transform(dataset.ParentschoolSatisfaction)\ndataset.StudentAbsenceDays=lbl.fit_transform(dataset.StudentAbsenceDays)\ndataset.Class=lbl.fit_transform(dataset.Class)\n\n#extract the training data and the actual output\nX = dataset.iloc[:,2:].values\n\n\n\nMiniBatchKMeansModel = MiniBatchKMeans(n_clusters=3,batch_size=30,init='random') #also can be random\nkmean=MiniBatchKMeansModel.fit(X)\n\nprint('KMeansModel Train Score is : ' , kmean.score(X))\nprint('KMeansModel labels are : ' , kmean.labels_)\nprint('KMeansModel intertia is : ' , kmean.inertia_)\nprint('KMeansModel No. of iteration is : ' , kmean.n_iter_)\n\nmydict = {i: np.where(kmean.labels_ == i)[0] for i in range(kmean.n_clusters)}\n\n# Transform this dictionary into list (if you need a list as result)\ndictlist = []\nfor key, value in mydict.items():\n temp = [key,value]\n dictlist.append(temp)\n\n#extract the values of evry index in the data \n######### SVM Classifier ###########\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\n\n\nfor i in range(len(mydict)):\n dataset=X[mydict[i]]\n X_train2=dataset[:,:-1]\n y_train2=dataset[:,-1]\n \n #create object for SVC\n svm = SVC(kernel='rbf', C=10.0, random_state=33)\n X_train, X_test, y_train, y_test = train_test_split(X_train2, y_train2, test_size=0.2, random_state=33)\n svm.fit(X_train, y_train)\n \n print(( X_train.shape, X_test.shape, y_train.shape, y_test.shape))\n \n #predict the test data\n y_pred = svm.predict(X_test)\n print(\"the confution matrix is : \",confusion_matrix(y_test, y_pred))\n print('Misclassified samples: %d' % (y_test != y_pred).sum())\n print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))\n print(classification_report(y_test, y_pred)) \n","sub_path":"Data/2.6 K-Means/ةهىهلاشفؤانةثشىس.py","file_name":"ةهىهلاشفؤانةثشىس.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"179164316","text":"#! 
python3\n'''\nScrapet wikipedia pagina's voor alle kandidaten\nversion 1, 10/03/2021\nby Anna van Harmelen\n'''\n\nimport requests\nimport json\nfrom tqdm import tqdm\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime\nimport dateparser\n\ndef wikipedia(inputfile=None, outputfile=None):\n\t\n\tif inputfile is not None:\n\t\twith open(inputfile, 'r') as kandidaten:\n\t\t\tkandidaten = json.load(kandidaten)\n\telse:\n\t\tkandidaten = json.load(sys.stdin)\n\n\tcounter = 0\n\n\tfor person in tqdm(kandidaten.values()):\n\t\t\n\t\tnaam_zonder_spaces = person['naam'].replace(' ', '_')\n\t\t\n\t\t# Check for Ferd\n\t\tif naam_zonder_spaces == \"Ferd_Grapperhaus\": #lelijk lelijk lelijk sorry\n\t\t\tnaam_zonder_spaces = \"Ferdinand_Grapperhaus\"\n\t\t\n\t\t# Create wikipedia url\n\t\turl = \"https://nl.wikipedia.org/wiki/\" + naam_zonder_spaces\n\t\t\n\t\t# Scrape wikipage for info\n\t\tresponse = requests.get(url)\n\t\tif response.status_code == 404:\n\t\t\t# then it doesn't exist, so:\n\t\t\tcontinue\n\t\t\n\t\t# Only add url if it actually exists\n\t\tperson['links']['wikipedia'] = url\n\t\tsoup = BeautifulSoup(response.text, features='html.parser')\n\t\ttable = soup.find('table')\n\t\tif table is not None:\n\t\t\tinfo_table = table.get_text()\n\t\t\tinfo_table = re.sub('\\n+', '\\n', info_table)\n\t\t\tinfo_list = info_table.split(\"\\n\")\n\t\t\tbirthday = get_wiki_age(info_list)\n\t\t\tif birthday is not None:\n\t\t\t\tcounter += 1\n\t\t\t\tperson['geboortedatum'] = get_wiki_age(info_list)\n\t\t\t\t# If birthday was found, remove age (because having both is redundant)\n\t\t\t\tperson.pop('leeftijd', None)\n\n\tif outputfile is not None:\n\t\twith open(outputfile, 'w') as jsonfile:\n\t\t\tjson.dump(kandidaten, jsonfile, indent=2)\n\t\t\tprint(file=jsonfile) # trailing newling\n\telse:\n\t\tjson.dump(kandidaten, sys.stdout, indent=2)\n\t\tprint() # trailing newling\n\n\tprint(counter, \"birthdays successfully found!\")\n\ndef get_wiki_age(info_list):\n\tdate_present = False\n\n\tif 'Geboren' in info_list:\n\t\tdate_str = info_list[info_list.index('Geboren') + 1]\n\t\tdate_present = True\n\n\tif 'Geboortedatum' in info_list:\n\t\tdate_str = info_list[info_list.index('Geboortedatum') + 1]\n\t\tdate_present = True\n\n\tif date_present:\n\n\t\t# Necessary because some people add birthplace after birthdate\n\t\tif len(date_str) > 17:\n\t\t\tcounter = 0\n\t\t\tfor i, letter in enumerate(date_str):\n\t\t\t\tif letter.isnumeric():\n\t\t\t\t\tcounter += 1\n\t\t\t\telif letter.isalpha():\n\t\t\t\t\tcounter = 0\n\t\t\t\tif counter >= 4:\n\t\t\t\t\tidx = i\n\t\t\t\t\tbreak\n\t\t\tdate_str = date_str[0:idx+1]\n\n\t\t# Necessary because some people add birthplace before birthdate\n\t\tif ',' in date_str:\n\t\t\tdate_list = date_str.split(\", \")\n\t\t\tdate_str = date_list[-1]\n\n\t\tdate = dateparser.parse(date_str).date().isoformat()\n\t\t\n\t\treturn date\n\nif __name__ == '__main__':\n\tARGV_OVERRIDE = None\n\tARGV_OVERRIDE = ['-i', 'kandidaten_2021_kiesraad/kandidaten.json', '-o', 'kandidaten_2021_kiesraad/kandidaten_nieuw_wikipedia.json']\n\n\tfrom argparse import ArgumentParser, FileType\n\tparser = ArgumentParser(description=__doc__)\n\t# inputfile, outputfile\n\tparser.add_argument('-i', '--inputfile', help=\"file to add new info to, defaults to stdin.\", default=None)\n\tparser.add_argument('-o', '--outputfile', help=\"file to outputfile to, defaults to stdout.\", 
default=None)\n\n\twikipedia(**vars(parser.parse_args(ARGV_OVERRIDE)))\t","sub_path":"kandidaten_add-wikipedia.py","file_name":"kandidaten_add-wikipedia.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"90505105","text":"# @Time : 2021/2/9 11:55 AM \n# @Author : LinXiaofei\n# @File : HistoryDM.py\n\nfrom backend.dm.DialogManagement import DialogManagement\nfrom backend.subject.history_business import *\nfrom backend.data.data_process import read_file\nimport configparser\n\n\n\nclass HistoryDM(object):\n\n def __init__(self):\n config = configparser.ConfigParser()\n content = config.read(\"../backend/config.ini\")\n\n config['DEFAULT']['subject'] = '历史'\n\n with open('../backend/config.ini', 'w') as file:\n config.write(file)\n self.dm = DialogManagement()\n #year_pro = read_file(\"../backend/data/历史/year_pro.csv\")\n self.dm.setConpro(['内容','概况','定义','简介','定义','发展','过程','历史意义','意义'])\n self.history_business = HistoryBussiness()\n print(\"HistoryDM????????????????????????????\")\n\n def doNLU(self,words):\n\n\n if '多少年' in words:\n words = words.replace('多少年','几年')\n ans = []\n\n\n flag,ans = self.history_business.compareTime(words)\n\n if flag:\n print(\"compareTime\",ans)\n return ans\n\n entity, book_ans, task_type = self.history_business.dealBookSBV(words)\n print(entity, book_ans, task_type,\"entity, ans, task_type\")\n ans = []\n\n if entity:\n ans = self.dm.dealNormal(entity, book_ans)\n\n\n if '谁' in words or '哪位' in words or '哪个人' in words or '哪一个人' in words or '哪一位' in words:\n nluresult = self.history_business.dealWho(words)\n ans = self.dm.dealContent('单人',nluresult)\n\n\n elif '哪年' in words or '哪一年' in words or '几年' in words or '多少年' in words:\n\n ans = self.history_business.dealYear(words)\n\n elif entity:\n ans = self.dm.dealNormal(entity, book_ans)\n elif self.history_business.checkSplitEnt(words):\n ans = self.history_business.dealCombineEntForCon(words)\n\n\n\n if len(ans)==0 or ans[0] == 0:\n ans = self.dm.doNLU(words)\n\n if ans[0] == 0:\n ans = self.history_business.ansAgain(ans[1],ans[2])\n\n return ans\n\n def getHisType(self):\n type_list = read_file(\"../backend/data/历史/his_type.csv\")\n\n type_list = list(sorted(type_list, key=lambda i: len(i)))\n print(type_list)\n\n\n return type_list\n\n def getEntity(self,etype):\n ent_list = self.history_business.getEntitybyType(etype)\n return ent_list\n\n def AEntityInformation(self, entity):\n return self.dm.AEntityInformation(entity)\n\n def AEntityRelation(self, entity):\n return self.dm.AEntityRelation(entity)\n\n\n","sub_path":"backend/dm/HistoryDM.py","file_name":"HistoryDM.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"552796275","text":"import psutil\nimport sys\nif not (psutil.LINUX or psutil.WINDOWS or psutil.OSX):\n sys.exit(\"platform not supported\")\n\n\ndef bytes2human(n):\n symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for index, symbol in enumerate(symbols):\n prefix[symbol] = 1 << (index + 1) * 10\n for symbol in reversed(symbols):\n if n >= prefix[symbol]:\n value = float(n) / prefix[symbol]\n return (\"%.1f%s\" % (value, symbol))\n return \"%sB\" % n\n\n\ndef main():\n ad_pids = []\n procs = []\n for process in psutil.process_iter():\n try:\n memoInfo = process.memory_full_info()\n proInfo = process.as_dict(attrs=[\"cmdline\", \"username\"])\n except psutil.AccessDenied:\n 
ad_pids.append(process.pid)\n except psutil.NoSuchProcess:\n pass\n else:\n process._uss = memoInfo.uss\n process._rss = memoInfo.rss\n if not process._uss:\n continue\n \"\"\" \n OSX and windows do not have pss and swap metric\n \"\"\"\n process._pss = getattr(memoInfo, 'pss', '')\n process._swap = getattr(memoInfo, 'swap', '')\n process._info = proInfo\n procs.append(process)\n procs.sort(key=lambda process: process._uss)\n templ = \"%-7s %-7s %-30s %7s %7s %7s %7s\"\n print(templ % (\"PID\", \"User\", \"Cmdline\", \"USS\", \"PSS\", \"Swap\", \"RSS\"))\n print(\"=\" * 78)\n for process in procs:\n line = templ % (\n process.pid,\n process._info[\"username\"][:7],\n \" \".join(process._info['cmdline'])[:30],\n bytes2human(process._uss),\n bytes2human(process._pss) if process._pss else \"\",\n bytes2human(process._swap) if process._swap else \"\",\n bytes2human(process._rss) if process._rss else \"\"\n )\n print(line)\n if ad_pids:\n print(\"Warining: access denied for % pids\" % len(ad_pids))\n\n # USS = bytes2human(process.memory_full_info()[7])\n # PSS = bytes2human(process.memory_full_info()[8])\n # Swap = bytes2human(process.memory_full_info()[9])\n # RSS = bytes2human(process.memory_full_info()[0])\n # print(\"%s %s %s %s %s %s %s\" % (str(pid), process.name(),\n # process.cmdline(), USS, PSS, Swap, RSS))\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"第一章/procsmem.py","file_name":"procsmem.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"386406372","text":"'''\n问题描述:给定一串只含有小写形式的、排序过的 letters,\n并且给定一个目标字母 target ,请找出在给定字母串中,\n大于目���字母的最小的那一个字母。\n\n在本题中,字母是绕回编址的(即“z”后一位重新变为“a”)。\n比如说,如果target = 'z',而给定字母串为letters = ['a', 'b'],\n那么答案为“a”。\n\n示例:\n输入:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"a\"\n输出: \"c\"\n\n思路:两种情况 1.没有回绕,那么找到第一个比target大的返回即可 2.即便是回绕,返回第一个就可以了\n\n'''\n\n\ndef nextGreatestLetter(letters, target):\n # Write your code here\n for ch in letters:\n if ch > target:\n return ch\n else:\n continue","sub_path":"1056_done.py","file_name":"1056_done.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"562905446","text":"#!/usr/bin/env python\nfrom join import load_transformed, AttackRun\n\nif __name__ == \"__main__\":\n runs = load_transformed(\"results/runs.pickle\")\n present = {}\n for run in runs:\n val = present.setdefault((run.dataset, run.bounds, run.method, run.recenter, run.c), 0)\n present[(run.dataset, run.bounds, run.method, run.recenter, run.c)] += 1\n for k, v in sorted(present.items()):\n print(\"_\".join(k), v)\n","sub_path":"experiments/random/present.py","file_name":"present.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"421638064","text":"import pandas as pd\nimport numpy as np\nimport random\nimport config\n\nfrom sklearn.preprocessing import StandardScaler\n\ndef getLstmData(data,train_index,test_index,site='陵东街',start=0):\n\n train_lstm_direction1 = []\n train_lstm_direction2 = []\n train_target = []\n\n test_lstm_direction1 = []\n test_lstm_direction2 = []\n test_target = []\n\n group_data = data.groupby('mname')\n # data = data[data['mname'] == site].sort_values('mtime')\n # data = data[config.reorder_col + ['month', 'day'] + ['label']].values.tolist()\n for (site, data) in group_data:\n if site not in config.sitelist:\n continue\n data = 
data.sort_values('mtime')\n data = data[config.reorder_col + ['month', 'day'] + ['label']].values.tolist()\n for i in range(144,len(data)):\n lstm1 = []\n lstm2 = []\n month = data[i][-3]\n\n if month not in config.month:\n continue\n day = data[i][-2]\n target = data[i][-1]\n for pre in range(config.timestep1,0,-1):\n lstm1.extend(data[i-pre][:-3])\n for pre in range(config.timestep2,1,-1):\n lstm2.extend(data[i-pre * 24][:-3])\n if month in config.train_month:\n # if random.random() > 0.2:\n train_lstm_direction1.append(lstm1)\n train_lstm_direction2.append(lstm2)\n train_target.append(target)\n train_index.append(i-72+start)\n else:\n test_lstm_direction1.append(lstm1)\n test_lstm_direction2.append(lstm2)\n test_target.append(target)\n test_index.append(i-72+start)\n\n return train_lstm_direction1,train_lstm_direction2,train_target,\\\n test_lstm_direction1,test_lstm_direction2,test_target,train_index,test_index\n\n\ndef getLocMap(data):\n locdf = pd.read_excel('data/monitor_loc.xlsx')\n\n print(locdf['mname'])\n locmap = dict(zip(locdf['mname'].values.tolist(),locdf[['x','y']].values.tolist()))\n print(locmap)\n data_len = len(data) // len(config.sitelist)\n print(data_len)\n #cnnmap = np.zeros((len(list(range(72,data_len))),len(config.reorder_col),21,21),dtype=np.float32)\n cnnmap = np.zeros((len(list(range(72, data_len))), 5, 21, 21), dtype=np.float32)\n sitelist = config.sitelist\n for site in sitelist:\n x = locmap[site][0]\n y = locmap[site][1]\n sitedata = data[data['mname'] == site].sort_values('mtime')\n sitedata = sitedata[config.reorder_col].values.tolist()\n for i in range(72,data_len):\n #for j in range(len(sitedata[i])):\n for j in [0,-1,-2,-3,-4]:\n cnnmap[i-72,j,x,y] = sitedata[i-1][j]\n return locmap,cnnmap\n\n\ndef getCNNData(locmap,cnnmap,center = '陵东街'):\n x = locmap[center][0]\n y = locmap[center][1]\n return cnnmap[:,:,x-config.cnnwidth//2:x+config.cnnwidth//2,y-config.cnnheight//2:y+config.cnnheight//2]\n\n\ndef ensemble_lstm(bc_df,ad_df):\n train_lstm_data1 = []\n train_lstm_data2 = []\n train_y = []\n\n test_lstm_data1 = []\n test_lstm_data2 = []\n test_y = []\n\n train_index = []\n test_index = []\n\n for data in [bc_df,ad_df]:\n train_lstm_direction1, train_lstm_direction2, train_target, \\\n test_lstm_direction1, test_lstm_direction2, test_target,train_index,test_index = \\\n getLstmData(data,train_index,test_index,start =len(train_index))\n\n train_lstm_data1.extend(train_lstm_direction1)\n train_lstm_data2.extend(train_lstm_direction2)\n train_y.extend(train_target)\n test_lstm_data1.extend(test_lstm_direction1)\n test_lstm_data2.extend(test_lstm_direction2)\n test_y.extend(test_target)\n\n\n\n\n return train_index,test_index,\\\n np.array(train_lstm_data1),\\\n np.array(train_lstm_data2),\\\n np.array(test_lstm_data1),\\\n np.array(test_lstm_data2),\\\n np.array(train_y),\\\n np.array(test_y)\n\n\ndef main():\n\n ad_df = pd.read_csv('data/ad_data.csv', encoding='gb2312')\n bc_df = pd.read_csv('data/bc_data.csv',encoding='gb2312')\n\n\n bc_df['type'] = 1\n ad_df['type'] = 2\n\n alldf = pd.concat([bc_df,ad_df],axis=0)\n\n alldf['label'] = alldf['PM2.5']\n\n for i in config.scaler:\n ss = StandardScaler()\n alldf[[i]] = ss.fit_transform(alldf[[i]])\n #test_lstm1[:, i:i+config.input_size * config.timestep1:config.input_size] = ss.transform(test_lstm1[:,i:i+config.input_size * config.timestep1:config.input_size])\n\n\n\n\n bc_df = alldf[alldf['type']==1].drop('type',axis=1)\n ad_df = alldf[alldf['type'] == 2].drop('type', axis=1)\n\n train_index, test_index, 
train_lstm1, train_lstm2, test_lstm1, test_lstm2, trainy, testy = ensemble_lstm(bc_df,ad_df)\n\n # locmap,cnnmap1 = getLocMap(bc_df)\n # _,cnnmap2= getLocMap(ad_df)\n #\n # cnnmap = np.concatenate((cnnmap1,cnnmap2),axis=0)\n # sitecnnmap = getCNNData(locmap,cnnmap)\n # train_cnnmap = sitecnnmap[train_index,:,:,:]\n # test_cnnmap = sitecnnmap[test_index,:,:,:]\n\n return np.array(train_lstm1),\\\n np.array(train_lstm2),\\\n np.array(test_lstm1),\\\n np.array(test_lstm2),\\\n np.array(trainy),\\\n np.array(testy),\n\n \n \n \n\n\n\n\n\n\n\n\n\n\n","sub_path":"2018104106/src/dataLoader.py","file_name":"dataLoader.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"36570898","text":"# This Python file uses the following encoding: utf-8\r\nfrom __future__ import unicode_literals\r\nimport os\r\nimport re\r\nimport codecs\r\nimport sys\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\n\t\r\n#trans coding just for test in html page not to use this because html charset\r\ndef trans_result(result):\r\n\ttrans_result = []\r\n\tif result != []:\r\n\t\tfor item in result:\r\n\t\t\ttrans_item = item.encode(\"utf-8\", \"ignore\")\r\n\t\t\ttrans_result.append(trans_item)\r\n\treturn trans_result\r\n\r\ndef make_index():\r\n\t\r\n\tresult = []\r\n\ttitle_result = []\r\n\tauthor_result = []\r\n\thref_result = []\r\n\tmeeting_result = []\r\n\tfor root, dirs, files in os.walk(\"./static\"):\r\n\t\tfor file in files:\r\n\t\t\ttransfound = []\r\n\t\t\tif file.endswith(\".html\"):\r\n\t\t\t\tfile_path = os.path.join(root, file)\r\n\t\t\t\tsoup = BeautifulSoup(open(file_path), \"html5lib\")\r\n\t\t\t\tfound = soup.find_all(\"p\")\r\n\t\t\t\tif found != []:\r\n\t\t\t\t\tfor item in found:\r\n\t\t\t\t\t\titem_string = unicode(item)\r\n\t\t\t\t\t\t#print(root)\r\n\t\t\t\t\t\t#print(item_string)\r\n\t\t\t\t\t\t#paper_group = re.search('(.+?):(.+?)', item_string)\r\n\t\t\t\t\t\tpaper_group = re.search('([\\s\\S]+?):([\\s\\S]+?)', item_string)\r\n\t\t\t\t\t\tif paper_group:\r\n\t\t\t\t\t\t\tpaper = paper_group.group(1)\r\n\t\t\t\t\t\t\tpaper_fixed = paper[1:-1]\r\n\t\t\t\t\t\t\t#here is meeting\r\n\t\t\t\t\t\t\tmeeting_pre = file_path.split('/')\r\n\t\t\t\t\t\t\tmeeting_item = meeting_pre[len(meeting_pre)-1]\r\n\t\t\t\t\t\t\tmeeting_item = re.sub('\\.html$', '', meeting_item)\r\n\t\t\t\t\t\t\tmeeting_string = unicode(meeting_item)\r\n\t\t\t\t\t\t\tmeeting_tmp = meeting_string\r\n\t\t\t\t\t\t\tprint(meeting_tmp)\r\n\t\t\t\t\t\t\tmeeting_tmp = meeting_tmp.replace('-', '')\r\n\t\t\t\t\t\t\tmeeting_string = meeting_tmp.replace('L', 'L ')\r\n\t\t\t\t\t\t\t#meeting_string = meeting_tmp.replace('CCL', 'CCL ')\r\n\t\t\t\t\t\t\t#meeting_string = meeting_tmp.replace('SWCL', 'SWCL ')\r\n\t\t\t\t\t\t\t#meeting_string = meeting_tmp.replace('YCCL', 'YCCL ')\r\n\t\t\t\t\t\t\t#meeting_string = meeting_tmp.replace('YWCL', 'YWCL ')\r\n\t\t\t\t\t\t\tprint(meeting_string)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tmeeting_result.append(meeting_string)\r\n\t\t\t\t\t\t\t#here is href\r\n\t\t\t\t\t\t\thref = root + '/' + paper_fixed\r\n\t\t\t\t\t\t\thref_string = unicode(href)\r\n\t\t\t\t\t\t\thref_string = href_string[1:]\r\n\t\t\t\t\t\t\t#print(href_string)\r\n\t\t\t\t\t\t\t#print(file_path)\r\n\t\t\t\t\t\t\t#href = href.decode(\"utf-8\")\r\n\t\t\t\t\t\t\thref_result.append(href_string)\r\n\t\t\t\t\t\t\tresult.append(item_string)\r\n\t\t\t\t\t\t#if without href it shouldn't be included in the result list\r\n\tfor item in result:\r\n\t\titem_string = 
unicode(item)\r\n\t\tauthor_group = re.search('([\\s\\S]+?)', item_string)\r\n\t\t#group has a problem\r\n\t\tif author_group:\r\n\t\t\t#here is author\r\n\t\t\tauthor = author_group.group(1)\r\n\t\t\t#author = author.decode(\"utf-8\")\r\n\t\t\tauthor_result.append(author)\r\n\t\t\t#print(author)\r\n\t\telse:\r\n\t\t\t#no author\r\n\t\t\tauthor_result.append('')\r\n\t\t\t#print item_string\r\n\t\r\n\tfor item in result:\r\n\t\titem_string = unicode(item)\r\n\t\t#title_group = re.search('(.+?)<', item_string)\r\n\t\t#should consider \\n\\t\r\n\t\ttitle_group = re.search('([\\s\\S]+?)<', item_string)\r\n\t\tif title_group:\r\n\t\t\t#here is title\r\n\t\t\ttitle = title_group.group(1)\r\n\t\t\ttitle_fixed = title[1:]\r\n\t\t\t#title_fixed = title_fixed.decode(\"utf-8\")\r\n\t\t\ttitle_result.append(title_fixed)\r\n\t\t\t#print(type(title_fixed))\r\n\t\telse:\r\n\t\t\ttitle_result.append('')\r\n\t\t\t#print item_string\r\n\t#make index, index format dictionary{html_item, [title, href, author]}\r\n\t\r\n\tindex = dict((z[0], list(z[1:])) for z in zip(result, title_result, href_result, author_result, meeting_result))\r\n\tmulti_list = zip(result, title_result, href_result, author_result, meeting_result)\r\n\t\r\n\t#or item in index:\r\n\t\t#print item, index[item]\r\n\t\t\r\n\t\t\r\n\twith open('index.json', 'w') as f:\r\n\t\tjson.dump(index, f)\r\n\t\r\n\treturn result\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\nmake_index()\r\n\t\t\t\r\n\t\t\t\r\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"127676591","text":"import function as f\n\n### 1.1a\nm = 5 # Mean examples: m = 0 for 1D and m = [0,0] for 2D\nc = 1 # Covariance matrix examples: c = 1 for 1D and c = [[1,0],[0,1]] for 2D \nk = 1000 # Number of samples\nbins = 10 # Number of bins in histogram\nhlist = [0.1, 1, 5, 10] # Bandwidth in kernel density estimation\nx = f.norm_data_generate(m,c,k,bins)\nfor h in hlist:\n f.mykde(x, h)\n\n### 1b\nm0 = 5\nc0 = 1\nm1 = 0\nc1 = 0.04\nk = 500\nbins = 10\nhlist = [0.1, 1, 5, 10]\n\nx = f.Gauss_mixt_data_generate(m0, c0, k, m1, c1, k, bins, 0)\nfor h in hlist:\n f.mykde(x, h)\n\n### 1.2\nm0 = [1, 0]\nc0 = [[0.9, 0.4], [0.4, 0.9]]\nm1 = [0, 2.5]\nc1 = [[0.9, 0.4], [0.4, 0.9]]\nk = 500\nhlist = [0.1, 1, 5, 10]\n\nx = f.Gauss_mixt_data_generate(m0, c0, k, m1, c1, k, 0, 0)\nfor h in hlist:\n f.mykde(x, h)","sub_path":"Assignment3/Kernel.py","file_name":"Kernel.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"32324284","text":"from django.db import models\n\n# 장고 프레임워크 자체에서 설정 정보를 불러오는거\nfrom django.conf import settings\n# Create your models here.\n\n\nclass Booking(models.Model):\n # 예약 회원 고유번호 : 회원테이블의 id값 (FK)\n # 예약하는 당사자(구독자^ㅇ^)\n # related 관계를 맺게되면 relation이 생기는데 그것의 이름을 'booking'으로 지정한거임\n # 사용자의 고유번호 PK가 123456.. 
사용자 고유번호로 찍히는거임\n subscriber = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name='bookings')\n\n # 예약시작일과 종료일\n date_from = models.DateTimeField()\n date_to = models.DateTimeField(null=True, blank=True)\n\n room = models.TextField(max_length=100)\n note = models.TextField()\n\n # 새로운 booking객체를 만들고 DB에 쏴줘야할때, 그 시간을 쏴주는거\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n # self는 해당하는 클래스(booking)을 말함\n # 해당하는 row의 대표값을 출력해주는것\n # username과 이름을 같이 보여주면 어떤 사용자가 어떤 룸을 예약했는지 알 수 있음\n def __str__(self):\n return self.subscriber.username+\"\"+self.room\n\n # 시스템 관리, 개발자를 위해 부수적으로 추가해놓은 정보를 제공해주는 Meta (다른 프로그래밍에서도 Meta는 그런 용도)\n # 목록형태의 데이터를 가져왔을 때 date_from이 내림차순으로 정렬되게 보여줌\n class Meta:\n ordering = ['-date_from']\n","sub_path":"backend/mysite/booking/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"36058205","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#finished by Kay Towner\n\nif __name__ == \"__main__\":\n fname_part1 = 'problem_1_1_data.txt'\n data_to_plot = np.loadtxt(fname_part1,\n delimiter=',',\n skiprows=2)\n print(\"Data to plot:\")\n print(data_to_plot)\n #now you make the plot and save it.\n\n plt.plot(data_to_plot)\n plt.yscale(\"log\")\n plt.show()\n plt.savefig('Homework_3graph.png')\n","sub_path":"plot_problem1_1.py","file_name":"plot_problem1_1.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"151173576","text":"#! /usr/bin/env python3\n\nimport requests\nimport json\nimport datetime\nimport re\n\ndef get_date(date):\n date = list(date)\n year = int(\"\".join(date[0:4]))\n month = int(\"\".join(date[5:7]))\n day = int(\"\".join(date[8:10]))\n return datetime.date(year, month, day)\n\n# fix this at some point so it actually formats properly\ndef clean_html(html):\n r = re.compile(\"

\")\n html = re.sub(r, \"\\n\", html)\n \n reg = re.compile(\"<.*?>\")\n clean = re.sub(reg, \"\", html)\n return clean\n\nclass Smhw:\n def __init__(self, user, pwd, school_id):\n self.user = user\n self.pwd = pwd\n self.school_id = school_id\n\n def login(self):\n data = {\"grant_type\": \"password\",\n \"school_id\": self.school_id, \n \"username\": self.user,\n \"password\": self.pwd}\n\n headers = {\"Accept\": \"application/smhw.v3+json\"}\n \n self.response = requests.post(\n \"https://api.showmyhomework.co.uk/oauth/token?\" \\\n \"client_id=55283c8c45d97ffd88eb9f87e13f390675c7\" \\\n \"5d22b4f2085f43b0d7355c1f&client_secret=c8f7d8f\" \\\n \"cd0746adc50278bc89ed6f004402acbbf4335d3cb12d6a\" \\\n \"c6497d3\",\n headers = headers,\n data = data)\n \n self.token = json.loads(self.response.text)[\"smhw_token\"]\n\n def get_todos(self):\n data = {\"completed\": False}\n todos = requests.get(\"https://api.showmyhomework.co.uk/api/todos\",\n headers = {\"Accept\": \"application/smhw.v3+json\", \n \"Authorization\": \"Bearer \" + self.token},\n data = data)\n\n todos = json.loads(todos.text)[\"todos\"]\n todos = [todos[i] for i in range(0, len(todos)) \n if todos[i][\"completed\"] == False]\n todos.sort(key=lambda x: get_date(x[\"due_on\"]))\n\n tdlist = []\n\n for i in range(len(todos)):\n tdlist.append({\"title\": todos[i][\"class_task_title\"], \n \"due\": get_date(todos[i][\"due_on\"]),\n \"info\": clean_html(todos[i][\"class_task_description\"]),\n \"subject\": todos[i][\"subject\"],\n \"attach\": todos[i][\"has_attachments\"]})\n\n self.todos = tdlist\n return self.todos\n","sub_path":"smhwwrap.py","file_name":"smhwwrap.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"335562221","text":"from __future__ import print_function, division\n\nimport odbAccess\nfrom abaqusConstants import DEFORMABLE_BODY, THREE_D\nimport os\nimport sys\n\n\ndef _copy_node_and_elements(to_odb_base, from_odb_base):\n nodal_data = from_odb_base.nodes\n if len(nodal_data) > 0:\n node_labels = []\n nodal_coordinates = []\n for n in nodal_data:\n node_labels.append(n.label)\n nodal_coordinates.append(n.coordinates)\n to_odb_base.addNodes(labels=node_labels, coordinates=nodal_coordinates)\n\n element_data = from_odb_base.elements\n element_dict = {}\n\n for e in element_data:\n element_type = e.type\n if element_type not in element_dict:\n element_dict[element_type] = {'labels': [], 'connectivity': []}\n element_dict[element_type]['labels'].append(e.label)\n element_dict[element_type]['connectivity'].append(e.connectivity)\n\n for element_type, element_data in element_dict.iteritems():\n to_odb_base.addElements(labels=element_data['labels'], connectivity=element_data['connectivity'],\n type=element_type)\n\n\ndef _copy_sets(to_odb_base, from_odb_base):\n for node_set_name in from_odb_base.nodeSets.keys():\n node_set = from_odb_base.nodeSets[node_set_name]\n to_odb_base.NodeSet(name=node_set_name, nodes=node_set.nodes)\n\n for element_set_name in from_odb_base.elementSets.keys():\n element_set = from_odb_base.elementSets[element_set_name]\n to_odb_base.ElementSet(name=element_set_name, elements=element_set.elements)\n\n\ndef create_empty_odb(new_odb_file_name, old_odb_file_name):\n \"\"\"\n :param new_odb_file_name: Filename including path for the new odb\n :param old_odb_file_name: Filename including path for the odb file containing the geometry\n :return: Nothing\n \"\"\"\n\n new_odb = 
odbAccess.Odb(name=os.path.basename(new_odb_file_name), path=new_odb_file_name)\n old_odb = odbAccess.openOdb(old_odb_file_name, readOnly=True)\n # Copying the part and copying the nodes in that part\n for part_name in old_odb.parts.keys():\n old_part = old_odb.parts[part_name]\n new_part = new_odb.Part(name=part_name, embeddedSpace=THREE_D, type=old_part.type)\n _copy_node_and_elements(new_part, old_part)\n _copy_sets(new_part, old_part)\n new_odb.update()\n new_odb.save()\n\n # Copying the instances and copying the nodes\n for instance_name in old_odb.rootAssembly.instances.keys():\n old_instance = old_odb.rootAssembly.instances[instance_name]\n try:\n new_part = new_odb.parts[instance_name]\n except KeyError:\n try:\n new_part = new_odb.Part(name=instance_name, embeddedSpace=THREE_D,\n type=old_odb.parts[instance_name].type)\n except KeyError:\n new_part = new_odb.Part(name=instance_name, embeddedSpace=THREE_D, type=DEFORMABLE_BODY)\n\n # Copying the instance nodes to the part with the same name\n _copy_node_and_elements(new_part, old_instance)\n\n new_instance = new_odb.rootAssembly.Instance(name=instance_name, object=new_odb.parts[instance_name])\n _copy_sets(new_instance, old_instance)\n new_odb.update()\n new_odb.save()\n new_odb.close()\n old_odb.close()\n\n\nif __name__ == '__main__':\n old_file = sys.argv[-1]\n new_file = sys.argv[-2]\n create_empty_odb(new_file, old_file)\n","sub_path":"abaqus_python_scripts/create_empty_odb_from_odb.py","file_name":"create_empty_odb_from_odb.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"263068528","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n \"\"\"\n 归并排序 时间复杂度0(nlogn) 自顶向下\n \"\"\"\n\n def sortList(self, head: ListNode) -> ListNode:\n if head == None or head.next == None:\n return head\n\n slow = head\n fast = head.next\n\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n\n mid, slow.next = slow.next, None\n\n l1 = self.sortList(head)\n l2 = self.sortList(mid)\n\n return self.merge(l1, l2)\n\n def merge(self, l1, l2):\n l = p = ListNode(0)\n while l1 and l2:\n if l1.val < l2.val:\n p.next = l1\n l1 = l1.next\n else:\n p.next = l2\n l2 = l2.next\n p = p.next\n\n p.next = l1 if l1 else l2\n return l.next\n\n\ns = Solution()\nl = ListNode(4)\nl.next = ListNode(2)\nl.next.next = ListNode(1)\nl.next.next.next = ListNode(3)\nss = s.sortList(l)\n\nwhile ss:\n print(ss.val)\n ss = ss.next\n","sub_path":"leetcode_python/148. 排序链表.py","file_name":"148. 排序链表.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"358039680","text":"\nfrom django.conf.urls import url\nfrom . 
import views as vw\n\nurlpatterns =[\n url('rentedpro/', vw.rented , name=\"rented\"),\n url('managerent/', vw.managerent ,name=\"managerent\"),\n url('messages/', vw.usermessages ,name=\"messages\"),\n url('auctions/', vw.myauctions ,name=\"auctions\"),\n]\n","sub_path":"RentalAuction/tenant/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"593992644","text":"import logging.config\nimport sys\n\nfrom chat.settings_base import *\n\nTEMPLATE_DEBUG = False\nDEBUG = False\nMAIN_TORNADO_PROCESS_PORT = 8888\n\nTEMPLATES[0]['OPTIONS']['loaders'] = [(\n\t'django.template.loaders.cached.Loader',\n\t[\n\t\t'django.template.loaders.filesystem.Loader',\n\t\t'django.template.loaders.app_directories.Loader',\n\t]\n)]\n\n\nLOGGING['handlers'] = {\n\t'default': {\n\t\t'level': 'DEBUG',\n\t\t'class': 'logging.StreamHandler',\n\t\t'stream': sys.stdout,\n\t\t'filters': ['id', ],\n\t\t'formatter': 'django',\n\t},\n\t'mail_admins': {\n\t\t'level': 'ERROR',\n\t\t'class': 'django.utils.log.AdminEmailHandler',\n\t}\n}\n\nLOGGING['loggers'] = {\n\t'': {\n\t\t'handlers': ['default', ],\n\t\t'level': 'DEBUG',\n\t\t'propagate': False,\n\t},\n}\n\nlogging.config.dictConfig(LOGGING)\n","sub_path":"backend/chat/settings_prod.py","file_name":"settings_prod.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"541735135","text":"import numpy as np\nimport random\n\n\"\"\"\nTakes the input of:\n -numfolds = the number of folds given\n -model = the machine learning model\n -parameters = the hyperparameters to configure the model with\n -X = the dataset samples to train on\n -y = the ground truth labels\n\"\"\"\ndef cross_validation(numfolds,model,parameters,X,y):\n model.params = parameters\n \n # initialization\n folds = [[]for i in range(numfolds)]\n labels = [[]for i in range(numfolds)]\n \n # distribution of data into folds \n for i, datapoint in enumerate(X):\n folds[i % numfolds].append(datapoint) \n labels[i % numfolds].append(y[i]) \n \n # K-fold cross validation\n average_result = 0\n for i in range(numfolds):\n test = folds[i]\n trainlist = []\n labellist = []\n for j, fold in enumerate(folds):\n if (j != i):\n trainlist += fold\n labellist += labels[j]\n \n model.train(np.array(trainlist),np.array(labellist))\n result = model.evaluate(test)\n average_result += result/numfolds\n \n return average_result, model \n \n\"\"\"\nThis function searches for the optimal hyperparameters using Randomized Search.\n\nParameters:\nnum_folds : int - number of folds k for k-fold cross validation\nmodel : AbstractCustomModel - model object for training\nparameters : dict - parameters with ranges for hyperparameter search\nX : array-like of size (n_samples,n_features) - dataset to train on\ny : array-like of size (n_samples,) - ground truth labels\nnum_iter : int - number of iterations of hyperparameter search to perform\nrandom_state : int - seed for random number generator\n\"\"\"\ndef hyperparameter_search(num_folds,model,parameters,X,y,num_iter=200,random_state=879057,interval=10,verbose=False):\n best_result = -1e9\n best_model = None\n best_params = None \n np.random.seed(random_state)\n for i in range(num_iter):\n # periodically report search progress\n if verbose and (i + 1) % interval == 0:\n print(\"\"\"Iteration {} / {}\nBest Result: {:.2f}\"\"\".format(i + 1,num_iter,best_result))\n params = {\n \"thresholds\" : np.random.normal(0.5, 0.15, parameters[\"num_features\"]), \n \"min_support\" : np.random.uniform(\n 
parameters[\"min_support_lo\"],\n parameters[\"min_support_hi\"],1\n )[0],\n \"min_confidence\" : np.random.uniform(\n parameters[\"min_confidence_lo\"],\n parameters[\"min_confidence_hi\"],1\n )[0],\n \"col_names\" : parameters[\"col_names\"],\n \"label_names\" : parameters[\"label_names\"],\n \"label_support\" : parameters[\"label_support\"],\n \"min_rules\" : random.randint(\n parameters[\"min_rules_lo\"],\n parameters[\"min_rules_hi\"]\n )\n }\n \n # highest threshold\n result, cur_model = cross_validation(num_folds,model,params,X,y)\n print(\"Result = {}\".format(result))\n if result > best_result:\n best_params = params\n best_result = result\n best_model = cur_model\n best_model.params = best_params\n best_model.train(X,y)\n \n return best_params,best_result,best_model \n \n","sub_path":"Notebooks/Feature Mapping/.ipynb_checkpoints/xlb_hyperparamsearch-checkpoint.py","file_name":"xlb_hyperparamsearch-checkpoint.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"652596413","text":"\nfrom __future__ import absolute_import\nimport hug\n\nfrom api.errorcode import ErrorCode\nfrom config import db\nfrom datetime import datetime\nfrom falcon import HTTPNotFound, HTTP_201, HTTP_204\nfrom log import logger\nfrom plugins.weixin.models.wxaccount import wxaccounts, AccountStatus\nfrom plugins.weixin.services.account import WxAccountService\nfrom models import row2dict, rows2dict, bind_dict, change_dict\nfrom sqlalchemy.sql import select\n\nwxaccountService = WxAccountService()\n\nIGNORES = {'app_secret', 'access_token', 'created_date', 'last_modifed'}\n\n\nclass WxAccountMixin(object):\n\n def get_wxaccount(self, id):\n row = db.get(wxaccounts, id)\n if row:\n return row2dict(row, wxaccounts)\n else:\n raise HTTPNotFound(title=\"no_wxaccount\")\n\n\n@hug.object.urls('')\nclass WxAccounts(object):\n '''微信公众号管理REST API\n '''\n @hug.object.get()\n def get(self, request, response, q: str=None):\n '''微信公众号\n '''\n try:\n t = wxaccounts.alias('d')\n query = db.filter(wxaccounts, request)\n if q:\n query = query.where(t.c.name.like('%' + q + '%'))\n rs = db.paginate_data(query, request, response)\n return rows2dict(rs, wxaccounts, IGNORES)\n except Exception as e:\n return {'code': 1, 'message': 'error'}\n\n @hug.object.post(status=HTTP_201)\n def post(self, body):\n '''\n 微信公众号REST API Post接口\n :param: id int 微信公众号ID\n :return: json\n '''\n wxaccount = bind_dict(wxaccounts, body)\n d = db.save(wxaccounts, wxaccount)\n return d\n\n @hug.object.delete(status=HTTP_204)\n def delete(self, request, response):\n ids = request.params.get('ids')\n db.bulk_delete(wxaccounts, ids)\n return {'code': 0, 'message': 'OK'}\n\n\n@hug.object.http_methods('/{id}')\nclass WxAccountInst(WxAccountMixin, object):\n\n def get(self, id: int):\n t = self.get_wxaccount(id)\n return t\n\n def patch(self, id: int, body):\n t = self.get_wxaccount(id)\n if t:\n excludes = ['refresh_time', 'effective_time']\n t = change_dict(wxaccounts, t, body, excludes)\n db.update(wxaccounts, t)\n return t\n\n @hug.object.delete(status=HTTP_204)\n def delete(self, id: int):\n '''\n 删除微信公众号\n :param: id int 微信公众号ID\n :return:\n '''\n db.delete(wxaccounts, id)\n\n\n@hug.get('/getAllWxAccounts')\ndef get_all_wxaccounts():\n datas = []\n t = wxaccounts.alias('d')\n query = select([t.c.id, t.c.code, t.c.name])\n rows = db.execute(query).fetchall()\n for r in rows:\n datas.append({'id': r[0], 'code': r[1], 'name': r[2]})\n return datas\n\n\ndef 
query_all_wxaccounts():\n rs = db.fetch_all(wxaccounts, ['name'])\n wxaccount_dict = {}\n for r in rs:\n code = r[1]\n wxaccount_dict[code] = {'id': r[0], 'code': code, 'name': r[2]}\n return wxaccount_dict\n\n\n@hug.get('/syncAccessToken')\ndef sync_access_token(request, response):\n result = {'code': ErrorCode.OK.value,\n 'message': ErrorCode.OK.name}\n try:\n account_id = request.params.get('id')\n logger.info(' id: ' + str(account_id))\n result = wxaccountService.refresh_access_token(db, account_id)\n except Exception:\n logger.exception(' error: ')\n result = {'code': ErrorCode.EXCEPTION.value,\n 'message': ErrorCode.EXCEPTION.name}\n\n return result\n\n\ndef init_wxaccounts():\n db.connect()\n count = db.count(wxaccounts)\n if count > 1:\n db.close()\n return\n now = datetime.now()\n account = {'name': '微信公众号',\n 'code': 'gh_62241ba29385',\n 'app_id': 'wx8a55d4f043c6ae91',\n 'app_secret': 'fe864a5bfccfff0ca07c121f9bb8f81c',\n 'status': AccountStatus.Active,\n 'created_date': now,\n 'last_modifed': now}\n db.insert(wxaccounts, account)\n db.close()\n","sub_path":"plugins/weixin/api/wxaccount.py","file_name":"wxaccount.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"520014731","text":"from PyQt5.QtWidgets import QDialog, QApplication, QMessageBox\nfrom PyQt5 import QtCore\nfrom ui_link import Ui_MainDialog\nimport pyodbc\n\nCONN_STR = (\n r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'\n r'DBQ=.\\\\data\\\\Veracode.accdb;'\n)\n\nconn = pyodbc.connect(CONN_STR)\n\nclass MainDialog(QDialog):\n def __init__(self, *positional_parameters, **keyword_parameters):\n super(MainDialog, self).__init__()\n\n # Set up the user interface from Designer.\n self.ui = Ui_MainDialog()\n self.resize(550, 350)\n self.setMinimumSize(QtCore.QSize(450, 350))\n self.ui.setupUi(self)\n\n # Connect buttons\n self.ui.buttonClose.clicked.connect(self.close)\n self.ui.buttonLink.clicked.connect(self.onLinkClick)\n \n def onLinkClick(self):\n if self.ui.textAnalysisId.text():\n analysisID = self.ui.textAnalysisId.text()\n else:\n QMessageBox.about(self, \"Error\", \"Analysis ID required\")\n self.ui.textAnalysisId.setFocus()\n return\n \n if self.ui.textSandboxId.text():\n sandboxID = self.ui.textSandboxId.text()\n else:\n QMessageBox.about(self, \"Error\", \"Sandbox ID required\")\n self.ui.textSandboxId.setFocus()\n return\n\n if self.ui.textTicketId.text():\n ticketID = self.ui.textTicketId.text()\n else:\n QMessageBox.about(self, \"Error\", \"Ticket ID required\")\n self.ui.textTicketId.setFocus()\n return\n \n if self.ui.textFlaw.toPlainText():\n strFlaws = self.ui.textFlaw.toPlainText().replace(\"\\n\", \",\")\n else:\n QMessageBox.about(self, \"Error\", \"One or more flaws required\")\n self.ui.textFlaw.setFocus()\n return\n \n # Check if flaw list starts with an alpha, maybe garbage pasted in\n if strFlaws[0].isalpha():\n QMessageBox.about(self, \"Error\", \"Check for text in flaw list\")\n return\n \n # Get rid of last comma\n if strFlaws.endswith(\",\"):\n strFlaws = strFlaws[:-1]\n \n queryParams = [ticketID, sandboxID, analysisID, strFlaws]\n count = self.updateLinkFlaws(queryParams)\n QMessageBox.about(self, \"Ticket ID \" + ticketID, str(count) + \" flaws updated\")\n \n def updateLinkFlaws(self, queryParams):\n sql = \"UPDATE flaws \" +\\\n \" SET ticket_id = \" + queryParams[0] + \" \" +\\\n \"WHERE sandbox_id = \" + queryParams[1] +\\\n \" AND analysis_id = \" + queryParams[2] +\\\n \" AND 
flaw_id IN (\" + queryParams[3] + \")\"\n try:\n cursor = conn.cursor()\n row_count = cursor.execute(sql).rowcount\n conn.commit()\n except pyodbc.Error as ex:\n QMessageBox.about(self, \"Error\", str(ex))\n row_count = 0\n \n return row_count\n \n def updateFlawsParam(self, queryParams):\n print (len(queryParams[2]))\n placeholders = \",\".join(\"?\" * len(queryParams[2]))\n sql = \"UPDATE flaws \" +\\\n \" SET ticket_id = ? \" +\\\n \"WHERE sandbox_id = ? \" +\\\n \" AND flaw_id IN (%s)\" % placeholders\n try:\n cursor = conn.cursor()\n row_count = cursor.execute(sql, queryParams).rowcount\n conn.commit()\n except pyodbc.Error as ex:\n QMessageBox.about(self, \"Error\", str(ex))\n row_count = 0\n \n return row_count \n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n dialog = MainDialog()\n # Vs Code bug\n # sys.exit(dialog.exec_())\n dialog.exec_()\n conn.close()","sub_path":"LinkApp.py","file_name":"LinkApp.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"331761284","text":"import pytest\n\nfrom dnd.models.armor import Armor, ArmorType\nfrom dnd.models.character import Character, Ability\nfrom dnd.models.damage import DamageType, Damage\nfrom dnd.models.feat import Resistance\nfrom dnd.models.spell import Spell\nfrom dnd.models.weapon import Weapon, WeaponType, WeaponProperty\nfrom tests.mocking_models.dummies import DUMMY_CHARACTER, DUMMY_PLAYER_WEAPON, DUMMY_DAMAGE_D6\nfrom tests.mocking_models.mocking_die import MockingDie\n\n\ndef test_character_check_ability_negative():\n with pytest.raises(AttributeError):\n Character.check_ability(-1, Ability.CHARISMA)\n\n\ndef test_ability_representation():\n assert Ability.CHARISMA.__repr__() == 'charisma'\n\n\ndef test_character_check_ability_huge():\n with pytest.raises(AttributeError):\n Character.check_ability(22, Ability.CHARISMA)\n\n\ndef test_character_check_ability_normal():\n Character.check_ability(15, Ability.CHARISMA)\n\n\ndef test_character_get_modifier():\n assert Character.get_modifier(10) == 0\n assert Character.get_modifier(9) == -1\n assert Character.get_modifier(8) == -1\n assert Character.get_modifier(11) == 0\n assert Character.get_modifier(12) == 1\n\n\ndef test_character_default_proficiency_value():\n assert Character(**DUMMY_CHARACTER).proficiency == 2\n\n\ndef test_character_set_proficiency_value():\n character = Character(**DUMMY_CHARACTER)\n character.proficiency = 3\n\n assert character.proficiency == 3\n\n with pytest.raises(AttributeError):\n character.proficiency = -2\n\n\ndef test_character_apply_damage():\n character = Character(**DUMMY_CHARACTER)\n character.apply_damage(5, DamageType.MAGIC_ACID)\n\n assert character.hit_points == 5\n\n\ndef test_character_apply_damage_with_resistance():\n character = Character(**DUMMY_CHARACTER)\n character.feat_list.append(Resistance(DamageType.PIERCING))\n\n character.apply_damage(4, DamageType.PIERCING)\n assert character.hit_points == 8\n character.apply_damage(4, DamageType.MAGIC_ACID)\n assert character.hit_points == 4\n\n\ndef test_character_attack():\n character = Character(strength=8, dexterity=12, constitution=10, intelligence=10, wisdom=10, charisma=10,\n hit_points=10)\n character.active_weapon = Weapon(damage=DUMMY_DAMAGE_D6, weapon_type=WeaponType.MARTIAL_RANGED,\n properties={WeaponProperty.AMMUNITION: (30, 120)})\n character.active_weapon.ammo = 120\n\n assert character.attack_modifier == 3\n\n character.active_weapon = 
Weapon(damage=DUMMY_DAMAGE_D6, weapon_type=WeaponType.MARTIAL_MELEE)\n assert character.attack_modifier == 1\n\n character.active_weapon = Weapon(damage=DUMMY_DAMAGE_D6, weapon_type=WeaponType.MARTIAL_MELEE,\n properties={WeaponProperty.FINESSE: True})\n assert character.attack_modifier == 3\n\n\ndef test_character_damage():\n character = Character(**DUMMY_CHARACTER)\n\n assert character.damage(DUMMY_PLAYER_WEAPON) == 4\n\n\ndef test_character_attack_versatile_weapon():\n character = Character(**DUMMY_CHARACTER)\n weapon = Weapon.simple_melee(die_list=[MockingDie(4)], damage_type=DamageType.PIERCING, versatile=[MockingDie(6)])\n\n assert character.damage(weapon) == 6\n\n character.using_shield = True\n assert character.damage(weapon) == 4\n\n\ndef test_character_armor_class():\n character = Character(**DUMMY_CHARACTER)\n\n assert character.armor_class == 10\n character.armor = Armor(11, ArmorType.LIGHT)\n\n assert character.armor_class == 11\n character.using_shield = True\n assert character.armor_class == 13\n\n\ndef test_spell_caster_character():\n character = Character(**DUMMY_CHARACTER)\n character.cast_ability = Ability.CHARISMA\n magic_arrow = Spell(Damage([MockingDie(6)], DamageType.MAGIC_COLD), spell_lvl=1)\n magic_arrow.slots = 2\n character.spell_list = [magic_arrow, Spell(Damage([MockingDie(4)], DamageType.MAGIC_COLD))]\n\n assert character.cast_modifier == 2\n","sub_path":"tests/test_models/test_character.py","file_name":"test_character.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"474498776","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 2 14:11:06 2019\n@author: Jakub Widawski\n@email: jdwidawski@gmail.com\n\"\"\"\nimport random\n\n# Get the work matrix, line index and element index in line.\n# Return count of neighbouring cells (Xs)\ndef eval_space(work_matrix, index, i):\n neighbor_count = 0\n try:\n # Handles first line, only check elements from left right and under current element\n if index == 0:\n if work_matrix[index][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index][i+1] == 'X':\n neighbor_count += 1\n if work_matrix[index+1][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index+1][i] == 'X':\n neighbor_count += 1\n if work_matrix[index+1][i+1] == 'X':\n neighbor_count += 1\n\n # Handles last line, only check line above, left and right\n elif index == len(work_matrix) - 1:\n if work_matrix[index-1][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index-1][i] == 'X':\n neighbor_count += 1\n if work_matrix[index-1][i+1] == 'X':\n neighbor_count += 1\n if work_matrix[index][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index][i+1] == 'X':\n neighbor_count += 1\n\n # Deals with all normal cases (count number of X around the item)\n else:\n if work_matrix[index-1][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index-1][i] == 'X':\n neighbor_count += 1\n if work_matrix[index-1][i+1] == 'X':\n neighbor_count += 1\n if work_matrix[index][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index][i+1] == 'X':\n neighbor_count += 1\n if work_matrix[index+1][i-1] == 'X':\n neighbor_count += 1\n if work_matrix[index+1][i] == 'X':\n neighbor_count += 1\n if work_matrix[index+1][i+1] == 'X':\n neighbor_count += 1\n\n # Index error will occur in first left element and last element on the right,\n # There are no elements from the \"wall\" side ==> pass\n except IndexError:\n pass\n\n return neighbor_count\n\n# 20x20 matrix is 
initialized\n# assignment3_picksize.py allows user to input matrix size and max iteration number\nmatrix_size = 20\n\ncolumns = matrix_size\nrows = matrix_size\nmatrix = [[' ' for x in range(columns)] for y in range(rows)]\n\n# Make list of possible number of X being input into the grid\n# Later they will be randomized\nchoice_list = list(range(len(matrix)))\n\n# Work_matrix will contain our new matrix for later processing\nwork_matrix = []\nfor line in matrix:\n # Randomly pick number of X to put in the line\n random_x = random.choice(choice_list)\n # Create new string now containing Xs to put in the new grid\n random_x_string = random_x*'X' + (len(matrix)-random_x)*' '\n l = list(random_x_string)\n random.shuffle(l)\n l.insert(0, '|')\n l.insert(len(matrix)+1, '|')\n work_matrix.append(l)\n\n\n\nprint(f'\\nRandomly generated Xs inside a {matrix_size}*{matrix_size} matrix.')\n\n# Initiate variables for the program's main iteration\nchoice = 'yes'\nsecond_last_matrix = []\nlast_matrix = []\niteration = 0\n\n# As long as the user inputs a string starting with y, the next iteration will commence\nwhile choice.startswith('y'):\n # Keep track of the generation number.\n iteration +=1\n print(f'\\nGeneration no:\\t{iteration}')\n\n # if new_matrix wasn't yet initiated, start with work_matrix\n try:\n if new_matrix:\n work_matrix = new_matrix\n except NameError:\n pass\n\n # First initiate matrix and append first line to the list of list (our grid)\n new_matrix = []\n new_matrix.append((len(matrix)+2)*['-'])\n # Game logic, for each line write new list based on the line from the previous generation\n # If the space in line was X and had 2 or 3 neighbours it dies. Else keep it same for next generation\n # If the space was empty and now neighbours 3 Xs it becomes a new X\n # If its just the \"wall\" from left and right just append it as is\n for index, line in enumerate(work_matrix):\n new_matrix_line = []\n for i, space in enumerate(line):\n neighbor_count = eval_space(work_matrix, index,i)\n if space == 'X':\n if neighbor_count < 2 or neighbor_count > 3:\n new_matrix_line.append(' ')\n else:\n new_matrix_line.append('X')\n elif space == ' ':\n if neighbor_count == 3:\n new_matrix_line.append('X')\n else:\n new_matrix_line.append(' ')\n elif space == '|':\n new_matrix_line.append(space)\n # Append every line to the newly created matrix\n new_matrix.append(new_matrix_line)\n # Finish with appending the bottom line\n new_matrix.append((len(matrix)+2)*['-'])\n new_matrix = [line for line in new_matrix if len(line) > 1]\n\n # Print the new matrix to the user\n [print(''.join(x)) for x in new_matrix]\n\n # If the last matrix looks exactly the same as the one from previous generation,\n # AKA nothing happened, then inform the user and exit the program\n is_stuck = bool(new_matrix==last_matrix)\n is_stuck2 = bool(new_matrix==second_last_matrix)\n if is_stuck or is_stuck2:\n print('\\nNothing will happen here! The population will not develop any further!')\n exit(1)\n\n # To check if the program is stuck, I check whether the matrix state is equal to up to two iterations before.\n # The last matrix from this iteration becomes the second_last_matrix\n # New matrix from this iteration now becomes the last matrix, save it for the next iteration\n second_last_matrix = last_matrix\n last_matrix = new_matrix\n\n # Allow user to pick whether they want to continue\n print('\\n\\nWould you like to go to the next generation? 
[y/n]')\n choice = str(input()).lower()\n\n # If choice was different than y or n, ask the user to pass the right input\n while choice.startswith('y') == False and choice.startswith('n') == False:\n print('\\nPlease pass y (yes) or n (no)!')\n choice = str(input()).lower()\n","sub_path":"assignment3/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"208845138","text":"# -*- coding: utf-8 -*-\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nfrom my_python_project.MyClassManager import MyClassManager\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG,\n format=\"%(levelname)s %(asctime)s [%(process)d] %(name)s %(filename)s:%(lineno)d %(message)s\",\n )\n try:\n logging.info(\"Start my class manager\")\n myclassmanager = MyClassManager()\n myclassmanager.run()\n except Exception as exception:\n logging.info(\"MyClassManager exception: %s\", exception)\n logging.info(\"Try to finish...\")\n","sub_path":"my_python_project/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"20201112","text":"#!/usr/bin/env python\n\n\"\"\"\nORTHO\nSimple configuration manager.\n\"\"\"\n\nfrom __future__ import print_function\nimport os,json,re,sys\nfrom .misc import treeview,str_types\nfrom .bootstrap import bootstrap\nfrom .data import delve,delveset\nfrom .hooks import hook_handler\n\n# exported in from __init__.py (defined for linting)\nconf = {}\nconfig_fn = None\n\ndef check_ready():\n\tglobal config_fn\n\tif not config_fn:\n\t\t#! raise Exception('ortho import failure. config_fn is not set!')\n\t\t#! print('warning ortho import failure. config_fn is not set!')\n\t\tpass\n\t\t#!!! revisit this\n\ndef abspath(path):\n\t\"\"\"Get the right path.\"\"\"\n\treturn os.path.abspath(os.path.expanduser(path))\n\ndef read_config(source=None,cwd=None,default=None,hook=False,strict=True):\n\t\"\"\"Read the configuration.\"\"\"\n\tglobal config_fn\n\tcheck_ready()\n\tif source and cwd:\n\t\traise Exception('source and cwd are mutually exclusive: %s, %s'%(source,cwd))\n\telif cwd: source = os.path.join(cwd,config_fn)\n\telse: source = source if source else config_fn\n\tif source==None: raise Exception('the source value is None, which typically occurs when you try to '\n\t\t'access ortho.conf before everything is imported and ortho/__init__.py sets config.py, config_fn to'\n\t\t'e.g. config.json. 
we recommend checking your import scheme.')\n\tlocations = [abspath(source),os.path.join(os.getcwd(),source)]\n\tfound = next((loc for loc in locations if os.path.isfile(loc)),None)\n\tif not found and default==None: raise Exception('cannot find file \"%s\"'%source)\n\telif not found and default!=None: \n\t\t# when new users run make for the first time and create the config.json it also runs bootstrap.py\n\t\t# to set up any other paths from the dependent module\n\t\t# a minimal bootstrap.py might be: def bootstrap_default(): return {'commands':['path/to/code.py']}\n\t\tboot = bootstrap(post=False)\n\t\tif type(boot)==dict:\n\t\t\tif 'default' not in boot and 'post' not in boot: \n\t\t\t\traise Exception('bootstrap.py must contain function bootstrap_default or bootstrap_post')\n\t\t\telif 'default' in boot: default.update(**boot.get('default',{}))\n\t\t# we write the config once even if bootstrap writes it again\n\t\twrite_config(config=default,source=locations[0])\n\t\treturn default\n\telse: \n\t\twith open(found,'r') as fp: result = json.load(fp)\n\t\t# configuration keys starting with the \"@\" sign are special hooks\n\t\t# which can either include a direct value or a function to get them\n\t\tif hook==True: hook_handler(result)\n\t\telif isinstance(hook,str_types): \n\t\t\thook_handler(result,this=hook,strict=strict)\n\t\treturn result\n\ndef config_hook_get(hook,default):\n\t\"\"\"Get a hook if it exists otherwise return a default.\"\"\"\n\tconf = read_config(hook=hook,strict=False)\n\treturn conf.get(hook,default)\n\t\ndef write_config(config,source=None):\n\t\"\"\"Write the configuration.\"\"\"\n\tglobal config_fn\n\tcheck_ready()\n\twith open(source if source else config_fn,'w') as fp:\n\t\tjson.dump(config,fp)\n\ndef interpret_command_text(raw):\n\t\"\"\"\n\tInterpret text pythonically, if possible.\n\tAdapted from the pseudo-yaml parser in automacs.\n\tNote that sending in python text from makefile requires weird syntax: key=\\\"\"\"\\\"\n\t\"\"\"\n\ttry: val = eval(raw)\n\texcept: val = raw\n\t# protect against sending e.g. \"all\" as a string and evaluating to builtin all function\n\tif val.__class__.__name__=='builtin_function_or_method': result = str(val)\n\telif type(val) in [list,dict]: result = val\n\telif type(val) in str_types:\n\t\tif re.match('^(T|t)rue$',val): result = True\n\t\telif re.match('^(F|f)alse$',val): result = False\n\t\telif re.match('^(N|n)one$',val): result = None\n\t\t#! may be redundant with the eval command above\n\t\telif re.match('^[0-9]+$',val): result = int(val)\n\t\telif re.match(r\"^[0-9]*\\.[0-9]*$\",val): result = float(val)\n\t\telse: result = val\n\telse: result = val\n\treturn result\n\ndef set_config(*args,**kwargs):\n\t\"\"\"\n\tUpdate the configuration in a local configuration file (typically ``config.json``).\n\tThis function routes ``make set` calls so they update flags using a couple different syntaxes.\n\tWe make a couple of design choices to ensure a clear grammar: a\n\t1. a single argument sets a boolean True (use unset to remove the parameter and as a style convention, \n\talways assume that something is False by default, or use kwargs to specify False)\n\t2. pairs of arguments are interpreted as key,value pairs\n\t3. everything here assumes each key has one value. 
if you want to add to a list, use ``setlist``\n\t\"\"\"\n\tglobal conf # from __init__.py\n\toutgoing = dict()\n\t# pairs of arguments are interpreted as key,value pairs\n\tif len(args)%2==0: outgoing.update(**dict(zip(args[::2],args[1::2])))\n\t# one argument means we set a boolean\n\telif len(args)==1: outgoing[args[0]] = True\n\telse: raise Exception('set_config received an odd number of arguments more than one: %s'%args)\n\t# interpret kwargs with an opportunity to use python syntax, or types other than strings\n\tfor key,raw in kwargs.items(): outgoing[key] = interpret_command_text(raw)\n\t# write the config\n\tconf.update(**outgoing)\n\twrite_config(conf)\n\ndef set_hook(*args,**kwargs):\n\t\"\"\"\n\tHooks get the \"@\" prepended to keys but the makefile interface does \n\tnot allow this easily so we provide this function.\n\t\"\"\"\n\targs = [m for n in [('@%s'%args[2*i],args[2*i+1]) for i in range(int(len(args)/2))] for m in n]\n\tkwargs = dict([('@%s'%i,j) for i,j in kwargs.items()])\n\t# note that we typically use set_dict to set dictionary items but many hooks will use dictionary\n\t# forms, so we try an eval here in case it's a dict. set_dict does more than this to set children\n\t# without obliterating the other leaves of the tree\n\tfor k,v in kwargs.items():\n\t\t#! dangerous?\n\t\ttry: kwargs[k] = eval(v)\n\t\texcept: pass\n\t# note that since conf requires a read, and read would substitute @key with key, setting a hook\n\t# will displace the non-hook keys automatically\n\tset_config(*args,**kwargs)\n\ndef setlist(*args):\n\t\"\"\"\n\tSpecial handler for adding list items.\n\tThe first argument must be the key and the following arguments are the values to add. Send kwargs to the\n\t``unset`` function below to remove items from the list.\n\t\"\"\"\n\tglobal conf,config_fn # from __init__.py\n\tif len(args)<=1: raise Exception('invalid arguments for setlist. you need at least two: %s'%args)\n\tkey,vals = args[0],list(args[1:])\n\tif key not in conf: conf[key] = vals\n\telif type(conf[key])!=list: raise Exception('cannot convert singleton to list in %s'%config_fn)\n\telse: conf[key] = list(set(conf[key]+vals))\n\twrite_config(conf)\n\ndef set_list(*args): \n\t\"\"\"Alias for setlist.\"\"\"\n\treturn setlist(*args)\n\ndef unset(*args):\n\t\"\"\"Remove items from config.\"\"\"\n\tconfig = read_config()\n\tfor arg in args: \n\t\tif arg in config: del config[arg]\n\t\telse: print('[WARNING] cannot unset %s because it is absent'%arg)\n\twrite_config(config)\n\ndef config(text=False):\n\t\"\"\"Print the configuration.\"\"\"\n\tglobal conf,config_fn # from __init__.py\n\tcheck_ready()\n\ttreeview({config_fn:conf},style={False:'unicode',True:'pprint','json':'json'}[text])\n\ndef set_dict(*args,**kwargs):\n\t\"\"\"\n\tAdd a dictionary hash to the configuration.\n\tNote that sending a pythonic hash through makefile otherwise requires the following clumsy syntax\n\twhich uses escaped quotes and quotes to escape makefile parsing and protect the insides:\n\tmake set env_ready=\\\"\"{'CONDA_PREFIX':'/Users/rpb/worker/factory/env/envs/py2'}\"\\\"\n\tThe standard method names the hash with the first argument and the rest are key,value pairs.\n\tThe standard method also accepts kwargs which override any args.\n\tWe use interpret_command_text to allow Pythonic inputs.\n\tNote that the Makefile is extremely limited on incoming data, hence you must be careful to use the double\n\tquote escape method described above. The Makefile does not tolerate colons or slashes without this\n\tprotection. 
We also cannot necessarily pipe e.g. JSON into Python with the special Makefile. Hence the\n\tprotected pythonic strings give us full, if not necessarily elegant, control over the config from \n\tBASH. Casual users can still manipulate the config easily. More complicated BASH manipulation should\n\tbe scripted, or ideally, placed in a Python script which just uses read_config and write_config.\n\tThe alternative mode allows you to specify a path to the child node (empty nested dicts are created \n\totherwise) and a value to store there. In combination with the protected Pythonic input trick above,\n\tthis allows complete control of the arbitrarily nested dict stored in config.json.\n\tSee ortho/devnotes.txt for more details.\n\t\"\"\"\n\t# alternative mode for deep dives into the nested dictionary\n\tif set(kwargs.keys())=={'path','value'} and not args:\n\t\ttry: path = eval(str(kwargs['path']))\n\t\texcept Exception as e: raise Exception('failed to eval the path, exception: %s'%e)\n\t\tif type(path)!=tuple: raise Exception('path must be Pythonic tuple: %s'%path)\n\t\tdelveset(conf,*path,value=kwargs['value'])\n\t\twrite_config(conf)\n\t\treturn\n\t# standard execution mode cannot do a deep dive into the dict\n\tuse_note = '`make set_dict key_3=val3 ...`'\n\tif len(args)==0 or len(args)%2!=1: \n\t\tprint('usage',use_note)\n\t\traise Exception('invalid arguments args=%s kwargs=%s'%(str(args),kwargs))\n\tname,pairs = args[0],args[1:]\n\tprint(pairs)\n\tpairwise = dict(zip(pairs[::2],pairs[1::2]))\n\tpairwise.update(**kwargs)\n\tfor key,val in pairwise.items():\n\t \tpairwise[key] = interpret_command_text(val)\n\tconf[name] = pairwise\n\twrite_config(conf)\n\ndef config_fold(fn,key):\n\t\"\"\"Update the config dictionary with a python script.\"\"\"\n\t#! python 2 vs 3 compatibility\n\tincoming = {}\n\texecfile(fn,incoming)\n\tif key not in incoming: raise Exception('key must exist in file')\n\tdelveset(conf,key,value=incoming[key])\n\twrite_config(conf)\n\ndef look(at='config.json'):\n\t\"\"\"Drop into a debugger with the conf available.\"\"\"\n\t#! 
replace this with code.interact?\n\tif at and not os.path.isfile(at): raise Exception('cannot find %s'%at)\n\telif at:\n\t\tname = re.sub(r'\\.','_',re.match(r'^(.*?)\\.json',at).group(1))\n\t\twith open(at) as fp: globals()[name] = json.load(fp)\n\t\tprint('status','looking at %s as %s'%(at,name))\n\telse: pass\n\ttry: \n\t\timport ipdb\n\t\tipdb.set_trace()\n\texcept: pass\n\ttry:\n\t\timport pdb\n\t\tpdb.set_trace()\n\texcept: raise Exception('cannot find ipdb or pdb')\n","sub_path":"ortho/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"285417672","text":"from global_imports import *\nfrom gaiatest import GaiaTestCase\n\nclass main(GaiaTestCase):\n #################################################################################\n #\n # Methods which deal with reporting the results.\n #\n def screenShot(self, p_fileSuffix):\n #\n # Take a screenshot.\n #\n outFile = os.environ['RESULT_DIR'] + \"/\" + p_fileSuffix + \".png\"\n screenshot = self.marionette.screenshot()[22:] \n with open(outFile, 'w') as f:\n f.write(base64.decodestring(screenshot)) \n return outFile\n\n def screenShotOnErr(self):\n #\n # Take a screenshot on error (increments the file number).\n #\n\n #\n # Build the error filename.\n #\n self.errNum = self.errNum + 1\n fnam = self.testNum + \"_err_\" + str(self.errNum)\n \n #\n # Record the screenshot.\n #\n screenDump = self.screenShot(fnam)\n \n #\n # Dump the current page's html source too.\n #\n htmlDump = os.environ['RESULT_DIR'] + \"/\" + fnam + \".html\"\n self.savePageHTML(htmlDump)\n return (htmlDump, screenDump)\n\n def savePageHTML(self, p_outfile):\n #\n # Save the HTML of the current page to the specified file.\n #\n f = open(p_outfile, 'w')\n f.write( self.marionette.page_source.encode('ascii', 'ignore') )\n\n\n","sub_path":"OWDTestToolkit/utils/debug_utilities.py","file_name":"debug_utilities.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"256636517","text":"from tkinter import *\r\nimport os\r\n\r\n\r\nwin= Tk()\r\n\r\ndef run():\r\n os.system('py D:\\FF14AlphaProject\\classes.py')\r\n\r\ndef run1():\r\n os.system('py D:\\FF14AlphaProject\\other.py')\r\n\r\n\r\n\r\nbutton1=Button(text=\"Gear\", font='times 12', command = run)\r\nbutton1.grid(column=0, row=0)\r\n\r\nbutton2=Button(text=\"Other\", font='times 12', command = run1)\r\nbutton2.grid(column=1, row=0)\r\n\r\n\r\n\r\n\r\n\r\n\r\nwin.mainloop()","sub_path":"FF14AlphaProject/mainwin.py","file_name":"mainwin.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"573922588","text":"\n\"\"\"\nhub_pip\nUsage:\n hub_pip create-config []\n hub_pip \n [\n ((-c | --Config) )\n --Hub-Url=\n --Hub-Username=\n --Hub-Password=\n --Hub-Proxy-Host=\n --Hub-Proxy-Port=\n --Hub-Proxy-Username=\n --Hub-Proxy-Password=\n --Hub-Timeout=<120>\n --Hub-ScanTimeout=<300>\n --Hub-CodeLocationName=\n --OutputDirectory=\n --RequirementsFile=\n --IgnoreFailure=\n --CreateFlatDependencyList=\n --CreateTreeDependencyList=\n --CreateHubBdio=\n --DeployHubBdio=\n --CheckPolicies=\n --VerifySSL=\n ]\n\nOptions:\n -h --help Show this screen.\n --version Show version.\n\nExamples:\n hub-pip -c config.ini --DeployHubBdio=True\nHelp:\n For help using this tool, please open an issue on the Github_pip 
repository:\n https://github.com/BlackDuckSoftware/hub-python-plugin\n\"\"\"\n\nfrom inspect import getmembers, isclass\nimport os\n\nfrom docopt import docopt\n\nfrom hub_pip.BlackDuckConfig import BlackDuckConfig\nfrom hub_pip.BlackDuckCore import BlackDuckCore\n\nfrom . import __version__ as VERSION\n\n\ndef cli():\n \"\"\"Main CLI entrypoint.\"\"\"\n options = docopt(__doc__, version=VERSION)\n\n if options[\"\"]:\n options[\"--Project-Name\"] = options[\"\"]\n\n if options[\"\"]:\n options[\"--Project-Version\"] = options[\"\"]\n\n if options[\"create-config\"]:\n if options[\"\"]:\n copy_config(path=options[\"\"])\n else:\n copy_config()\n else:\n main(options)\n\n\ndef main(options):\n \"\"\"Build config file from options\"\"\"\n config_str = \"[Black Duck Config]\\n\"\n\n for key, value in options.items():\n if \"--\" in key and value is not None and value is not \"--Config\":\n field = key.replace(\"--\", \"\")\n config_option = field + \" = \" + str(value) + \"\\n\"\n config_str += config_option\n\n config = None\n if options[\"-c\"] or options[\"--Config\"]:\n config_file_path = options[\"\"]\n config = BlackDuckConfig.from_file(config_file_path)\n\n config = BlackDuckConfig.from_string(config_str, black_duck_config=config)\n\n core = BlackDuckCore(config)\n core.run()\n\n\nsample_config = \"\"\"\n# This is a sample comment\n; This is also a sample comment\n# Values are set to None are set to default values\n\n[Black Duck Config]\nHub-Url = None\nHub-Username = None\nHub-Password = None\n\nVerifySSL = True\n\nHub-Proxy-Host = None\nHub-Proxy-Port = None\nHub-Proxy-Username = None\nHub-Proxy-Password = None\n\nHub-Timeout = 120\nHub-ScanTimeout = 300\n\nHub-CodeLocationName = None\n\nOutputDirectory = build/output/\nRequirementsFile = None\n\nIgnoreFailure = False\nCreateFlatDependencyList = True\nCreateTreeDependencyList = True\nCreateHubBdio = True\nDeployHubBdio = True\nCheckPolicies = True\n\nProject-Name = None\nProject-Version = None\n\"\"\"\n\n\ndef copy_config(path=None):\n fullpath = os.path.join(os.getcwd(), \"hub_config.ini\")\n if path is None:\n path = fullpath\n with open(path, \"w+\") as file:\n file.write(sample_config)\n print(\"Created a sample config file @ \" + path)\n","sub_path":"src/hub_pip/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"247952470","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Read dataset for CTC network (CSJ corpus).\n In addition, frame stacking and skipping are used.\n You can use the multi-GPU version.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os.path import join, basename\nimport pickle\nimport random\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils.frame_stack import stack_frame\nfrom utils.sparsetensor import list2sparsetensor\nfrom utils.progressbar import wrap_iterator\n\n\nclass DataSet(object):\n \"\"\"Read dataset.\"\"\"\n\n def __init__(self, data_type, train_data_size, label_type, batch_size,\n num_stack=None, num_skip=None,\n is_sorted=True, is_progressbar=False, num_gpu=1):\n \"\"\"\n Args:\n data_type: string, train, dev, eval1, eval2, eval3\n train_data_size: string, default or large\n label_type: string, phone or character or kanji\n batch_size: int, the size of mini-batch\n num_stack: int, the number of frames to stack\n num_skip: int, the number of frames to skip\n is_sorted: if True, sort dataset by frame num\n is_progressbar: if True, visualize progressbar\n num_gpu: int, if more than 1, divide batch_size by num_gpu\n \"\"\"\n if data_type not in ['train', 'dev', 'eval1', 'eval2', 'eval3']:\n raise ValueError(\n 'data_type must be \"train\", \"dev\", \"eval1\", \"eval2\" or \"eval3\".')\n\n self.data_type = data_type\n self.train_data_size = train_data_size\n self.label_type = label_type\n self.batch_size = batch_size * num_gpu\n self.num_stack = num_stack\n self.num_skip = num_skip\n self.is_sorted = is_sorted\n self.is_progressbar = is_progressbar\n self.num_gpu = num_gpu\n\n self.input_size = 123\n self.dataset_path = join(\n '/n/sd8/inaguma/corpus/csj/dataset/monolog/ctc/',\n label_type, train_data_size, data_type)\n\n # Load the frame number dictionary\n self.frame_num_dict_path = join(\n self.dataset_path, 'frame_num.pickle')\n with open(self.frame_num_dict_path, 'rb') as f:\n self.frame_num_dict = pickle.load(f)\n\n # Sort paths to input & label by frame num\n print('=> loading paths to dataset...')\n self.frame_num_tuple_sorted = sorted(\n self.frame_num_dict.items(), key=lambda x: x[1])\n input_paths, label_paths = [], []\n for input_name, frame_num in wrap_iterator(self.frame_num_tuple_sorted,\n self.is_progressbar):\n speaker_name = input_name.split('_')[0]\n input_paths.append(join(self.dataset_path, 'input',\n speaker_name, input_name + '.npy'))\n label_paths.append(join(self.dataset_path, 'label',\n speaker_name, input_name + '.npy'))\n self.input_paths = np.array(input_paths)\n self.label_paths = np.array(label_paths)\n self.data_num = len(self.input_paths)\n\n if (self.num_stack is not None) and (self.num_skip is not None):\n self.input_size = self.input_size * num_stack\n # NOTE: the dataset itself is not loaded yet\n\n self.rest = set([i for i in range(self.data_num)])\n\n if data_type in ['eval1', 'eval2', 'eval3'] and label_type == 'kanji':\n self.is_test = True\n else:\n self.is_test = False\n\n def next_batch(self, batch_size=None, session=None):\n \"\"\"Make mini-batch.\n Args:\n batch_size: int, the size of mini-batch\n session: a tf.Session instance, required when num_gpu > 1\n Returns:\n inputs: list of input data, size `[batch_size]`\n labels_st: list of SparseTensor of labels\n if num_gpu > 1, list of labels_st, size of num_gpu\n inputs_seq_len: list of length of inputs of size `[batch_size]`\n input_names: list of file name of input data of size `[batch_size]`\n \"\"\"\n if session is None and 
self.num_gpu != 1:\n raise ValueError('Set session when using multiple GPUs.')\n\n if batch_size is None:\n batch_size = self.batch_size\n\n next_epoch_flag = False\n\n while True:\n #########################\n # sorted dataset\n #########################\n if self.is_sorted:\n if len(self.rest) > batch_size:\n sorted_indices = list(self.rest)[:batch_size]\n self.rest -= set(sorted_indices)\n else:\n sorted_indices = list(self.rest)\n self.rest = set(\n [i for i in range(self.data_num)])\n next_epoch_flag = True\n if self.data_type == 'train':\n print('---Next epoch---')\n\n # Shuffle selected mini-batch\n random.shuffle(sorted_indices)\n\n # Load dataset in mini-batch\n input_list, label_list, input_name_list = [], [], []\n for i in sorted_indices:\n # input_list.append(np.load(self.input_paths[i]))\n # label_list.append(np.load(self.label_paths[i]))\n # input_name_list.append(basename(\n # self.input_paths[i]).split('.')[0])\n input_list.append(np.load(np.take(self.input_paths, i,\n axis=0)))\n label_list.append(np.load(np.take(self.label_paths, i,\n axis=0)))\n input_name_list.append(\n basename(np.take(self.input_paths, i,\n axis=0)).split('.')[0])\n input_list = np.array(input_list)\n label_list = np.array(label_list)\n input_name_list = np.array(input_name_list)\n\n # Frame stacking\n if (self.num_stack is not None) and (self.num_skip is not None):\n stacked_input_list = stack_frame(\n input_list,\n self.input_paths[sorted_indices],\n self.frame_num_dict,\n self.num_stack,\n self.num_skip,\n is_progressbar=False)\n input_list = np.array(stacked_input_list)\n\n # Compute max frame num in mini-batch\n max_frame_num = max(map(lambda x: x.shape[0], input_list))\n\n # Compute max target label length in mini-batch\n max_seq_len = max(map(len, label_list))\n\n # Initialization\n inputs = np.zeros(\n (len(sorted_indices), max_frame_num, self.input_size))\n # Padding with -1\n labels = np.array([[-1] * max_seq_len]\n * len(sorted_indices), dtype=int)\n inputs_seq_len = np.empty((len(sorted_indices),), dtype=int)\n input_names = [None] * len(sorted_indices)\n\n # Set values of each data in mini-batch\n for i_batch in range(len(sorted_indices)):\n data_i = input_list[i_batch]\n frame_num = data_i.shape[0]\n inputs[i_batch, :frame_num, :] = data_i\n labels[i_batch, :len(label_list[i_batch])\n ] = label_list[i_batch]\n inputs_seq_len[i_batch] = frame_num\n input_names[i_batch] = input_name_list[i_batch]\n\n #########################\n # not sorted dataset\n #########################\n else:\n if len(self.rest) > batch_size:\n # Randomly sample mini-batch\n random_indices = random.sample(\n list(self.rest), batch_size)\n self.rest -= set(random_indices)\n else:\n random_indices = list(self.rest)\n self.rest = set([i for i in range(self.data_num)])\n next_epoch_flag = True\n if self.data_type == 'train':\n print('---Next epoch---')\n\n # Shuffle selected mini-batch\n random.shuffle(random_indices)\n\n # Load dataset in mini-batch\n input_list, label_list, input_name_list = [], [], []\n for i in random_indices:\n # input_list.append(np.load(self.input_paths[i]))\n # label_list.append(np.load(self.label_paths[i]))\n # input_name_list.append(\n # basename(self.input_paths[i]).split('.')[0])\n input_list.append(\n np.load(np.take(self.input_paths, i, axis=0)))\n label_list.append(\n np.load(np.take(self.label_paths, i, axis=0)))\n input_name_list.append(\n basename(np.take(self.input_paths, i, axis=0)).split('.')[0])\n input_list = np.array(input_list)\n label_list = np.array(label_list)\n 
input_name_list = np.array(input_name_list)\n\n # Frame stacking\n if (self.num_stack is not None) and (self.num_skip is not None):\n stacked_input_list = stack_frame(\n input_list,\n self.input_paths[random_indices],\n self.frame_num_dict,\n self.num_stack,\n self.num_skip,\n is_progressbar=False)\n input_list = np.array(stacked_input_list)\n\n # Compute max frame num in mini-batch\n max_frame_num = max(map(lambda x: x.shape[0], input_list))\n\n # Compute max target label length in mini-batch\n max_seq_len = max(map(len, label_list))\n\n # Initialization\n inputs = np.zeros(\n (len(random_indices), max_frame_num, self.input_size))\n # Padding with -1\n labels = np.array([[-1] * max_seq_len]\n * len(random_indices), dtype=int)\n inputs_seq_len = np.empty((len(random_indices),), dtype=int)\n input_names = [None] * len(random_indices)\n if self.is_test:\n labels = [None] * len(random_indices)\n\n # Set values of each data in mini-batch\n for i_batch in range(len(random_indices)):\n data_i = input_list[i_batch]\n frame_num = data_i.shape[0]\n inputs[i_batch, : frame_num, :] = data_i\n if not self.is_test:\n labels[i_batch, :len(label_list[i_batch])\n ] = label_list[i_batch]\n else:\n labels[i_batch] = label_list[i_batch]\n inputs_seq_len[i_batch] = frame_num\n input_names[i_batch] = input_name_list[i_batch]\n\n if self.num_gpu > 1:\n divide_num = self.num_gpu\n if next_epoch_flag:\n for i in range(self.num_gpu, 0, -1):\n if len(self.rest) % i == 0:\n divide_num = i\n break\n next_epoch_flag = False\n\n # Now we split the mini-batch data by num_gpu\n inputs = tf.split(inputs, divide_num, axis=0)\n labels = tf.split(labels, divide_num, axis=0)\n inputs_seq_len = tf.split(inputs_seq_len, divide_num, axis=0)\n input_names = tf.split(input_names, divide_num, axis=0)\n\n # Convert from SparseTensor to numpy.ndarray\n inputs = list(map(session.run, inputs))\n labels = list(map(session.run, labels))\n labels_st = list(map(list2sparsetensor, labels))\n inputs_seq_len = list(map(session.run, inputs_seq_len))\n input_names = list(map(session.run, input_names))\n\n else:\n labels_st = list2sparsetensor(labels)\n\n yield inputs, labels_st, inputs_seq_len, input_names\n","sub_path":"experiments/csj/data/read_dataset_ctc.py","file_name":"read_dataset_ctc.py","file_ext":"py","file_size_in_byte":12622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"507224423","text":"#!/usr/bin/env python3\n\ndef inter_money(period,\n interest,\n amount_money):\n # TODO it may be necessary to add a recalculation of time intervals and percentages\n # period - the amount of time for which money is invested\n # interest - the interest rate at which money is invested\n # amount_money - the amount of money invested\n # inter_money - the amount of money received as a percentage of the money invested\n int_money = interest / 100 * period * amount_money\n return int_money\n\n\ndef inter_rate(interest_money,\n amount_money):\n # TODO it may be necessary to add a recalculation of time intervals and percentages\n # inter_rate - the ratio of the amount of money received from investments to the amount of money invested\n # amount_money - the amount of money invested\n # interest_money - the amount of money received as a percentage of the money invested\n int_rate = interest_money / amount_money * 100\n return int_rate\n\n\ndef discount_rate(interest_money,\n accrued_amount):\n # TODO it may be necessary to add a recalculation of time intervals and percentages\n # interest_money - 
the amount of money received as a percentage of the money invested\n    # accrued_amount - total amount of money after investment\n    disc_rate = interest_money / accrued_amount * 100\n    return disc_rate\n\n\ndef capital_growth_index(accrued_amount,\n                         amount_money):\n    # TODO it may be necessary to add a recalculation of time intervals and percentages\n    # accrued_amount - total amount of money after investment\n    # amount_money - the amount of money invested\n    cap_grow_ind = accrued_amount / amount_money\n    return cap_grow_ind\n\n\ndef simple_inter(period,\n                 interest,\n                 amount_money):\n    # TODO it may be necessary to add a recalculation of time intervals and percentages\n    # period - the amount of time for which money is invested\n    # interest - the interest rate at which money is invested\n    # amount_money - the amount of money invested\n    # interest_money - the amount of money received as a percentage of the money invested\n    simple_int = amount_money + inter_money(period, interest, amount_money)\n    return simple_int\n\n\ndef comp_inter(period,\n               replenishment_period,\n               interest,\n               amount_money,\n               amount_money_replenishment):\n    # TODO it may be necessary to add a settlement with different\n    # replenishment times and different capitalization times\n    # period - the amount of time for which money is invested\n    # replenishment_period - the investment replenishment period\n    # interest - the interest rate at which money is invested\n    # amount_money - the amount of money invested\n    # amount_money_replenishment - the amount of money to replenish investments\n    for i in range(int(period / replenishment_period) - 1):\n        amount_money = amount_money + \\\n                       inter_money(1, interest, amount_money) + \\\n                       amount_money_replenishment\n    comp_int = amount_money\n    return comp_int\n\n\ndef differentiated_loan_payment(amount_of_money,\n                                period,\n                                interest):\n    # period - the loan term in years\n    # interest - the annual interest rate of the loan\n    # amount_of_money - the loan principal\n    arr = []\n    mp_cnt = period * 12\n    rest = amount_of_money\n    mp_real = amount_of_money / (period * 12.0)\n    while mp_cnt != 0:\n        mp = mp_real + (rest * interest / 1200)\n        arr.append(round(mp, 2))\n        rest = rest - mp_real\n        mp_cnt = mp_cnt - 1\n    return arr, round(sum(arr), 2)\n\n\ndef annuity_loan_payments(amount_of_money,\n                          period,\n                          interest):\n    mp_cnt = period * 12\n    r = interest / 1200.0\n    ak = (r * (1 + r) ** mp_cnt) / (((1 + r) ** mp_cnt) - 1)\n    mp = amount_of_money * ak\n    total = mp * mp_cnt\n    return round(mp, 2), round(total, 2)\n","sub_path":"bii.py","file_name":"bii.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"188518120","text":"#Calculate the minimal chromosome length in a training track set.\nimport numpy as np\nimport sys\nimport ModelUtil as mu\nfrom Common import chrList, trackListT, trackListT2, trackListV, trackSize\ntrackList = trackListT2\nlog = mu.Logger()\n\nchrMinLen = {}\nfor chr in chrList:\n    chrMinLen[chr] = sys.maxsize \n    for k, tr in enumerate(trackList):\n        seq = log.LoadBlob(chr + '.'+ tr)\n        chrMinLen[chr] = min(chrMinLen[chr], seq.shape[0])\n        print(chr, '.', tr)\n\nprint(chrMinLen)\n\nfor t in trackSize:\n    if chrMinLen[t] < trackSize[t]:\n        print('%s: %d < %d'%(t, chrMinLen[t], 
trackSize[t]))","sub_path":"GetMinChrSize.py","file_name":"GetMinChrSize.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"331270588","text":"#Divide and Conquer Algorithm\n# TO-DO: complete the helper function below to merge 2 sorted arrays\ndef merge( arrA, arrB):\n    elements = len( arrA ) + len( arrB )\n    merged_arr = [0] * elements\n    j = 0\n    k = 0\n    # TO-DO\n    for i in range(0, elements): \n        if j >= len(arrA):\n            merged_arr[i]= arrB[k] #indices to second var is k\n            k += 1\n        elif k >= len(arrB): #checking element of each one\n            merged_arr[i]= arrA[j]\n            j += 1\n        elif arrA[j] < arrB[k]:\n            merged_arr[i] = arrA[j]\n            j += 1\n        else: \n            merged_arr[i] = arrB[k]\n            k += 1\n    return merged_arr\n    \n    \n# arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]\n\n\n# TO-DO: implement the Merge Sort function below USING RECURSION\ndef merge_sort( arr ):\n    # TO-DO\n    if len(arr) > 1:\n        left = merge_sort(arr[0: len(arr)// 2]) \n        right = merge_sort(arr[len(arr)// 2:]) #starts in the middle\n        arr = merge(left,right)\n\n    return arr\n\n\n# STRETCH: implement an in-place merge sort algorithm\ndef merge_in_place(arr, start, mid, end):\n    # TO-DO\n#AAAHHHHHH\n\n\n    return arr\n\ndef merge_sort_in_place(arr, l, r): \n    # TO-DO\n\n    return arr\n\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\ndef timsort( arr ):\n\n    return arr\n","sub_path":"src/recursive_sorting/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"127116819","text":"from math import sqrt\n\ndef gap(g, m, n):\n    # better use previous prime to track down prime number one by one!\n    for i in range(m, n+1-g):\n        if isPrime(i) and isPrime(i+g):\n            found = True\n            for j in range(i+2, i+g-1):\n                if isPrime(j):\n                    found = False\n                    break\n            if found == True:\n                return [i, i+g] \n    return None\n\ndef isPrime(n):\n    if n < 2:\n        return False\n    if n == 2: \n        return True\n    if n > 2: \n        if n % 2 == 0:\n            return False\n        for i in range(3, int(sqrt(n)+1), 2):\n            if n % i == 0:\n                return False\n    return True\n\ntest = [i for i in range(100, 111) if isPrime(i)]\nprint(test)\nprint(gap(6, 100, 110))\ntest2 = [i for i in range(300, 400) if isPrime(i)]\nprint(test2)\nprint(gap(10, 300, 400))\n","sub_path":"gapInPrimes.py","file_name":"gapInPrimes.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"118648573","text":"from src.detection.box_walker import BoxWalker\nfrom src.base.node import Node\n\n\ndef test_load_tile(small_bbox):\n    walker = BoxWalker(small_bbox)\n    walker.load_tiles()\n    assert walker.tile is not None\n\n\ndef test_load_streets(small_bbox):\n    walker = BoxWalker(small_bbox)\n    walker.load_streets()\n    assert walker.streets is not None\n\n\ndef test_walk(zurich_bellevue):\n    walker = BoxWalker(zurich_bellevue)\n    walker.load_convnet()\n    walker.load_tiles()\n    walker.load_streets()\n    walker.walk()\n    crosswalk_nodes = walker.plain_result\n    assert crosswalk_nodes is not None\n    assert len(crosswalk_nodes) > 0\n\n\ndef test_compare_detected_with_osm_same_points(small_bbox, node1, node2):\n    walker = BoxWalker(small_bbox)\n    detected_crosswalks = [node1, node2]\n    walker.osm_crosswalks = detected_crosswalks\n    result = walker._compare_osm_with_detected_crosswalks(detected_crosswalks)\n    assert len(result) == 
0\n\n\ndef test_compare_detected_with_osm_near_points(small_bbox):\n walker = BoxWalker(small_bbox)\n detected_crosswalks = [Node(47.0, 8.0), Node(47.1, 8.1), Node(47.2, 8.2)]\n walker.osm_crosswalks = [Node(47.000001, 8.0), Node(47.100001, 8.1), Node(48.2, 8.2)]\n result = walker._compare_osm_with_detected_crosswalks(detected_crosswalks)\n assert len(result) == 1\n\n\ndef test_compare_detected_with_osm_different_points(small_bbox):\n walker = BoxWalker(small_bbox)\n detected_crosswalks = [Node(47.0, 8.0), Node(47.1, 8.1)]\n walker.osm_crosswalks = [Node(48.0, 8.0), Node(48.1, 8.1)]\n result = walker._compare_osm_with_detected_crosswalks(detected_crosswalks)\n assert len(result) == 2\n","sub_path":"tests/detection/test_box_walker.py","file_name":"test_box_walker.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"477836214","text":"# -*- coding: utf-8 -*-\n\nfrom random import choice\n\nfrom marshmallow import ValidationError\n\nfrom pyramid.view import view_defaults\nfrom pyramid.view import view_config\nfrom pyramid.renderers import render_to_response\nfrom pyramid.httpexceptions import HTTPBadRequest\n\nfrom sqlalchemy import sql\n\nfrom amnesia.modules.folder import FolderEntity\nfrom amnesia.modules.folder import FolderBrowser\nfrom amnesia.modules.folder.validation import FolderBrowserSchema\nfrom amnesia.modules.file import File\nfrom amnesia.views import BaseView\n\n\ndef includeme(config):\n config.scan(__name__)\n\n\n@view_defaults(context=FolderEntity)\nclass FolderBrowserView(BaseView):\n\n @view_config(request_method='GET', renderer='json',\n name='browse', accept='application/json')\n def browse_json(self):\n params = self.request.GET.mixed()\n schema = FolderBrowserSchema(context={'request': self.request})\n\n try:\n data = schema.load(params)\n except ValidationError as error:\n raise HTTPBadRequest(error.messages)\n\n browser = FolderBrowser(self.context.entity, self.request.dbsession)\n result = browser.query(**data)\n schema = self.context.get_validation_schema()\n return {'results': schema.dump(result.query.all(), many=True)}\n\n @view_config(request_method='GET', name='browse')\n def browse(self, **kwargs):\n params = self.request.GET.mixed()\n schema = FolderBrowserSchema(context={'request': self.request})\n\n try:\n data = schema.load(params)\n except ValidationError as error:\n raise HTTPBadRequest(error.messages)\n\n browser = FolderBrowser(self.context.entity, self.request.dbsession)\n result = browser.query(**data)\n\n data.update(result._asdict())\n data.update(kwargs)\n data['content'] = self.context.entity\n data['options'] = ()\n\n response = render_to_response('amnesia:templates/folder/_browse.xml',\n data, request=self.request)\n response.content_type = 'application/xml'\n\n return response\n\n @view_config(request_method='GET', name='browse_events')\n def browse_events(self):\n container_id = self.request.registry.settings['default_event_pictures_container_id']\n random_pictures = self.request.dbsession.query(File).filter_by(\n container_id=container_id).order_by(sql.func.random()).limit(50).all()\n random_picture = lambda: choice(random_pictures)\n return self.browse(random_picture=random_picture)\n\n @view_config(request_method='GET', name='browse_news')\n def browse_news(self):\n container_id = self.request.registry.settings['default_news_pictures_container_id']\n random_pictures = self.request.dbsession.query(File).filter_by(\n 
container_id=container_id).order_by(sql.func.random()).limit(50).all()\n random_picture = lambda: choice(random_pictures)\n return self.browse(random_picture=random_picture)\n\n","sub_path":"amnesia/modules/folder/views/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"582853386","text":"#\n# Example python program to run on the computer to process commands from\n# the Serial Port. Use in conjunction with the ComputerController Sketch\n# to control the computer with an IR remote.\n#\n# Make sure you set the serial port to match your Arduino configuration.\n#\nimport serial\nimport os\nfrom pymouse import PyMouse\nfrom pykeyboard import PyKeyboard\n\nser=serial.Serial('/dev/ttyUSB0',19200,timeout=999)\nkey=PyKeyboard()\n\nwhile (1 == 1):\n line=ser.readline()\n tokens=line.split()\n print(tokens)\n if (tokens[0] == 'Sense'):\n intro()\n elif (tokens[0] == \"NoSense\"):\n speak(\"Goodbye. Thanks for visiting.\")\n elif (tokens[0] == \"Cmd\"):\n if (tokens[1] == \"Up\"):\n key.tap_key(key.up_key);\n elif (tokens[1] == \"Down\"):\n key.tap_key(key.down_key);\n elif (tokens[1] == \"Left\"):\n key.tap_key(key.left_key)\n elif (tokens[1] == \"Right\"):\n key.tap_key(key.right_key)\n elif (tokens[0] == \"ScrollDown\"):\n for i in range(0,5):\n key.tap_key(key.down_key)\n elif (tokens[0] == \"IRCode\"):\n print(tokens[1]);\n\n# clear the serial buffer \n ser.flushInput()\n","sub_path":"ComputerController/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"65395973","text":"# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# powered by Canal TP (www.canaltp.fr).\n# Help us simplify mobility and open public transport:\n# a non ending quest to the responsive locomotion way of traveling!\n#\n# LICENCE: This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n# Stay tuned using\n# twitter @navitia\n# IRC #navitia on freenode\n# https://groups.google.com/d/forum/navitia\n# www.navitia.io\nfrom typing import Optional\n\nimport flask_restful\nfrom flask import Response\nfrom flask import request\nfrom marshmallow import ValidationError\nfrom pymongo.errors import PyMongoError, DuplicateKeyError\n\nfrom tartare.core import models\nfrom tartare.decorators import JsonDataValidate, RemoveLastActiveJob, \\\n    ValidateInputDataSourceIds, ValidateUniqueDataSources\nfrom tartare.exceptions import EntityNotFound\nfrom tartare.helper import setdefault_ids\nfrom tartare.http_exceptions import InvalidArguments, DuplicateEntry, InternalServerError, ObjectNotFound\nfrom tartare.interfaces import schema\nfrom tartare.processes.processes import ProcessManager\n\n\nclass Coverage(flask_restful.Resource):\n    @classmethod\n    def __pre_save_coverage(self, post_data: dict) -> models.Coverage:\n        coverage_schema = schema.CoverageSchema(strict=True)\n        processes = post_data.get('processes', [])\n        ProcessManager.check_processes_for_instance(processes, 'coverage')\n        setdefault_ids(processes)\n        try:\n            coverage = coverage_schema.load(post_data).data\n            coverage.add_computed_data_sources()\n            return coverage\n        except ValidationError as err:\n            raise InvalidArguments(err.messages)\n\n    @JsonDataValidate()\n    @ValidateInputDataSourceIds()\n    @ValidateUniqueDataSources()\n    def post(self) -> Response:\n        coverage = self.__pre_save_coverage(request.json)\n        try:\n            coverage.save()\n        except DuplicateKeyError as e:\n            raise DuplicateEntry('duplicate entry: {}'.format(str(e)))\n        except PyMongoError:\n            raise InternalServerError('impossible to add coverage')\n\n        response, status = self.get(coverage.id)\n        return response, 201\n\n    def get(self, coverage_id: Optional[str] = None) -> Response:\n        try:\n            if coverage_id:\n                result = schema.CoverageSchema().dump(models.Coverage.get(coverage_id))\n                return {'coverages': [result.data]}, 200\n            return {'coverages': schema.CoverageSchema(many=True).dump(models.Coverage.all()).data}, 200\n        except ValidationError as err:\n            raise InvalidArguments(err.messages)\n        except EntityNotFound as e:\n            raise ObjectNotFound(str(e))\n\n    def delete(self, coverage_id: str) -> Response:\n        c = models.Coverage.delete(coverage_id)\n        if c == 0:\n            raise ObjectNotFound(\"coverage '{}' not found\".format(coverage_id))\n        return \"\", 204\n\n    @JsonDataValidate()\n    @ValidateInputDataSourceIds()\n    @RemoveLastActiveJob()\n    @ValidateUniqueDataSources()\n    def put(self, coverage_id: str) -> Response:\n        post_data = request.json\n        if 'id' in post_data and coverage_id != post_data['id']:\n            raise InvalidArguments('the modification of the id is not possible')\n        post_data['id'] = coverage_id\n        new_coverage = self.__pre_save_coverage(request.json)\n        try:\n            existing_coverage = models.Coverage.get(coverage_id)\n            existing_coverage.update_with_object(new_coverage)\n        except EntityNotFound as e:\n            raise ObjectNotFound(str(e))\n        except DuplicateKeyError as e:\n            raise DuplicateEntry('duplicate entry: {}'.format(str(e)))\n        except PyMongoError:\n            raise InternalServerError('impossible to add coverage')\n        return self.get(coverage_id)\n","sub_path":"tartare/interfaces/coverages.py","file_name":"coverages.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"401156541","text":"import sys\nimport os\nimport time\n\nfrom mininet.net import Mininet\nfrom mininet.node import Controller, OVSKernelSwitch, 
RemoteController\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\n\nPORT = 6633\n#PORT = 4000\n\ndef multiCtrlNet(controllers, switch_count):\n\n net = Mininet(controller=RemoteController, switch=OVSKernelSwitch)\n\n c = [ net.addController('c%s' % ci, controller=RemoteController,ip=controllers[ci-1], port=PORT)\n for ci in range(1, len(controllers) + 1) ]\n\n s = [ net.addSwitch( 's%s' % si )\n for si in range(1, switch_count + 1) ]\n\n #net.build()\n\n for ctrl in c:\n ctrl.start()\n\n for switch in s:\n switch.start(c);\n time.sleep(1)\n\n CLI( net )\n net.stop()\n\nif __name__ == '__main__':\n \n if(len(sys.argv) < 2):\n raise SystemExit('Usage: %s switch_count controller_ips' % os.path.basename(__file__))\n\n #setLogLevel( 'debug' )\n multiCtrlNet(sys.argv[2:], int(sys.argv[1]))\n\n","sub_path":"java/test/mn_bm.py","file_name":"mn_bm.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"642116022","text":"# Given an array of positive integers nums and a positive integer target, \n# return the minimal length of a subarray whose sum is greater than or equal\n# to target. If there is no such subarray, return 0 instead.\n\n# Example 1:\n# Input: target = 7, nums = [2,3,1,2,4,3]\n# Output: 2\n# Explanation: The subarray [4,3] has the minimal length under the problem constraint.\n\n# Example 2:\n# Input: target = 4, nums = [1,4,4]\n# Output: 1\n\n# Example 3:\n# Input: target = 11, nums = [1,1,1,1,1,1,1,1]\n# Output: 0\n\n# Constraints:\n# 1 <= target <= 10^9\n# 1 <= nums.length <= 10^5\n# 1 <= nums[i] <= 10^4\n\n# Follow up: If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log(n)).\n\nfrom typing import List\n\nclass InitialSolution:\n def minSubArrayLen(self, target: int, nums: List[int]) -> int:\n total = start = end = 0\n length = len(nums) + 1\n while end < len(nums):\n total += nums[end]\n while total >= target:\n length = min(length, end - start + 1)\n total -= nums[start]\n start += 1\n end += 1\n return length if length <= len(nums) else 0\n","sub_path":"python/leetcode/medium/ex0101_0200/ex0209.py","file_name":"ex0209.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"262187497","text":"from string import ascii_lowercase\n\ntext =\"\"\"\nTake the block of text provided and strip() off the whitespace at the ends.\nSplit the whole block up by newline (\\n).\n if the first character is lowercase, split it into words and add the last word\nof that line to the results list.\nStrip the trailing dot (.) and exclamation mark (!) 
from the word first.\n finally return the results list!\n\"\"\"\n\n\ndef slice_and_dice(text: str = text) -> list:\n    \"\"\"Get a list of words from the passed in text.\n    See the Bite description for step by step instructions\"\"\"\n    results = []\n    paragraph = (text.strip()).splitlines()\n\n    for line in paragraph:\n        line = line.lstrip()\n        if line[0] not in ascii_lowercase:\n            continue\n\n        word = line.split(\" \")[-1].rstrip(\"!.\")\n        results.append(word)\n\n    return results\n\n\nprint(slice_and_dice(text))\n\nfrom typing import Tuple\n\ntext1 = \"\"\"\nThe Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\nvowels = 'aeiou'\n\n\ndef strip_vowels(txt: str) -> Tuple[str, int]:\n    \"\"\"Replace all vowels in the input text string by a star\n    character (*).\n    Return a tuple of (replaced_text, number_of_vowels_found)\n\n    So if this function is called like:\n    strip_vowels('hello world')\n\n    ... it would return:\n    ('h*ll* w*rld', 3)\n\n    The str/int types in the function definition above are part\n    of Python's new type hinting:\n    https://docs.python.org/3/library/typing.html\"\"\"\n    new_str = []\n    letter = list(txt)\n    num_vowels = 0\n\n    for c in letter:\n        if c.lower() in vowels:\n            c = '*'\n            num_vowels += 1\n        new_str.append(c)\n\n    return ''.join(new_str), num_vowels\n\n\nprint(strip_vowels(text1))\n\n\ndef filter_positive_even_numbers(numbers):\n    \"\"\"Receives a list of numbers, and returns a filtered list of only the\n    numbers that are both positive and even (divisible by 2), try to use a\n    list comprehension.\"\"\"\n    only_evens = [num for num in numbers if num % 2 == 0 if num > 0]\n    return only_evens\n\n\nlist_numbers = [0, -1, -3, -5]\nprint(filter_positive_even_numbers(list_numbers))\n\n\nfrom collections import namedtuple\n\nBeltStats = namedtuple('BeltStats', 'score ninjas')\n\nninja_belts = {'yellow': BeltStats(50, 11),\n               'orange': BeltStats(100, 7),\n               'green': BeltStats(175, 1),\n               'blue': BeltStats(250, 5)}\n\n\ndef get_total_points(belts=ninja_belts):\n    \"\"\"Calculate the amount of points rewarded on PyBites given the\n    ninja_belts dictionary, formula: belt score x belt owners (aka ninjas)\n    (of course there are more points but let's keep it simple)\n\n    Make your code generic so if we update ninja_belts to include\n    more belts (which we do in the tests) it will still work.\n\n    Ah and you can get score and ninjas from the namedtuple with nice\n    attribute access: belt.score / belt.ninjas (reason why we get\n    you familiar with namedtuple here, because we love them and use\n    them all over the place!)\n\n    Return the total number of points int from the function.\"\"\"\n    total_score = 0\n\n    for belt in belts.values():\n        total_score = total_score + belt.score*belt.ninjas\n\n    return 
total_score\n\n\nprint(get_total_points(ninja_belts))\n\n\nWORKOUT_SCHEDULE = {'Friday': 'Shoulders',\n 'Monday': 'Chest+biceps',\n 'Saturday': 'Rest',\n 'Sunday': 'Rest',\n 'Thursday': 'Legs',\n 'Tuesday': 'Back+triceps',\n 'Wednesday': 'Core'}\nREST, CHILL_OUT, TRAIN = 'Rest', 'Chill out!', 'Go train {}'\nINVALID_DAY = 'Not a valid day'\n\n\ndef get_workout_motd(day):\n \"\"\"First title case the passed in day argument\n (so monday or MONDAY both result in Monday).\n\n If day is not in WORKOUT_SCHEDULE, return INVALID_DAY\n\n If day is in WORKOUT_SCHEDULE retrieve the value (workout)\n and return the following:\n - weekday, return TRAIN with the workout value interpolated\n - weekend day (value 'Rest'), return CHILL_OUT\n\n Examples:\n - if day is Monday -> function returns 'Go train Chest+biceps'\n - if day is Thursday -> function returns 'Go train Legs'\n - if day is Saturday -> function returns 'Chill out!'\n - if day is nonsense -> function returns 'Not a valid day'\n\n Trivia: /etc/motd is a file on Unix-like systems that contains\n a 'message of the day'\n \"\"\"\n\n formatted_day = day.lower().capitalize()\n if formatted_day in WORKOUT_SCHEDULE.keys():\n if WORKOUT_SCHEDULE[formatted_day] == REST:\n return CHILL_OUT\n else:\n return TRAIN.format(WORKOUT_SCHEDULE[formatted_day])\n else:\n return INVALID_DAY\n\n\nprint(get_workout_motd('MONDAY'))\n\n\ndef divide_numbers(numerator, denominator):\n \"\"\"For this exercise you can assume numerator and denominator are of type\n int/str/float.\n Try to convert numerator and denominator to int types, if that raises a\n ValueError reraise it. Following do the division and return the result.\n However if denominator is 0 catch the corresponding exception Python\n throws (cannot divide by 0), and return 0\"\"\"\n try:\n num = int(numerator)\n den = int(denominator)\n\n except ValueError as err:\n raise ValueError('The input could not be converted') from err\n\n try:\n result = num / den\n return result\n except ZeroDivisionError as err:\n return 0\n\n\nprint(divide_numbers(1, 2))\nprint(divide_numbers(8, 2))\nprint(divide_numbers('3', '2'))\nprint(divide_numbers(8.2, 2))\nprint(divide_numbers(1, 2.9))\n# print(divide_numbers(2, 's'))\n# print(divide_numbers('s', 2))\n# print(divide_numbers('v', 'w'))\nprint(divide_numbers(10, 0))\n\n\nimport xml.etree.ElementTree as ET\n\n# from OMDB\nxmlstring = '''\n\n \n \n \n \n \n''' # noqa E501\n\n\ndef get_tree():\n \"\"\"You probably want to use ET.fromstring\"\"\"\n root = ET.fromstring(xmlstring)\n return root\n\n\ndef get_movies():\n \"\"\"Call get_tree and retrieve all movie titles, return a list or generator\"\"\"\n movies = []\n\n tree = get_tree()\n\n for branch in tree:\n movies.append(branch.get('title'))\n\n return movies\n\n\ndef get_movie_longest_runtime():\n \"\"\"Call get_tree again and return the movie title for the movie with the longest\n runtime in minutes, for latter consider adding a _get_runtime helper\"\"\"\n tree = get_tree()\n runtimes = {}\n\n for branch in tree:\n runtime = branch.get('runtime')\n runtimes[branch.get('title')] = ([int(s) for s in runtime.split() if s.isdigit()])\n\n longest_runtime = max(runtimes, key=runtimes.get)\n\n return longest_runtime\n\n\nprint(get_movies())\nprint(get_movie_longest_runtime())\n","sub_path":"pybites_intro.py","file_name":"pybites_intro.py","file_ext":"py","file_size_in_byte":8424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"403519897","text":"\n#figure 25.11, p.727\n#author: 
Luis Paris\n#solves ODE (integral actually) using the following methods w/ step-size h=0.5:\n#  - euler\n#  - heun\n#  - midpoint\n\nimport numpy as np, matplotlib.pyplot as plt\nimport _ode\n\ndef dfxy(x, y): return -2*x**3 + 12*x**2 - 20*x + 8.5\n\ndef f(x): return -0.5*x**4 + 4*x**3 - 10*x**2 + 8.5*x + 1\n\nclass Meth:\n    def __init__(self, name, func, color):\n        self.name=name; self.func=func; self.color=color; self.ypts=[]\n#end class\n\nmeths = [\n    Meth(\"euler\", _ode.euler, 'k'),\n    Meth(\"heun\", _ode.heun, 'g'),\n    Meth(\"midpoint\", _ode.midpoint, 'y'),\n]\n\nh= 0.5\nx0, xn = 0, 4\ny0 = 1\ntab = 15\n\nxpts = np.arange(x0, xn+h, h)\nfor meth in meths:\n    print(\"{}'s method:\".format(meth.name))\n    print(\"{:<{t}}{:<{t}}{:<{t}}{:<{t}}\".format(\"x\",\"ytrue\",\"y{}\".format(meth.name),\"|et|\",t=tab))\n    y = y0\n    for x in xpts:\n        et = abs(1-y/f(x))\n        meth.ypts.append(y)\n        print(\"{:<{t}}{:<{t}}{:<{t}}{:<{t}.1%}\".format(x, f(x), y, et, t=tab))\n        y = meth.func(dfxy, x, y, h)\n    #end for\n#end for\n\n#plot functions\nx = np.linspace(x0, xn, 1000)\nplt.plot(x, f(x), label=\"analytical\")\nfor meth in meths:\n    plt.plot(xpts, meth.ypts, '--{}o'.format(meth.color), label=meth.name)\nplt.legend(loc=\"upper left\", shadow=True)\nplt.grid(True)\nplt.show()\n","sub_path":"Assignment 6/fig-25.11.py","file_name":"fig-25.11.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"62214685","text":"# -*- coding:utf-8 -*-\n__author__ = 'lisong'\n\n\"\"\"\n“Great!” Exclaimed Sofia. “Now we have the password.”\n“To what exactly?” Quipped Nikola.\n“Untold treasures, vast riches beyond belief! Gold! Silver! Silicon! Hydraulic Fluid! Anything your heart desires!”\n“And you’re going to do what with a password to absolutely nothing?” Nikola smirked.\n“Oh... Right...”\nStephen spoke up. “Well, this door back here has a keypad. Only thing is the brackets look pretty busted up.\n We could try fixing it and then punching in the password?”\n“YES! That!” Sofia exclaimed.\nYou are given an expression with numbers, brackets and operators. For this task only the brackets matter.\nBrackets come in three flavors: \"{}\" \"()\" or \"[]\". Brackets are used to determine scope or to restrict some expression.\n If a bracket is open, then it must be closed with a closing bracket of the same type.\n The scope of a bracket must not be intersected by another bracket. In this task you should make a decision,\n whether to correct an expression or not based on the brackets. Do not worry about operators and operands.\n\nInput: An expression with different types of brackets as a string (unicode).\n\nOutput: A verdict on the correctness of the expression in boolean (True or False).\n\nExample:\n\ncheckio(\"((5+3)*2+1)\") == True\ncheckio(\"{[(3+1)+2]+}\") == True\ncheckio(\"(3+{1-1)}\") == False\ncheckio(\"[1+1]+(2*2)-{3/3}\") == True\ncheckio(\"(({[(((1)-2)+3)-3]/3}-3)\") == False\ncheckio(\"2+3\") == True\n\nHow it is used: When you write code or complex expressions in a mathematical package,\nyou can get a huge headache when it comes to excess or missing brackets. 
This concept can be useful for your own IDE.\n\nPrecondition:\nThere are only brackets (\"{}\" \"()\" or \"[]\"), digits or operators (\"+\" \"-\" \"*\" \"/\").\n0 < len(expression) < 10**3\n\"\"\"\n\ndef checkio(expression):\n    brackets = (u'(', u'[', u'{', u')', u']', u'}',)\n\n    my_stack = list()\n    res = True\n    for x in expression:\n        if x in brackets:\n            # print(my_stack)\n            if brackets.index(x) < 3:\n                my_stack.append(x)\n            else:\n                try:\n                    if brackets.index(my_stack.pop()) - brackets.index(x) != -3:\n                        res = False\n                        break;\n                except:\n                    res = False\n\n    return res and len(my_stack) == 0\n\n# These \"asserts\" are used only for self-checking and are not necessary for auto-testing\nif __name__ == '__main__':\n    assert checkio(u\"((5+3)*2+1)\") == True, \"Simple\"\n    assert checkio(u\"{[(3+1)+2]+}\") == True, \"Different types\"\n    assert checkio(u\"(3+{1-1)}\") == False, \") is alone inside {}\"\n    assert checkio(u\"[1+1]+(2*2)-{3/3}\") == True, \"Different operators\"\n    assert checkio(u\"(({[(((1)-2)+3)-3]/3}-3)\") == False, \"One is redundant\"\n    assert checkio(u\"2+3\") == True, \"No brackets, no problem\"\n\n'''\ndef checkio(data):\n    stack=[\"\"]\n    brackets={\"(\":\")\",\"[\":\"]\",\"{\":\"}\"}\n    for c in data:\n        if c in brackets:\n            stack.append(brackets[c])\n        elif c in brackets.values() and c!=stack.pop():\n            return False\n    return stack==[\"\"]\n\n#These \"asserts\" are used only for self-checking and are not necessary for auto-testing\nif __name__ == '__main__':\n    assert checkio(\"((5+3)*2+1)\") == True, \"Simple\"\n    assert checkio(\"{[(3+1)+2]+}\") == True, \"Different types\"\n    assert checkio(\"(3+{1-1)}\") == False, \") is alone inside {}\"\n    assert checkio(\"[1+1]+(2*2)-{3/3}\") == True, \"Different operators\"\n    assert checkio(\"(({[(((1)-2)+3)-3]/3}-3)\") == False, \"One is redundant\"\n'''","sub_path":"checkio/electronic_station/brackets.py","file_name":"brackets.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"631247643","text":"import json\nimport libnacl\nfrom indy import payment\nfrom indy import ledger, anoncreds\n\nfrom perf_load.perf_utils import ensure_is_reply, rawToFriendly\nfrom perf_load.perf_req_gen import NoReqDataAvailableException\nfrom perf_load.perf_req_gen_payment import RGBasePayment\n\n\nclass RGFeesNym(RGBasePayment):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._sources_amounts = {}\n        self._last_used = None\n\n    async def __retrieve_minted_sources(self):\n        for payment_address in self._payment_addresses:\n            self._sources_amounts[payment_address] = []\n            self._sources_amounts[payment_address].extend(await self._get_payment_sources(payment_address))\n\n    async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, *args, **kwargs):\n        await super().on_pool_create(pool_handle, wallet_handle, submitter_did, *args, **kwargs)\n        await self.__retrieve_minted_sources()\n\n        fees_req = await payment.build_set_txn_fees_req(wallet_handle, submitter_did, self._payment_method,\n                                                        json.dumps({\"1\": 1}))\n        for trustee_did in [self._submitter_did, *self._additional_trustees_dids]:\n            fees_req = await ledger.multi_sign_request(self._wallet_handle, trustee_did, fees_req)\n\n        resp = await ledger.submit_request(self._pool_handle, fees_req)\n        ensure_is_reply(resp)\n\n    def _rand_data(self):\n        raw = libnacl.randombytes(16)\n        req_did = rawToFriendly(raw)\n        return req_did\n\n    def _from_file_str_data(self, file_str):\n        raise NotImplementedError(\"ne 
_from_file_str_data\")\n\n    async def _gen_req(self, submit_did, req_data):\n        req = await ledger.build_nym_request(submit_did, req_data, None, None, None)\n\n        for ap in self._sources_amounts:\n            if self._sources_amounts[ap]:\n                (source, amount) = self._sources_amounts[ap].pop()\n                address = ap\n                inputs = [source]\n                outputs = [{\"recipient\": address, \"amount\": amount - 1}]\n                req_fees = await payment.add_request_fees(self._wallet_handle, submit_did, req,\n                                                          json.dumps(inputs),\n                                                          json.dumps(outputs), None)\n                return req_fees[0]\n        raise NoReqDataAvailableException()\n\n    async def on_request_replied(self, req_data, req, resp_or_exp):\n        if isinstance(resp_or_exp, Exception):\n            return\n\n        resp = resp_or_exp\n\n        try:\n            resp_obj = json.loads(resp)\n\n            if \"op\" not in resp_obj:\n                raise Exception(\"Response does not contain op field.\")\n\n            if resp_obj[\"op\"] == \"REQNACK\" or resp_obj[\"op\"] == \"REJECT\":\n                return\n                # self._sources_amounts.append((source, amount))\n            elif resp_obj[\"op\"] == \"REPLY\":\n                receipt_infos_json = await payment.parse_response_with_fees(self._payment_method, resp)\n                receipt_infos = json.loads(receipt_infos_json)\n                receipt_info = receipt_infos[0]\n                self._sources_amounts[receipt_info[\"recipient\"]].append((receipt_info[\"receipt\"], receipt_info[\"amount\"]))\n\n        except Exception as e:\n            print(\"Error on payment txn postprocessing: {}\".format(e))\n\n\nclass RGFeesSchema(RGFeesNym):\n    async def _gen_req(self, submit_did, req_data):\n        _, schema_json = await anoncreds.issuer_create_schema(submit_did, req_data,\n                                                              \"1.0\", json.dumps([\"name\", \"age\", \"sex\", \"height\"]))\n        schema_request = await ledger.build_schema_request(submit_did, schema_json)\n\n        for ap in self._sources_amounts:\n            if self._sources_amounts[ap]:\n                (source, amount) = self._sources_amounts[ap].pop()\n                address = ap\n                inputs = [source]\n                outputs = [{\"recipient\": address, \"amount\": amount - 1}]\n                req_fees = await payment.add_request_fees(self._wallet_handle, submit_did, schema_request,\n                                                          json.dumps(inputs),\n                                                          json.dumps(outputs), None)\n                return req_fees[0]\n        raise NoReqDataAvailableException()\n\n    async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, *args, **kwargs):\n        await super().on_pool_create(pool_handle, wallet_handle, submitter_did, *args, **kwargs)\n\n        fees_req = await payment.build_set_txn_fees_req(wallet_handle, submitter_did, self._payment_method,\n                                                        json.dumps({\"101\": 1}))\n        for trustee_did in [self._submitter_did, *self._additional_trustees_dids]:\n            fees_req = await ledger.multi_sign_request(self._wallet_handle, trustee_did, fees_req)\n\n        resp = await ledger.submit_request(self._pool_handle, fees_req)\n        ensure_is_reply(resp)\n","sub_path":"scripts/performance/perf_load/perf_req_gen_fees.py","file_name":"perf_req_gen_fees.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"210008638","text":"import requests\nfrom pprint import pprint as pp\n# pretty-prints json\nurl = 'https://www.dhlottery.co.kr/common.do?method=getLottoNumber&drwNo=874'\nres = requests.get(url)\n# json itself is just a string, so it has to be converted (parsed) into a python dictionary object\n\n# print(pp(res.json()))\ndata = res.json()\nwinner = []\n\n# almost identical to push in js\nfor i in range(1,7):\n    # print(data[f'drwtNo{i}'])\n    # append instead of JavaScript's push\n    
winner.append(data[f'drwtNo{i}'])\nprint(winner)","sub_path":"git과python/python_basic/lotto_api.py","file_name":"lotto_api.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"615058211","text":"# We import the necessary modules for the correct execution of the code\nimport bitfinex\nimport time\nimport datetime\nimport pandas as pd\nfrom tqdm import tqdm\n\n# Define query parameters\npair = 'XRPUSD' # Currency pair of interest\nbin_size = '1m' # This will return minute data\nlimit = 1000 # We want the maximum of 1000 data points \napi_key = 'Lm4QXQhVHUH24UeY2Ym4ih5NnpYYk3TtG8V4hb9jJiC' #your apikey of bitfinex\napi_secret = 'Bs7bjJUUUl4byEMUFdApx7tbzSd2pSFEA7YE2mcOpQe' # your secret key of bitfinex\n\n# In this function I prepare the query through the api, taking into account the batch of requests and also the number of steps\ndef fetch_data(start, stop, symbol, interval, tick_limit, step):\n    # Create api instance\n    api_v2 = bitfinex.bitfinex_v2.api_v2(api_key=api_key, api_secret=api_secret)\n    data = []\n    start = start - step\n    while start < stop:\n        start = start + step\n        end = start + step\n        res = api_v2.candles(symbol=symbol, interval=interval,\n                             limit=tick_limit, start=start,\n                             end=end)\n        print(str(start)+' >>> '+ str(end)+' ---- '+str(stop))\n        # print(res if res else \"Response with no data... \")\n        if res:\n            data.extend(res)\n            time.sleep(5)\n        else:\n            print(\"Response with no data...\")\n            time.sleep(5)\n\n    return data\n\n# Set step size\ntime_step = 90000000\n# Define the start date \nt_start = datetime.datetime(2018, 1, 1, 0, 0)\nt_start = time.mktime(t_start.timetuple()) * 1000\n# Define the end date\nt_stop = datetime.datetime(2019, 11, 30, 0, 0)\nt_stop = time.mktime(t_stop.timetuple()) * 1000\npair_data = fetch_data(start=t_start, stop=t_stop, symbol=pair,\n                       interval=bin_size, tick_limit=limit, \n                       step=time_step)\n\nif pair_data:\n\n    # Create pandas data frame and clean/format data\n    names = ['time', 'open', 'close', 'high', 'low', 'volume']\n    df = pd.DataFrame(pair_data, columns=names)\n    df.drop_duplicates(inplace=True)\n    df['time'] = pd.to_datetime(df['time'], unit='ms')\n    df.set_index('time', inplace=True)\n    df.sort_index(inplace=True)\n    df.to_csv('./recolect/data_1.csv')","sub_path":"get_historical_data_crypto.py","file_name":"get_historical_data_crypto.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"398060690","text":"import logging\nimport os\nimport tempfile\nimport time\nfrom urllib.request import urlopen as urllib_urlopen\nfrom urllib.error import URLError\nimport requests\nfrom .cache import CachingSession, FileCache  # noqa\n\n\n__version__ = \"1.2.0\"\n_user_agent = \" \".join((\"scrapelib\", __version__, requests.utils.default_user_agent()))\n\n\n_log = logging.getLogger(\"scrapelib\")\n_log.addHandler(logging.NullHandler())\n\n\nclass HTTPMethodUnavailableError(requests.RequestException):\n    \"\"\"\n    Raised when the supplied HTTP method is invalid or not supported\n    by the HTTP backend.\n    \"\"\"\n\n    def __init__(self, message, method):\n        super(HTTPMethodUnavailableError, self).__init__(message)\n        self.method = method\n\n\nclass HTTPError(requests.HTTPError):\n    \"\"\"\n    Raised when urlopen encounters a 4xx or 5xx error code and the\n    raise_errors option is true.\n    \"\"\"\n\n    def __init__(self, response, body=None):\n        
response.url)\n super(HTTPError, self).__init__(message)\n self.response = response\n self.body = body or self.response.text\n\n\nclass FTPError(requests.HTTPError):\n def __init__(self, url):\n message = \"error while retrieving %s\" % url\n super(FTPError, self).__init__(message)\n\n\nclass ThrottledSession(requests.Session):\n def _throttle(self):\n now = time.time()\n diff = self._request_frequency - (now - self._last_request)\n if diff > 0:\n _log.debug(\"sleeping for %fs\" % diff)\n time.sleep(diff)\n self._last_request = time.time()\n else:\n self._last_request = now\n\n @property\n def requests_per_minute(self):\n return self._requests_per_minute\n\n @requests_per_minute.setter\n def requests_per_minute(self, value):\n if value > 0:\n self._throttled = True\n self._requests_per_minute = value\n self._request_frequency = 60.0 / value\n self._last_request = 0\n else:\n self._throttled = False\n self._requests_per_minute = 0\n self._request_frequency = 0.0\n self._last_request = 0\n\n def request(self, method, url, **kwargs):\n if self._throttled:\n self._throttle()\n return super(ThrottledSession, self).request(method, url, **kwargs)\n\n\n# this object exists because Requests assumes it can call\n# resp.raw._original_response.msg.getheaders() and we need to cope with that\nclass DummyObject(object):\n def getheaders(self, name):\n return \"\"\n\n def get_all(self, name, default):\n return default\n\n\n_dummy = DummyObject()\n_dummy._original_response = DummyObject()\n_dummy._original_response.msg = DummyObject()\n\n\nclass FTPAdapter(requests.adapters.BaseAdapter):\n def send(\n self, request, stream=False, timeout=None, verify=False, cert=None, proxies=None\n ):\n if request.method != \"GET\":\n raise HTTPMethodUnavailableError(\n \"FTP requests do not support method '%s'\" % request.method,\n request.method,\n )\n try:\n real_resp = urllib_urlopen(request.url, timeout=timeout)\n # we're going to fake a requests.Response with this\n resp = requests.Response()\n resp.status_code = 200\n resp.url = request.url\n resp.headers = {}\n resp._content = real_resp.read()\n resp.raw = _dummy\n return resp\n except URLError:\n raise FTPError(request.url)\n\n\nclass RetrySession(requests.Session):\n def __init__(self):\n super(RetrySession, self).__init__()\n self._retry_attempts = 0\n self.retry_wait_seconds = 10\n\n # retry_attempts is a property so that it can't go negative\n @property\n def retry_attempts(self):\n return self._retry_attempts\n\n @retry_attempts.setter\n def retry_attempts(self, value):\n self._retry_attempts = max(value, 0)\n\n def accept_response(self, response, **kwargs):\n return response.status_code < 400\n\n def request(self, method, url, retry_on_404=False, **kwargs):\n # the retry loop\n tries = 0\n exception_raised = None\n\n while tries <= self.retry_attempts:\n exception_raised = None\n\n try:\n resp = super(RetrySession, self).request(method, url, **kwargs)\n # break from loop on an accepted response\n if self.accept_response(resp) or (\n resp.status_code == 404 and not retry_on_404\n ):\n break\n\n # note: This is a pretty broad catch-all, but given the plethora of things that can\n # happen during a requests.request it is used to try to be complete &\n # future-proof this as much as possible.\n # Should it become a problem we could either alter to exclude a few others\n # the way we handle SSLError or we could go back to enumeration of all types.\n except Exception as e:\n if isinstance(e, requests.exceptions.SSLError):\n raise\n exception_raised = e\n\n # if 
we're going to retry, sleep first\n tries += 1\n if tries <= self.retry_attempts:\n # twice as long each time\n wait = self.retry_wait_seconds * (2 ** (tries - 1))\n _log.debug(\"sleeping for %s seconds before retry\" % wait)\n if exception_raised:\n _log.warning(\n \"got %s sleeping for %s seconds before retry\",\n exception_raised,\n wait,\n )\n else:\n _log.warning(\"sleeping for %s seconds before retry\", wait)\n time.sleep(wait)\n\n # out of the loop, either an exception was raised or we had a success\n if exception_raised:\n raise exception_raised\n return resp\n\n\n# compose sessions, order matters (cache then throttle then retry)\nclass Scraper(CachingSession, ThrottledSession, RetrySession):\n \"\"\"\n Scraper is the most important class provided by scrapelib (and generally\n the only one to be instantiated directly). It provides a large number\n of options allowing for customization.\n\n Usage is generally just creating an instance with the desired options and\n then using the :meth:`urlopen` & :meth:`urlretrieve` methods of that\n instance.\n\n :param raise_errors: set to True to raise a :class:`HTTPError`\n on 4xx or 5xx response\n :param requests_per_minute: maximum requests per minute (0 for\n unlimited, defaults to 60)\n :param retry_attempts: number of times to retry if timeout occurs or\n page returns a (non-404) error\n :param retry_wait_seconds: number of seconds to retry after first failure,\n subsequent retries will double this wait\n \"\"\"\n\n def __init__(\n self,\n raise_errors=True,\n requests_per_minute=60,\n retry_attempts=0,\n retry_wait_seconds=5,\n verify=True,\n header_func=None,\n ):\n\n super(Scraper, self).__init__()\n self.mount(\"ftp://\", FTPAdapter())\n\n # added by this class\n self.raise_errors = raise_errors\n\n # added by ThrottledSession\n self.requests_per_minute = requests_per_minute\n\n # added by RetrySession\n self.retry_attempts = retry_attempts\n self.retry_wait_seconds = retry_wait_seconds\n\n # added by this class\n self._header_func = header_func\n\n # added by CachingSession\n self.cache_storage = None\n self.cache_write_only = True\n\n # non-parameter options\n self.timeout = None\n self.user_agent = _user_agent\n self.verify = verify\n\n # statistics structure\n self.reset_stats()\n\n def reset_stats(self):\n self.stats = {}\n self.stats[\"total_requests\"] = 0\n self.stats[\"total_time\"] = 0\n self.stats[\"average_time\"] = None\n\n @property\n def user_agent(self):\n return self.headers[\"User-Agent\"]\n\n @user_agent.setter\n def user_agent(self, value):\n self.headers[\"User-Agent\"] = value\n\n @property\n def disable_compression(self):\n return self.headers[\"Accept-Encoding\"] == \"text/*\"\n\n @disable_compression.setter\n def disable_compression(self, value):\n # disabled: set encoding to text/*\n if value:\n self.headers[\"Accept-Encoding\"] = \"text/*\"\n # enabled: if set to text/* pop, otherwise leave unmodified\n elif self.headers.get(\"Accept-Encoding\") == \"text/*\":\n self.headers[\"Accept-Encoding\"] = \"gzip, deflate, compress\"\n\n def request(self, method, url, **kwargs):\n _log.info(\"{0} - {1}\".format(method.upper(), url))\n\n # apply global timeout\n timeout = kwargs.pop(\"timeout\", self.timeout)\n\n if self._header_func:\n headers = requests.structures.CaseInsensitiveDict(self._header_func(url))\n else:\n headers = {}\n\n kwarg_headers = kwargs.pop(\"headers\", {})\n headers = requests.sessions.merge_setting(\n headers, self.headers, dict_class=requests.structures.CaseInsensitiveDict\n )\n headers = 
requests.sessions.merge_setting(\n            kwarg_headers, headers, dict_class=requests.structures.CaseInsensitiveDict\n        )\n\n        _start_time = time.time()\n\n        resp = super(Scraper, self).request(\n            method, url, timeout=timeout, headers=headers, **kwargs\n        )\n        self.stats[\"total_requests\"] += 1\n        self.stats[\"total_time\"] += time.time() - _start_time\n        self.stats[\"average_time\"] = (\n            self.stats[\"total_time\"] / self.stats[\"total_requests\"]\n        )\n\n        if self.raise_errors and not self.accept_response(resp):\n            raise HTTPError(resp)\n        return resp\n\n    def urlretrieve(\n        self, url, filename=None, method=\"GET\", body=None, dir=None, **kwargs\n    ):\n        \"\"\"\n        Save result of a request to a file, similarly to\n        :func:`urllib.urlretrieve`.\n\n        If an error is encountered may raise any of the scrapelib\n        `exceptions`_.\n\n        A filename may be provided or :meth:`urlretrieve` will safely create a\n        temporary file. If a directory is provided, a file will be given a random\n        name within the specified directory. Either way, it is the responsibility\n        of the caller to ensure that the temporary file is deleted when it is no\n        longer needed.\n\n        :param url: URL for request\n        :param filename: optional name for file\n        :param method: any valid HTTP method, but generally GET or POST\n        :param body: optional body for request, to turn parameters into\n            an appropriate string use :func:`urllib.urlencode()`\n        :param dir: optional directory to place file in\n        :returns filename, response: tuple with filename for saved\n            response (will be same as given filename if one was given,\n            otherwise will be a temp file in the OS temp directory) and\n            a :class:`Response` object that can be used to inspect the\n            response headers.\n        \"\"\"\n        result = self.request(method, url, data=body, **kwargs)\n        result.code = result.status_code  # backwards compat\n\n        if not filename:\n            fd, filename = tempfile.mkstemp(dir=dir)\n            f = os.fdopen(fd, \"wb\")\n        else:\n            f = open(filename, \"wb\")\n\n        f.write(result.content)\n        f.close()\n\n        return filename, result\n\n\n_default_scraper = Scraper(requests_per_minute=0)\n\n\ndef urlopen(url, method=\"GET\", body=None, **kwargs):  # pragma: no cover\n    return _default_scraper.urlopen(url, method, body, **kwargs)\n","sub_path":"scrapelib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"347871416","text":"print(\"How old are you?\", end=' ')\nage = input()\nprint(\"How tall are you?\", end=' ')\nheight = input()\nprint(\"How much do you weigh?\", end=' ')\nweight = input()\n\nprint(f\"So you're {age} old, {height} tall and {weight} heavy.\")\n\n# An example of what I learned from the internet\n\nname = input(\"What is your name?: \")\nage = int(input(\"How old are you?: \"))\nfruit = input(\"What fruit do you want to buy?: \")\norange = 65.79\nprint(f\"The price of an orange is {orange}\")\ncount = int(input(\"How many oranges do you want to buy?: \"))\nsumma = count * orange\nbuy = input(\"Do you want to buy? 
\")\nif buy == \"yes\":\n    print(f\"Your name is {name}, you are {age} years old and you bought {count} pieces for {summa}\")\nelif buy == \"no\":\n    print(f\"Your name is {name}, you are {age} years old and you did not buy {count}\")\nelse:\n    print(f\"You entered an invalid word\")\n\n\n","sub_path":"exercise/ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"496189238","text":"from bs4 import BeautifulSoup\nimport requests\nimport lxml\nheaders = {\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36\"\n}\nurl=\"http://www.weather.com.cn/textFC/xn.shtml\"\nresp=requests.get(url,headers=headers)\npageall=resp.content.decode(\"utf-8\")\ndef chuli(page):\n    soup=BeautifulSoup(page,\"lxml\")\n    # the block below is the whole page\n    trs=soup.find(\"div\",attrs={\"class\":\"conMidtab\"})\n    # the block below splits the whole page up by city\n    trs1=trs.find_all(\"div\",attrs={\"class\":\"conMidtab2\"})\n    list_2=[]\n    for i in trs1:\n        trs2=i.find_all(\"tr\")[2:]\n        #print(trs2)\n        #break\n        k=0\n        for j in trs2:\n            if k==0:\n                trs3 = j.stripped_strings\n                list1=(list(trs3))\n                list_2.append({\"city\":list1[1],\"temp\":list1[9]})\n            else:\n                trs3 = j.stripped_strings\n                list1 = (list(trs3))\n                #print(list(trs3))\n                list_2.append({\"city\":list1[0],\"temp\":list1[8]})\n            k+=1\n        #print(\"moving on to the next section\")\n    for l in list_2:\n        print(l)\nchuli(pageall)\n","sub_path":"python爬虫/bs4爬取天气网站.py","file_name":"bs4爬取天气网站.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"508947290","text":"from comet_ml import Experiment\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Embedding, Bidirectional, LSTM, Dense, Dropout\nfrom tensorflow.keras.optimizers import Adam\n\n\nBUFFER_SIZE = 10000\nBATCH_SIZE = 64\nNUM_EPOCHS = 10\n\n\nexperiment = Experiment(project_name='tf2-imdb',\n                        workspace='koichiro-mori',\n                        auto_param_logging=False)\n\n\ndef load_dataset():\n    dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True)\n    train_ds, test_ds = dataset['train'], dataset['test']\n\n    # padded_batch() pads each mini-batch to the length of its longest sequence\n    train_ds = train_ds.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE, padded_shapes=([None], []))\n    test_ds = test_ds.padded_batch(BATCH_SIZE, padded_shapes=([None], []))\n\n    encoder = info.features['text'].encoder\n\n    return train_ds, test_ds, encoder.vocab_size\n\n\ndef main():\n    train_ds, test_ds, vocab_size = load_dataset()\n\n    model = Sequential([\n        Embedding(vocab_size, 64),\n        Bidirectional(LSTM(64, return_sequences=True)),\n        # since the layer is Bidirectional, the number of output units is 64\n        Bidirectional(LSTM(32)),\n        Dense(64, activation='relu'),\n        Dropout(0.5),\n        Dense(1, activation='sigmoid')\n    ])\n\n    model.compile(optimizer=Adam(1e-4),\n                  loss='binary_crossentropy',\n                  metrics=['accuracy'])\n    model.summary()\n\n    with experiment.train():\n        model.fit(train_ds, epochs=NUM_EPOCHS, validation_data=test_ds)\n\n    with experiment.test():\n        test_loss, test_acc = model.evaluate(test_ds)\n        print('Test Loss: {}'.format(test_loss))\n        print('Test Accuracy: {}'.format(test_acc))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"652595889","text":"from __future__ import print_function  # Python 
2/3 compatibility\nimport boto3\nimport json\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\nimport random\n\n# Helper class to convert a DynamoDB item to JSON.\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n if o % 1 > 0:\n return float(o)\n else:\n return int(o)\n return super(DecimalEncoder, self).default(o)\n\ndynamodb = boto3.resource('dynamodb', region_name='us-west-2')\n\n\ntable = dynamodb.Table('Movies')\nrandom.seed()\nfor i in xrange(10000):\n year = random.randint(1920, 2007)\n response = table.query(\n KeyConditionExpression=Key('year').eq(year)\n )\n\n","sub_path":"MoviesQuery01.py","file_name":"MoviesQuery01.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"220140566","text":"import os\nfrom os.path import join, exists\nimport argparse\nimport pathlib\n\nimport click\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\n\nimport download_data\nimport dataframe\nimport plotter\nfrom plotter import transform_acc, inv_transform_acc\nfrom model_types import ModelTypes, model_types_map\n\n\ndef get_model_type(df_row):\n return model_types_map[df_row.name]\n\n\ndef show_in_plot1(df_row):\n model_name, model_type = df_row.name.lower(), df_row.model_type\n return 'subsample' not in model_name and df_row.val >= 55\n\ndef show_in_plot2(df_row):\n model_name, model_type = df_row.name.lower(), df_row.model_type\n return 'subsample' not in model_name and model_type != ModelTypes.STANDARD # and df_row.val >= 55\n\n\ndef use_for_line_fit(df_row):\n model_name, model_type, in_plot = df_row.name.lower(), df_row.model_type, df_row.show_in_plot\n return 'aws' not in model_name and 'batch64' not in model_name and model_type is ModelTypes.STANDARD and in_plot\n\n\ndef format_eff_robust(df, x_axis, y_axis, y_axis_fit, transform):\n df_line = df[df.use_for_line_fit == True]\n x_acc_line_trans = transform_acc(df_line[x_axis], transform)\n y_acc_line_trans = transform_acc(df_line[y_axis_fit], transform)\n\n lin_fit = scipy.stats.linregress(x_acc_line_trans, y_acc_line_trans)\n intercept, slope = lin_fit[1], lin_fit[0]\n lin_fit_ys_trans = transform_acc(df[x_axis], transform) * slope + intercept\n lin_fit_ys = inv_transform_acc(lin_fit_ys_trans, transform)\n\n df['eff_robust_y'] = df[y_axis_fit] - lin_fit_ys\n\n df_line = df[df.use_for_line_fit == True]\n x_acc_line_trans = transform_acc(df_line[x_axis], transform)\n y_acc_line_trans = transform_acc(df_line[y_axis], transform)\n\n lin_fit = scipy.stats.linregress(x_acc_line_trans, y_acc_line_trans)\n intercept, slope = lin_fit[1], lin_fit[0]\n lin_fit_ys_trans = transform_acc(df[x_axis], transform) * slope + intercept\n lin_fit_ys = inv_transform_acc(lin_fit_ys_trans, transform)\n\n df['eff_robust_x'] = df[y_axis] - lin_fit_ys\n return df\n\n\n@click.command()\n@click.option('--x_axis', type=str, default='val')\n@click.option('--y_axis', type=str, default='avg_corruptions')\n@click.option('--y_axis_fit', type=str, default='imagenetv2-matched-frequency-format-val')\n@click.option('--transform', type=str, default='logit')\n@click.option('--num_bootstrap_samples', type=int, default=1000) \n@click.option('--output_dir', type=str, default=str((pathlib.Path(__file__).parent / '../outputs').resolve()))\n@click.option('--output_file_dir', type=str, default=str((pathlib.Path(__file__).parent / '../paper/figs').resolve()))\n@click.option('--skip_download', is_flag=True, 
type=bool)\ndef generate_xy_plot(x_axis, y_axis, y_axis_fit, transform, num_bootstrap_samples, output_dir, output_file_dir, skip_download):\n\n if skip_download:\n filename = join(output_dir, 'grid_df.pkl')\n if not exists(filename):\n raise Exception(f'Downloaded data not found at {filename}. Please run python src/plotting/download_data.py first')\n df = pd.read_pickle(filename)\n else:\n df = download_data.download_plotting_data(output_dir, store_data=True, verbose=True)\n\n df, df_metadata = dataframe.extract_metadata(df)\n df, df_metadata = dataframe.replace_10percent_with_metadata(df, df_metadata)\n df, df_metadata = dataframe.aggregate_corruptions_with_metadata(df, df_metadata)\n\n df = prepare_df_for_plotting(df, df_metadata, [x_axis, y_axis, y_axis_fit])\n df = plotter.add_plotting_data(df, [x_axis, y_axis, y_axis_fit])\n\n df = format_eff_robust(df, x_axis, y_axis, y_axis_fit, transform)\n\n # auto set xlim and ylim based on visible points\n df_visible = df[df.show_in_plot == True]\n xlim = [df_visible[x_axis].min() - 1, df_visible[x_axis].max() + 0.5]\n ylim = [df_visible[y_axis].min() - 1, df_visible[y_axis].values.max() + 1]\n\n os.makedirs(output_file_dir, exist_ok=True)\n\n fig, _, legend = plotter.model_scatter_plot(df, x_axis, y_axis, xlim, ylim, ModelTypes, \n transform=transform, tick_multiplier=5, num_bootstrap_samples=num_bootstrap_samples,\n title='Distribution Shift to Corruptions Averaged', x_label='ImageNet', y_label='Corruptions Averaged', \n figsize=(12, 8), include_legend=False, return_separate_legend=True)\n legend.savefig(join(output_file_dir, f'syn_shift_legend.pdf'), dpi='figure', bbox_inches='tight', pad_inches=0.1)\n print(f\"Legend saved to {join(output_file_dir, f'syn_shift_legend.pdf')}\")\n fig.savefig(join(output_file_dir, f'syn_shift_corruptions.pdf'), dpi='figure', bbox_inches='tight', pad_inches=0.1)\n print(f\"Plot saved to {join(output_file_dir, f'syn_shift_corruptions.pdf')}\")\n\n\n df.show_in_plot = df.apply(show_in_plot2, axis=1)\n\n # auto set xlim and ylim based on visible points\n df_visible = df[df.show_in_plot == True]\n xlim = [df_visible['eff_robust_x'].min() - 1, df_visible['eff_robust_x'].max() + 1]\n ylim = [df_visible['eff_robust_y'].min() - 0.5, df_visible['eff_robust_y'].values.max() + 0.5]\n\n fig, _ = plotter.simple_scatter_plot(df, 'eff_robust_x', 'eff_robust_y', xlim, ylim, ModelTypes, \n title='Effective Robustness Scatterplot', \n x_tick_multiplier=5, y_tick_multiplier=1,\n x_label='Corruptions Averaged Effective Robustness', y_label='ImageNetV2 Effective Robustness', \n figsize=(12, 8), include_legend=False, return_separate_legend=False)\n\n fig.savefig(join(output_file_dir, f'eff_robust_corruptions.pdf'), dpi='figure', bbox_inches='tight', pad_inches=0.1)\n print(f\"Plot saved to {join(output_file_dir, f'eff_robust_corruptions.pdf')}\")\n\n\ndef prepare_df_for_plotting(df, df_metadata, columns):\n assert set(columns).issubset(set(df.columns))\n\n df = df[columns]\n df_metadata = df_metadata[[x+'_dataset_size' for x in columns]]\n df = df.merge(df_metadata, right_index=True, left_index=True)\n df = df.dropna()\n\n df['model_type'] = df.apply(get_model_type, axis=1)\n df['show_in_plot'] = df.apply(show_in_plot1, axis=1)\n df['use_for_line_fit'] = df.apply(use_for_line_fit, axis=1)\n\n return df\n\n\nif __name__ == '__main__':\n 
generate_xy_plot()\n","sub_path":"plotting/paper_eff_robust_corruptions.py","file_name":"paper_eff_robust_corruptions.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"210624199","text":"import json\r\nimport sys\r\n\r\nfrom pathlib import Path\r\nfrom pprint import pprint\r\n\r\nROOT = Path(__file__).absolute().parent.parent\r\nsys.path.insert(0, str(ROOT / \"api\"))\r\nsys.path.insert(0, str(ROOT))\r\n\r\nimport Functions\r\nfrom metadata import generate_metadata\r\n\r\nexitcode = 0\r\n\r\nfor n in dir(Functions):\r\n f = getattr(Functions, n)\r\n if not callable(f):\r\n continue\r\n print(\"-\" * 80)\r\n print(\"Checking\", n)\r\n try:\r\n md = generate_metadata(n, f, tidy=False)\r\n except Exception as ex:\r\n print(\"Failed to calculate metadata\", ex)\r\n exitcode += 1\r\n continue\r\n try:\r\n md = generate_metadata(n, f, tidy=True)\r\n except Exception as ex:\r\n print(\"Failed to calculate tidied metadata\", ex)\r\n exitcode += 1\r\n continue\r\n print(\"Metadata:\")\r\n pprint(md)\r\n try:\r\n json.dumps(md)\r\n except Exception as ex:\r\n print(\"Failed to dump metadata\", ex)\r\n exitcode += 1\r\n continue\r\n\r\nprint(\"=\" * 80)\r\nprint(\"All checks completed with {} failure(s)\".format(exitcode))\r\nsys.exit(exitcode)\r\n","sub_path":".scripts/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"260238302","text":"from django.conf.urls import url\nfrom sign.views import views\nfrom sign.views import viewsforapi\n\nurlpatterns = [\n # path('admin/', admin.site.urls),\n url(r'^index/$', views.index),\n url(r'^$', views.index),\n url(r'^login_action/$', views.login_action),\n url(r'^event_manage/$', views.event_manage),\n url(r'^search_name/$', views.search_name),\n url(r'^guest_manage', views.guest_manage),\n url(r'^sign_index/(?P[0-9]+)/$', views.sign_index),\n url(r'^sign_index_action/(?P[0-9]+)/$', views.sign_index_action),\n url(r'^logout/$', views.login_action),\n url(r'^add_event/$',viewsforapi.add_event),\n url(r'^get_evebt_list$',viewsforapi.get_evebt_list),\n url(r'^add_guest$',viewsforapi.add_guest),\n]\n","sub_path":"sign/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"149676678","text":"import ICAOref\r\nimport cpr_global_decoder_fid\r\nimport cpr_local_decoder\r\nimport math\r\nimport numpy as np\r\nimport callsign\r\n\r\n\r\n# Options\r\nIDOn = 0\r\nGroundOn = 1\r\nAirborneOn = 1\r\nSpeedOn = 1\r\nGNSSOn = 1\r\nTC29On = 1\r\n\r\n\r\ndef decode(epoch, msg, nextline, fid, hexx_name, pos_buffer, w): # tc's mean different Horizontal Protection Limit (HPL)\r\n length = len(msg) * 4\r\n binmsg = bin(int(msg, 16))[2:]\r\n while (len(binmsg)) < length:\r\n binmsg = '0' + binmsg\r\n icao0 = msg[2:8] # hexx for AA\r\n capability = int(binmsg[5:8], 2)\r\n mb = binmsg[32:88]\r\n tc = int(binmsg[32:37], 2)\r\n\r\n # if icao0 not in ['342189']:\r\n # return\r\n\r\n if tc in range(1, 5): # ID and category\r\n # print ' DF17 tc 1-4, ID & CAT'\r\n call = callsign.get_callsign(msg)\r\n if len(call) < 8:\r\n w.run(epoch, icao0, call, 'call')\r\n if IDOn:\r\n ICAOref.get_icaoref(epoch, msg, icao0)\r\n # print ''\r\n\r\n elif tc in range(5, 9) and GroundOn: # ground position, BDS 0,6\r\n # print ' DF17 tc 4-8, Ground position'\r\n 
gr_air = 0 # 0 for ground\r\n # print 'ICAO0', icao0\r\n movement = int(mb[5:12], 2) # speed\r\n ground_track_status = mb[12:13] # 0 invalid, 1 valid\r\n ground_track_value = int(mb[13:20], 2) # direction in deg, 0 for true north\r\n time_sync = mb[20:21] # 0 for not UTC sync\r\n cpr = int(mb[21:22], 2) # 0 for even, 1 for odd\r\n cprlat = int(mb[22:39], 2) # 17 bits\r\n cprlon = int(mb[39:56], 2) # 17 bits\r\n\r\n if nextline > 2:\r\n Rlat, Rlon, time_diff, worked = cpr_global_decoder_fid.decode(epoch, nextline, gr_air, icao0, cpr, cprlat, cprlon, fid)\r\n\r\n if worked:\r\n if 36 < Rlat < 44 and -10 < Rlon < 3: # Spain limits\r\n w.run(epoch, icao0, [Rlat, Rlon, 'GND'], ['lat', 'lon', 'FL'])\r\n\r\n Rlat, Rlon, time_diff, worked = cpr_local_decoder.decode(epoch, nextline, gr_air, icao0, cpr, cprlat, cprlon, pos_buffer)\r\n if worked:\r\n if 36 < Rlat < 44 and -10 < Rlon < 3:\r\n w.run(epoch, icao0, [Rlat, Rlon, 'GND'], ['lat', 'lon', 'FL'])\r\n\r\n elif tc in range(9, 19) and AirborneOn: # airborne position DBS 0,5\r\n gr_air = 1 # 1 for airorne\r\n surveillance_status = int(mb[5:7], 2) # 0 no condition, 1 permanent alert, 2 temp alert, 3 SPI condition\r\n saf = mb[7:8] # SINGLE ANTENNA FLAG, 0 dual transit, 1 single transit\r\n qbit = mb[15:16] # 1 for 25 feet increment\r\n n = int((mb[8:15]+mb[16:20]), 2)\r\n if qbit == '1':\r\n FL = n*25-1000 # ft\r\n else: # not sure if this equation is right\r\n FL = n*100-1000\r\n\r\n time_sync = mb[20:21] # 0 for not UTC sync\r\n cpr = int(mb[21:22], 2) # 0 for even, 1 for odd\r\n cprlat = int(mb[22:39], 2) # 17 bits\r\n cprlon = int(mb[39:56], 2) # 17 bits\r\n if nextline > 2:\r\n Rlat, Rlon, time_diff, global_worked = cpr_global_decoder_fid.decode(epoch, nextline, gr_air, icao0, cpr, cprlat, cprlon, fid)\r\n if global_worked:\r\n if 36 < Rlat < 44 and -10 < Rlon < 3 and -999 < FL < 90000:\r\n pos_buffer.new(icao0, epoch, Rlat, Rlon, FL)\r\n w.run(epoch, icao0, [Rlat, Rlon, FL], ['lat', 'lon', 'FL'])\r\n\r\n if not global_worked:\r\n Rlat, Rlon, time_diff, worked = cpr_local_decoder.decode(epoch, nextline, gr_air, icao0, cpr, cprlat, cprlon, pos_buffer)\r\n if worked:\r\n if 36 < Rlat < 44 and -10 < Rlon < 3 and -999 < FL < 90000:\r\n pos_buffer.new(icao0, epoch, Rlat, Rlon, FL)\r\n w.run(epoch, icao0, [Rlat, Rlon, FL], ['lat', 'lon', 'FL'])\r\n\r\n\r\n elif tc == 19 and SpeedOn: # BDS 0,9\r\n # print ' DF17 tc 19, Speed'\r\n\r\n subtype = int(mb[5:8], 2)\r\n intent_change_flag = mb[8:9]\r\n ifr_cap = mb[9:10]\r\n nav_uncertainty = int(mb[10:13], 2)\r\n v_rate_source = mb[35:36]\r\n v_rate_sign = mb[36:37]\r\n v_rate = int(mb[37:46], 2)\r\n gnss_sign = mb[48:49]\r\n gnss_diff = int(mb[49:56], 2)\r\n\r\n if subtype in [1, 2]: # vel over ground, 2 for supersonic\r\n ew_direction = mb[13:14] # 0 east, 1 west\r\n ew_vel = int(mb[14:24], 2)\r\n ns_direction = mb[24:25] # 0 north, 1 south\r\n ns_vel = int(mb[25:35], 2)\r\n\r\n if ew_vel not in [0, 1023]:\r\n if ew_direction == '0':\r\n ew_vel = float(ew_vel-1)\r\n else:\r\n ew_vel = float(-(ew_vel-1)) # westward (negative)\r\n else:\r\n ew_vel = None # either not available or greater than 1021.5 kt\r\n if ns_vel not in [0, 1023]:\r\n if ns_direction == '0':\r\n ns_vel = float(ns_vel-1)\r\n else:\r\n ns_vel = float(-(ns_vel-1)) # southward (neg)\r\n else:\r\n ns_vel = None # either not available or greater than 1021.5 kt\r\n\r\n if ew_vel is not None and ns_vel is not None:\r\n speed = math.sqrt(ew_vel**2+ns_vel**2)\r\n if ew_vel == 0:\r\n track_angle = 0\r\n else:\r\n # angle = 
math.atan(ns_vel/ew_vel)\r\n vec = [ew_vel, ns_vel]\r\n track_angle = math.acos(np.dot(vec, [0, 1])/(np.sqrt(np.dot(vec, vec))*np.sqrt(np.dot([0, 1], [0, 1]))))\r\n if ew_direction == '1':\r\n track_angle = 2*np.pi - track_angle\r\n # angle = (angle + np.pi) % (2 * np.pi) - np.pi\r\n track_angle = track_angle*180.0/np.pi\r\n w.run(epoch, icao0, [track_angle, speed], ['ttrack', 'gs'])\r\n\r\n if v_rate not in [0, 511]:\r\n if v_rate_sign == '0': # UP\r\n v_rate = int(64*(v_rate-1)) # ft/min\r\n else:\r\n v_rate = int(-64*(v_rate-1))\r\n w.run(epoch, icao0, v_rate, 'vrate')\r\n\r\n elif subtype in [3, 4]: # airspeed and heading\r\n mh_status = mb[13:14]\r\n if mh_status == '1':\r\n mag_heading = int(mb[14:24], 2)*0.3515625\r\n else:\r\n mag_heading = None\r\n air_type = mb[24:25] # 0 IAS, 1 TAS\r\n airspeed = int(mb[25:35], 2)\r\n\r\n if airspeed not in [0, 1023] and air_type == '1':\r\n air = airspeed - 1 # kts\r\n ias_tas = 'tas'\r\n elif airspeed not in [0, 1023] and air_type == '0':\r\n air = airspeed - 1\r\n ias_tas = 'ias'\r\n else:\r\n air = None\r\n ias_tas = None\r\n\r\n if air is not None:\r\n w.run(epoch, icao0, air, ias_tas)\r\n if mag_heading is not None:\r\n w.run(epoch, icao0, mag_heading, 'mhead')\r\n\r\n # velocity = calc_velocity.get_vel(msg, time)\r\n # print ''\r\n\r\n # elif tc in range(20, 23) and GNSSOn:\r\n # print ' DF17 tc 19, GNSS position'\r\n # print ''\r\n\r\n elif tc == 28: # BDS 6,1\r\n subtype = mb[5:8]\r\n if subtype == '1':\r\n emergency_state = mb[8:11]\r\n elif subtype == '2':\r\n ara = int(mb[8:22], 2)\r\n rac = int(mb[22:26], 2)\r\n rat = int(mb[26:27], 2)\r\n mte = int(mb[27:28], 2)\r\n tti = int(mb[28:30], 2)\r\n\r\n # elif tc == 29 and TC29On:\r\n # print ' DF17 tc 29, Reserved?'\r\n # n29 = n29 + 1\r\n # targetstate = target_state.targetstate(msg)\r\n\r\n # else:\r\n # print ' DF17: unknown tc'\r\n\r\n return\r\n\r\n","sub_path":"df17_decoder_fid.py","file_name":"df17_decoder_fid.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"440071017","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom collections import OrderedDict\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nfrom nets.network import Network\nfrom model.config import cfg\nfrom nets.utils import Lambda, CustomSpatialCrossMapLRN\n\n\nclass TorchAlexNetWithGrouping(nn.Module):\n \"\"\"\n This is the BVLC CaffeNet model with optional batch normalization\n \"\"\"\n\n def __init__(self, num_classes=21, add_bn=False):\n super(TorchAlexNetWithGrouping, self).__init__()\n\n if add_bn:\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 96, (11, 11), (4, 4))),\n ('batchnorm1', nn.BatchNorm2d(96)),\n ('relu1', nn.ReLU(inplace=True)),\n ('pool1', nn.MaxPool2d((3, 3), (2, 2), (0, 0),\n ceil_mode=True)),\n ('conv2', nn.Conv2d(96, 256, (5, 5), (1, 1), (2, 2), 1, 2)),\n ('batchnorm2', nn.BatchNorm2d(256)),\n ('relu2', nn.ReLU(inplace=True)),\n ('pool2', nn.MaxPool2d((3, 3), (2, 2), (0, 0),\n ceil_mode=True)),\n ('conv3', nn.Conv2d(256, 384, (3, 3), (1, 1), (1, 1))),\n ('batchnorm3', nn.BatchNorm2d(384)),\n ('relu3', nn.ReLU(inplace=True)),\n ('conv4', nn.Conv2d(384, 384, (3, 3), (1, 1), (1, 1), 1, 2)),\n 
('batchnorm4', nn.BatchNorm2d(384)),\n ('relu4', nn.ReLU(inplace=True)),\n ('conv5', nn.Conv2d(384, 256, (3, 3), (1, 1), (1, 1), 1, 2)),\n ('batchnorm5', nn.BatchNorm2d(256)),\n ('relu5', nn.ReLU(inplace=True)),\n ('pool5', nn.MaxPool2d((3, 3), (2, 2), (0, 0),\n ceil_mode=True)),\n ]))\n\n self.classifier = nn.Sequential(OrderedDict([\n ('drop5', nn.Dropout()),\n ('fc6', nn.Linear(256 * 6 * 6, 4096)),\n ('batchnorm6', nn.BatchNorm1d(4096)),\n ('relu6', nn.ReLU(inplace=True)),\n ('drop6', nn.Dropout()),\n ('fc7', nn.Linear(4096, 4096)),\n ('batchnorm7', nn.BatchNorm1d(4096)),\n ('relu7', nn.ReLU(inplace=True)),\n ('fc8', nn.Linear(4096, num_classes)),\n ]))\n\n else:\n self.features = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(3, 96, (11, 11), (4, 4))),\n ('relu1', nn.ReLU(inplace=True)),\n ('lrn1', Lambda(lambda x, lrn=CustomSpatialCrossMapLRN(\n *(5, 0.0001, 0.75, 1)): Variable(lrn.forward(x.data)))),\n ('pool1', nn.MaxPool2d((3, 3), (2, 2), (0, 0),\n ceil_mode=True)),\n ('conv2', nn.Conv2d(96, 256, (5, 5), (1, 1), (2, 2), 1, 2)),\n ('relu2', nn.ReLU(inplace=True)),\n ('lrn2', Lambda(lambda x, lrn=CustomSpatialCrossMapLRN(\n *(5, 0.0001, 0.75, 1)): Variable(lrn.forward(x.data)))),\n ('pool2', nn.MaxPool2d((3, 3), (2, 2), (0, 0),\n ceil_mode=True)),\n ('conv3', nn.Conv2d(256, 384, (3, 3), (1, 1), (1, 1))),\n ('relu3', nn.ReLU(inplace=True)),\n ('conv4', nn.Conv2d(384, 384, (3, 3), (1, 1), (1, 1), 1, 2)),\n ('relu4', nn.ReLU(inplace=True)),\n ('conv5', nn.Conv2d(384, 256, (3, 3), (1, 1), (1, 1), 1, 2)),\n ('relu5', nn.ReLU(inplace=True)),\n ('pool5', nn.MaxPool2d((3, 3), (2, 2), (0, 0),\n ceil_mode=True)),\n ]))\n\n self.classifier = nn.Sequential(OrderedDict([\n ('drop5', nn.Dropout()),\n ('fc6', nn.Linear(256 * 6 * 6, 4096)),\n ('relu6', nn.ReLU(inplace=True)),\n ('drop6', nn.Dropout()),\n ('fc7', nn.Linear(4096, 4096)),\n ('relu7', nn.ReLU(inplace=True)),\n ('fc8', nn.Linear(4096, num_classes)),\n ]))\n\n def load(self, checkpoint_file):\n current_state_dict = self.state_dict()\n old_state = torch.load(checkpoint_file)\n new_state = {}\n for idx, i in enumerate([0, 4, 8, 10, 12]):\n for name in ['weight', 'bias']:\n w = np.array(old_state[f'{i}.{name}'])\n new_state[f'features.conv{idx+1}.{name}'] = \\\n torch.from_numpy(w).cuda()\n\n current_state_dict.update(new_state)\n self.load_state_dict(current_state_dict)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\n\nclass groupalexnet(Network):\n def __init__(self):\n Network.__init__(self)\n self._feat_stride = [16, ]\n self._feat_compress = [1. 
/ float(self._feat_stride[0]), ]\n self._net_conv_channels = 256\n self._fc7_channels = 4096\n\n def _init_head_tail(self):\n self.alexnet = TorchAlexNetWithGrouping(\n add_bn=cfg.GROUPALEXNET.ADD_BN)\n # Remove fc8\n self.alexnet.classifier = nn.Sequential(\n *list(self.alexnet.classifier._modules.values())[:-1])\n\n # # Fix the layers before conv3:\n # for layer in range(6):\n # for p in self.alexnet.features[layer].parameters():\n # p.requires_grad = False\n\n # not using the last maxpool layer\n self._layers['head'] = nn.Sequential(\n *list(self.alexnet.features._modules.values())[:-1])\n\n def _image_to_head(self):\n net_conv = self._layers['head'](self._image)\n self._act_summaries['conv'] = net_conv\n\n return net_conv\n\n def _head_to_tail(self, pool5):\n pool5_flat = pool5.view(pool5.size(0), -1)\n fc7 = self.alexnet.classifier(pool5_flat)\n\n return fc7\n\n def load_pretrained_cnn(self, state_dict):\n if 'state_dict' in state_dict:\n state_dict = state_dict['state_dict']\n\n new_state = {}\n for idx, i in enumerate([0, 4, 8, 10, 12]):\n for name in ['weight', 'bias']:\n w = np.array(state_dict[f'{i}.{name}'])\n new_state[f'features.conv{idx+1}.{name}'] = \\\n torch.from_numpy(w).cuda()\n if cfg.GROUPALEXNET.LOAD_FC:\n idx2newidx = {0: 1, 1: 4}\n if cfg.GROUPALEXNET.ADD_BN:\n idx2newidx[1] = 5\n for idx, i in enumerate([16, 19]):\n for name in ['weight', 'bias']:\n w = np.array(state_dict[f'{i}.1.{name}'])\n new_state[f'classifier.{idx2newidx[idx]}.{name}'] = \\\n torch.from_numpy(w).cuda()\n\n current_state_dict = self.alexnet.state_dict()\n current_state_dict.update(new_state)\n self.alexnet.load_state_dict(current_state_dict)\n","sub_path":"lib/nets/groupalexnet.py","file_name":"groupalexnet.py","file_ext":"py","file_size_in_byte":7229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"77735107","text":"import ctypes# do not delete this comment!\nimport os\nimport numpy as np\nimport numpy.ctypeslib as npct\nimport sys\n\ndef getSysId():\n\n # determine library name\n if sys.platform.startswith('win'):\n if sys.maxsize > 2**32:\n libname = 'usysidw64.dll'\n else:\n libname = 'usysidw32.dll'\n elif sys.platform == \"darwin\":\n libname = 'libusysidm64.so'\n elif sys.platform.startswith('linux'):\n if sys.maxsize > 2**32:\n libname = 'libusysidl64.so'\n else:\n libname = 'libusysidl32.so'\n else:\n raise Exception(\"Unknown platform\")\n\t\n\n # function to call\n try:\n _lib = ctypes.CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)),'lib',libname))\n cfunc = getattr(_lib,'__FORCESsolver___computesystemuniqueid')\n except:\n _lib = ctypes.CDLL(os.path.join(os.path.dirname(os.path.abspath(__file__)),libname))\n cfunc = getattr(_lib,'__FORCESsolver___computesystemuniqueid')\n\n # determine data types for solver function prototype \n intarrayType = ctypes.c_ushort * 5\n cfunc.restype = ctypes.POINTER(intarrayType)\n\n UID = intarrayType()\n P_UID = ctypes.pointer(UID)\n\t\n P_UID = _lib.__FORCESsolver___computesystemuniqueid()\n\t\n sysid = npct.as_array(P_UID.contents)\n return sysid\n \n","sub_path":"parameter-learning_nd_disc/FORCES_client/usysid.py","file_name":"usysid.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"474148824","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### 
### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Unit tests for PyMVPA ZScore mapper\"\"\"\n\n\nimport unittest\n\nfrom mvpa.base import externals\nexternals.exists('scipy', raiseException=True)\n\nfrom mvpa.support.copy import deepcopy\nimport numpy as N\n\nfrom mvpa.datasets import Dataset\nfrom mvpa.mappers.zscore import ZScoreMapper\nfrom mvpa.datasets.miscfx import zscore\n\nfrom tests_warehouse import datasets\n\nclass ZScoreMapperTests(unittest.TestCase):\n \"\"\"Test simple ZScoreMapper\n \"\"\"\n\n def setUp(self):\n \"\"\"Setup sample datasets\n \"\"\"\n # data: 40 sample feature line in 20d space (40x20; samples x features)\n self.dss = [\n Dataset(samples=N.concatenate(\n [N.arange(40) for i in range(20)]).reshape(20,-1).T,\n labels=1, chunks=1),\n ] + datasets.values()\n\n\n def testCompareToZscore(self):\n \"\"\"Test by comparing to results of elderly z-score function\n \"\"\"\n for ds in self.dss:\n ds1 = deepcopy(ds)\n ds2 = deepcopy(ds)\n\n zsm = ZScoreMapper()\n zsm.train(ds1)\n ds1z = zsm.forward(ds1.samples)\n\n zscore(ds2, perchunk=False)\n self.failUnless(N.linalg.norm(ds1z - ds2.samples) < 1e-12)\n self.failUnless((ds1.samples == ds.samples).all(),\n msg=\"It seems we modified original dataset!\")\n\n ds0 = zsm.reverse(ds1z)\n self.failUnless(N.linalg.norm(ds0 - ds.samples) < 1e-12,\n msg=\"Can't reconstruct from z-scores\")\n\n\ndef suite():\n return unittest.makeSuite(ZScoreMapperTests)\n\n\nif __name__ == '__main__':\n import runner\n\n","sub_path":"mvpa/tests/test_zscoremapper.py","file_name":"test_zscoremapper.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"550032244","text":"# Author: mdavis2@scu.edu\n# Date: 2020-04-20\n# Context: SCU OMIS 30 Spring 2020 - LAB-IfNameMain\n# Collaborators: None\n\n# Weights of each of the individual grades\nparticipation_weight = 0.10\nchallenge_activities_weight = 0.10\nzylabs_weight = 0.15\nhomeworks_weight = 0.10\npre_read_quizzes_weight = 0.05\nmidterm_weight = 0.20\nfinal_project_weight = 0.30\n\n\n# Create the grade dictionary to pass to the other program\nmy_grade_dict = dict()\n\nmy_grade_dict['student'] = 'mdavis2@scu.edu'\n\nmy_grade_dict['grade_components'] = dict()\nmy_grade_dict['grade_components']['participation'] = 80\nmy_grade_dict['grade_components']['challenge_activities'] = 85\nmy_grade_dict['grade_components']['zylabs'] = 90\nmy_grade_dict['grade_components']['homeworks'] = 95\nmy_grade_dict['grade_components']['pre_read_quizzes'] = 75\nmy_grade_dict['grade_components']['midterm'] = 97\nmy_grade_dict['grade_components']['final_project'] = 100\n\nmy_grade_dict['calculated_course_grade'] = ( (my_grade_dict['grade_components']['participation'] * participation_weight) + (my_grade_dict['grade_components']['challenge_activities'] * challenge_activities_weight) + (my_grade_dict['grade_components']['zylabs'] * zylabs_weight) + (my_grade_dict['grade_components']['homeworks'] * homeworks_weight) + (my_grade_dict['grade_components']['pre_read_quizzes'] * pre_read_quizzes_weight) + (my_grade_dict['grade_components']['midterm'] * midterm_weight) + (my_grade_dict['grade_components']['final_project'] * final_project_weight) )\n\n'''\nThis is an example of the docstring-style multi-line comment.\n'''\n\n'''\nBefore adding any code - run this file, see what happens. 
What do you think should happen?\nThen, create a new python file in the same folder. Call it \"LAB_IfNameMain_Testing.py\".\nIn that new file: paste this code: \n \nfrom LAB_IfNameMain import my_grade_dict as student_1_grade_dict\nprint(student_1_grade_dict)\n\nThen change the grades, and re-run your new file \"LAB-IfNameMain_Testing.py\". See that your final grade changes.\nNow, delete the \"print(student_1_grade_dict)\" line. See that nothing prints now!\n\nThis is how someone who would be importing your code would work.\nFor example: I'm going to download all 30 submissions. I'm going to run\n\nfrom student1 import my_grade_dict as student_1_grade_dict\nfrom student2 import my_grade_dict as student_2_grade_dict\n...\n\nThen, in order to calculate the class average, I'll run: (student_1_grade_dict['calculated_course_grade'] + student_2_grade_dict['calculated_course_grade'] + ... ) / 30\n\n'''\n\n# Below, add your code for the lab.","sub_path":"assignments/LAB_IfNameMain.py","file_name":"LAB_IfNameMain.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"205975158","text":"import warnings\nwarnings.filterwarnings('ignore')\n\nimport argparse\nimport glob\nimport os\nimport json\nimport time\nimport logging\nimport random\nimport re\nfrom itertools import chain\nfrom string import punctuation\nfrom torch import nn\nimport torch.nn.functional as F #jz\n\nimport nltk\nnltk.download('punkt')\nfrom nltk.tokenize import sent_tokenize\n\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom torch import optim\nfrom torch.utils.data import Dataset, DataLoader\nimport pytorch_lightning as pl\nfrom tqdm import tqdm\n# from tqdm.notebook import tqdm_notebook as tqdm\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\nfrom transformers import AdamW, get_linear_schedule_with_warmup, Adafactor\n\nfrom table_bert import TableBertModel\nfrom table_bert import Table, Column\n\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n# fine-tune\nflag = 2\nmethod = 'tanh'\nlearningrate = 2.5e-4\nnum_epoch= 1000\nmax_len = 15 \n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n\nclass WikiDataset(Dataset):\n def __init__(self, path, model):\n self.path = path\n self.model = model\n\n self.data = pd.read_json(self.path)\n #self.data = pd.read_json(open(self.path, \"r\", encoding=\"utf8\"))\n\n for i in range(len(self.data)):\n if self.data['sql_query'][i].get('agg_index') !=0:\n self.data = self.data.drop([i])\n\n self.data = self.data.reset_index(drop=True)\n\n self.data['title'] = self.data['title'].fillna('unknown')\n \n self.data['header_label'] = self.data['header'].apply(lambda x: [0]*len(x)) #jz\n self.data['select_column'] = self.data['sql'].apply(lambda x: x['sel']) #jz\n self.data['length'] = self.data['header'].apply(lambda x: len(x)) #jz\n \n for i in range(len(self.data)): \n self.data['header_label'][i][self.data['select_column'][i]] = 1 #jz\n \n # pad header \n for i in range(len(self.data)):\n self.data['header'][i]=self.data['header'][i] + (max_len-len(self.data['header'][i]))*[['pad','text','zzz']]\n\n #jz: drop length > max_len\n self.data = self.data[self.data['length'] <= max_len] \n self.data = self.data.reset_index(drop=True)\n \n #jz: padding the header_label with 
2, and padding the table with empty string\n self.data['header_label'] = self.data['header_label'].apply(lambda x: x+[2]*(max_len-len(x)))\n \n for i in range(len(self.data)):\n rows = self.data['rows'][i]\n length = self.data['length'][i]\n \n for j in range(len(rows)):\n rows[j] = rows[j] + ['zzz']*(max_len-length)\n self.data['rows'][i] = rows\n #print(self.data.shape)\n\n self.tabs = []\n self.context = []\n #self.answers = []\n self.label = [] #jz\n\n self._build()\n\n def __len__(self):\n return len(self.context)\n\n def __getitem__(self, index):\n tabi = self.tabs[index]\n conti = self.context[index]\n #ansi = self.answers[index]\n la = self.label[index] #jz\n #return {\"table\": tabi, \"context\": conti, \"answer\": ansi, \"label\": la} #jz\n return {\"table\": tabi, \"context\": conti, \"label\": la}\n\n def _build(self):\n for idx in tqdm(range(len(self.data))):\n qs = self.data.loc[idx, 'context']\n #ans = self.data.loc[idx, 'answer']\n heads = self.data.loc[idx, 'header']\n tit = self.data.loc[idx, 'title']\n rs = self.data.loc[idx, 'rows']\n label = self.data.loc[idx, 'header_label'] #jz: if use binary classification\n #label = self.data.loc[idx, 'select_column'] #jz: if use multi-class classification, here is a number in [0,14].\n\n col = [Column(z[0], z[1], sample_value=z[2]) for z in heads]\n\n table = Table(\n id=tit,\n header=col,\n data=rs\n ).tokenize(self.model.tokenizer)\n\n self.tabs.append(table)\n self.context.append(self.model.tokenizer.tokenize(qs))\n #self.answers.append(self.model.tokenizer.convert_tokens_to_ids(self.model.tokenizer.tokenize(str(ans[0])))[0])\n self.label.append(label) \n\ndef get_dataset(path, model):\n return WikiDataset(path=path, model=model)\n\ndef collate_fn(batch):\n batch_0 = [batch[i]['table'] for i in range(len(batch))]\n batch_1 = [batch[i]['context'] for i in range(len(batch))]\n #batch_2 = torch.tensor([batch[i]['answer'] for i in range(len(batch))])\n batch_3 = torch.tensor([batch[i]['label'] for i in range(len(batch))])\n return batch_0, batch_1, batch_3\n\n\nclass TaBERTTuner(pl.LightningModule):\n def __init__(self, hparams):\n super(TaBERTTuner, self).__init__()\n self.hparams = hparams\n\n self.model = TableBertModel.from_pretrained('tabert_base_k1/model.bin') #jz\n \n #first layer #jz\n self.l1 = nn.Linear(768, 500)\n self.l1_cat = nn.Linear(1536, 500)\n \n #second layer\n self.l2 = nn.Linear(500, 2) #jz\n\n #softmax\n # self.sm = nn.LogSoftmax(dim=1)\n self.sm = nn.Softmax(dim=2)\n\n #loss\n weight_try = torch.FloatTensor([1,0.01])\n #weight_try = torch.FloatTensor([1,0.167])\n self.l = nn.CrossEntropyLoss(ignore_index = 2,weight=weight_try) #jz: 2 is index for padding\n #self.l = nn.CrossEntropyLoss(ignore_index = 2)\n self.l = self.l.to('cuda')\n\n\n def forward(self, context_list, table_list):\n context_encoding, column_encoding, info_dict = self.model.encode(contexts=context_list, tables=table_list)\n #print(context_encoding.shape)\n ctx_enc_sum = torch.sum(context_encoding, axis=1)\n ctx_enc_sum = ctx_enc_sum.unsqueeze(dim =1) #jz: unsqueeze (2, 768) to (2, 1, 768) if batch size=2\n col_enc_sum = column_encoding #jz: for binary (2, 15, 768)\n\n if flag == 0: #ignore question embedding\n out = self.l1(col_enc_sum)\n if method == 'relu':\n out = F.relu(out)\n if method == 'tanh':\n out = F.tanh(out)\n out = self.l2(out)\n out = self.sm(out)\n \n if flag == 1: #add question embedding and column embedding\n ctx_col_sum = ctx_enc_sum + col_enc_sum\n out = self.l1(ctx_col_sum)\n if method == 'relu':\n out = F.relu(out)\n if 
method == 'tanh':\n out = F.tanh(out)\n out = self.l2(out)\n out = self.sm(out)\n \n if flag == 2: #concating question embedding and column embedding\n ctx_enc_sum = ctx_enc_sum.repeat(1, max_len, 1) #repeat max_len times\n concat = torch.cat([ctx_enc_sum, col_enc_sum], dim=2) #concat at dimension 2 (2, 15, 1536)\n out = self.l1_cat(concat)\n if method == 'relu':\n out = F.relu(out)\n if method == 'tanh':\n out = F.tanh(out)\n out = self.l2(out)\n out = self.sm(out)\n\n return out\n\n def _step(self, batch):\n #if torch.cuda.is_available():\n tbl, ctx, label = batch[0], batch[1], torch.tensor(batch[2]).to('cuda')\n #else:\n #tbl, ctx, ans, label = batch[0], batch[1], torch.tensor(batch[2]), torch.tensor(batch[3])\n\n # print(ans)\n outputs = self(ctx, tbl)\n outputs = outputs.view(-1, 2) #jz: reshape (2, 15, 2) -> (30, 2)\n label = label.view(-1) #jz: reshape (2, 15, 1) -> (30, 1)\n #print('output', outputs.size())\n # print('label', label)\n loss = self.l(outputs, label)\n\n return loss\n\n def training_step(self, batch, batch_idx):\n loss = self._step(batch)\n tensorboard_logs = {\"train_loss\": loss}\n return {\"loss\": loss, \"log\": tensorboard_logs}\n\n def training_epoch_end(self, outputs):\n avg_train_loss = torch.stack([x['loss'] for x in outputs]).mean()\n tensorboard_logs = {\"avg_train_loss\": avg_train_loss}\n return {\"avg_train_loss\": avg_train_loss, \"log\": tensorboard_logs, \"progress_bar\": tensorboard_logs}\n\n def validation_step(self, batch, batch_idx):\n loss = self._step(batch)\n return {\"val_loss\": loss}\n\n def validation_epoch_end(self, outputs):\n #print(outputs)\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n tensorboard_logs = {\"val_loss\": avg_loss}\n return {\"avg_val_loss\": avg_loss, \"log\": tensorboard_logs, \"progress_bar\": tensorboard_logs}\n\n def configure_optimizers(self):\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n\n }, ]\n\n #optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n optimizer = optim.SGD(optimizer_grouped_parameters, lr=self.hparams.learning_rate, nesterov=True, momentum=self.hparams.momentum)\n self.opt = optimizer\n\n scheduler = [\n {'scheduler': ReduceLROnPlateau(optimizer, mode=\"min\", min_lr=7.5e-5, patience=5, verbose=True),\n # might need to change here\n 'monitor': \"val_loss\", # Default: val_loss\n 'interval': 'epoch',\n 'frequency': 1\n }\n ]\n\n self.lr_scheduler = scheduler\n\n return [optimizer], scheduler\n\n #def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None, on_tpu=False,\n # using_native_amp=False, using_lbfgs=False):\n # optimizer.step()\n # optimizer.zero_grad()\n # self.lr_scheduler.step()\n\n def get_tqdm_dict(self):\n tqdm_dict = {\"loss\": \"{:.3f}\".format(self.trainer.avg_loss), \"lr\": self.lr_scheduler.get_last_lr()[-1]}\n return tqdm_dict\n\n def train_dataloader(self):\n train_dataset = get_dataset(path=self.hparams.train_data, model=self.model)\n dataloader = DataLoader(train_dataset, batch_size=self.hparams.train_batch_size, drop_last=True, shuffle=True,\n num_workers=4, collate_fn=collate_fn)\n #t_total = (\n # (len(dataloader.dataset) // 
(self.hparams.train_batch_size * max(1, self.hparams.n_gpu)))\n # // self.hparams.gradient_accumulation_steps * float(self.hparams.num_train_epochs)\n\n # )\n # scheduler = get_linear_schedule_with_warmup(\n # self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total\n # )\n # self.lr_scheduler = scheduler\n return dataloader\n\n def val_dataloader(self):\n val_dataset=get_dataset(path=self.hparams.dev_data, model = self.model)\n return DataLoader(val_dataset, batch_size=self.hparams.eval_batch_size, num_workers=4,collate_fn=collate_fn)\n\n def test_dataloader(self):\n test_dataset=get_dataset(path=self.hparams.test_data, model = self.model)\n return DataLoader(test_dataset, batch_size=self.hparams.eval_batch_size, num_workers=4,collate_fn=collate_fn)\n\nif __name__=='__main__':\n set_seed(42)\n args_dict = dict(\n train_data=\"train_tabert.json\",\n dev_data=\"dev_tabert.json\",\n test_data=\"test_tabert.json\",\n output_dir=\"./\",\n learning_rate=learningrate,\n momentum = 0.99, \n weight_decay=0.0,\n adam_epsilon=1e-8,\n warmup_steps=0,\n train_batch_size=16,\n eval_batch_size=16,\n # change epoch here\n num_train_epochs=num_epoch,\n gradient_accumulation_steps=16,\n n_gpu=1,\n #early_stop_callback=False,#early stop\n fp_16=False,\n opt_level='O1',\n # max_grad_norm=1.0,\n max_grad_norm=0.3,\n seed=42,\n )\n args = argparse.Namespace(**args_dict)\n print(args_dict)\n checkpoint_callback =ModelCheckpoint(\n filepath=args.output_dir,\n prefix= str('binary_sm') +'_checkpoint_-{epoch:02d}',\n monitor=\"val_loss\", mode=\"min\", save_top_k=1)\n\n #checkpoint_callback = pl.callbacks.ModelCheckpoint(\n # filepath=args.output_dir, prefix=\"checkpoint\", monitor=\"val_loss\", mode=\"min\", save_top_k=1)\n early_stop_callback = EarlyStopping(\n monitor='val_loss',\n min_delta=0.00,\n patience=3,\n verbose=True,\n mode='min'\n )\n\n train_params = dict(\n accumulate_grad_batches=args.gradient_accumulation_steps,\n gpus=1,\n #gpus=0,\n max_epochs=args.num_train_epochs,\n # early_stop_callback=False,\n precision=32,\n amp_level=args.opt_level,\n gradient_clip_val=args.max_grad_norm,\n checkpoint_callback=checkpoint_callback,\n callbacks=[early_stop_callback],\n )\n model = TaBERTTuner(args)\n trainer = pl.Trainer(**train_params)\n torch.cuda.empty_cache()\n trainer.fit(model)\n\n # \n\n \n","sub_path":"try_binary.py","file_name":"try_binary.py","file_ext":"py","file_size_in_byte":13479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"162558074","text":"# -*- coding: utf-8 -*-\n# 爬取豆瓣TOP250电影\nimport scrapy\nfrom douban.items import DoubanItem\n\nclass DoubanSpiderSpider(scrapy.Spider):\n # 爬虫的名字\n name = 'douban_spider'\n # 允许的域名\n allowed_domains = ['movie.douban.com']\n # 入口url,扔到调度器里面,再由引擎丢到解析器里面,也就是parse函数\n start_urls = ['https://movie.douban.com/top250']\n\n # 默认解析方法,这里是返回数据的解析\n def parse(self, response):\n # 循环电影的条目,比如第一页25条数据\n movies_list = response.xpath('//div[@class=\"article\"]//ol[@class=\"grid_view\"]/li')\n for i_item in movies_list:\n # item文件导进来\n doubanitem = DoubanItem()\n # 详细的数据的解析\n doubanitem[\"rank_num\"] = i_item.xpath('.//div[@class=\"item\"]//em/text()').extract_first()\n doubanitem[\"title\"] = i_item.xpath('.//div[@class=\"item\"]/div[@class=\"info\"]/div[@class=\"hd\"]/a/span[1]/text()').extract_first()\n content = i_item.xpath('.//div/div[2]/div[2]/p[1]/text()').extract()\n # 数据的处理\n for i_content in content:\n content_s = \"\".join(i_content.split())\n doubanitem[\"introduce\"] = 
content_s\n\n doubanitem[\"star\"] = i_item.xpath('.//div[@class=\"item\"]/div[@class=\"info\"]/div[@class=\"bd\"]/div[@class=\"star\"]/span[2]/text()').extract_first()\n doubanitem[\"comments\"] = i_item.xpath('.//div[@class=\"item\"]/div[@class=\"info\"]/div[@class=\"bd\"]/div[@class=\"star\"]/span[4]/text()').extract_first()\n doubanitem[\"describe\"] = i_item.xpath('.//div[@class=\"item\"]/div[@class=\"info\"]/div[@class=\"bd\"]/p[@class=\"quote\"]/span/text()').extract_first()\n # print(doubanitem)\n # 需要将数据yield到pipeline(管道)去,后续可进行数据的存储,清洗,去重\n yield doubanitem\n # 获取下一页的条目\n next_link = response.xpath('//span[@class=\"next\"]/link/@href').extract()\n # 如果取得到\n if next_link:\n next_link = next_link[0]\n yield scrapy.Request(\"https://movie.douban.com/top250\" + next_link, callback=self.parse)\n","sub_path":"douban/spiders/douban_spider.py","file_name":"douban_spider.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"401096076","text":"from saef.connections import ConnectionFormHelper\nfrom ..models import Connection\nfrom ..forms import ConnectionTypeForm\n\nfrom saefportal.settings import MSG_SUCCESS_CONNECTION_UPDATE, MSG_SUCCESS_CONNECTION_VALID, \\\n MSG_ERROR_CONNECTION_INVALID, MSG_SUCCESS_CONNECTION_SAVED, MSG_ERROR_CONNECTION_SELECT_INVALID, \\\n MSG_SUCCESS_CONNECTION_DELETED\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views import generic\n\nADD_CONNECTION_TITLE = 'Add Connection'\n\nADD_CONNECTION_TEMPLATE_NAME = \"connection/add_connection.html\"\nEDIT_CONNECTION_TEMPLATE_NAME = \"connection/edit_connection_detail.html\"\nPOSTGRESQL_NAME = \"PostgreSQL\"\n\n\nclass ConnectionView(LoginRequiredMixin, generic.ListView):\n template_name = 'connection/connection_list.html'\n model = Connection\n context_object_name = 'connections'\n\n\n@login_required()\ndef update_connection(request, connection_id):\n helper = ConnectionFormHelper()\n connection = get_object_or_404(Connection, id=connection_id)\n\n if request.method == \"POST\":\n if request.POST[\"Operation\"] == 'Delete':\n instance = Connection.objects.get(pk=connection_id)\n instance.delete()\n messages.success(request, MSG_SUCCESS_CONNECTION_DELETED)\n return redirect('connection')\n else:\n edit_method = helper.lookup_connection(connection.connection_type.name, 'edit')\n edit_form = edit_method(post_request=request.POST)\n if edit_form.is_valid():\n save_edit_method = helper.lookup_connection(connection.connection_type.name, 'save_edit')\n save_edit_method(edit_form, connection_id)\n messages.success(request, MSG_SUCCESS_CONNECTION_UPDATE)\n return redirect(\"connection\")\n else:\n messages.error(request, MSG_ERROR_CONNECTION_SELECT_INVALID)\n context = {\"connection_form\": edit_form}\n return render(request, EDIT_CONNECTION_TEMPLATE_NAME, context)\n\n edit_method = helper.lookup_connection(connection.connection_type.name, 'edit')\n edit_form = edit_method(connection_pk=connection.pk)\n context = {\"connection_form\": edit_form}\n return render(request, EDIT_CONNECTION_TEMPLATE_NAME, context)\n\n\n@login_required()\ndef test_database_connection(request, form):\n helper = ConnectionFormHelper()\n connection_type = form.cleaned_data['connection_type'].name\n\n add_form_method = helper.lookup_connection(connection_type, 'add')\n 
connection_form = add_form_method(request.POST)\n if connection_form.is_valid():\n test_connection_method = helper.lookup_connection(connection_type, 'test')\n result = test_connection_method(connection_form.cleaned_data, form.cleaned_data)\n if result is True:\n messages.success(request, MSG_SUCCESS_CONNECTION_VALID)\n else:\n messages.error(request, MSG_ERROR_CONNECTION_INVALID(result))\n\n context = {\n 'form': form,\n 'connection_form': add_form_method(request.POST),\n 'connection_type': connection_type\n }\n return render(request, ADD_CONNECTION_TEMPLATE_NAME, context)\n\n\n@login_required()\ndef save_connection(request, form):\n helper = ConnectionFormHelper()\n connection_type = form.cleaned_data['connection_type'].name\n\n add_form_method = helper.lookup_connection(connection_type, 'add')\n connection_form = add_form_method(request.POST)\n if connection_form.is_valid():\n save_method = helper.lookup_connection(connection_type, 'save')\n save_method(connection_form.cleaned_data, form.cleaned_data)\n messages.success(request, MSG_SUCCESS_CONNECTION_SAVED)\n return redirect(\"connection\")\n\n messages.error(request, MSG_ERROR_CONNECTION_SELECT_INVALID)\n connection_type = form.cleaned_data['connection_type'].name\n\n context = {\n \"form\": ConnectionTypeForm(request.POST),\n \"connection_form\": add_form_method(request.POST),\n \"connection_type\": connection_type\n }\n return render(request, ADD_CONNECTION_TEMPLATE_NAME, context)\n\n\n@login_required()\ndef add_connection(request):\n helper = ConnectionFormHelper()\n\n if request.method == \"POST\":\n form = ConnectionTypeForm(request.POST)\n if form.is_valid() and form.cleaned_data['connection_type']:\n connection_type = form.cleaned_data['connection_type'].name\n if \"Operation\" not in request.POST:\n add_form_method = helper.lookup_connection(connection_type, 'add')\n context = {\n \"form\": form,\n \"connection_form\": add_form_method(),\n \"connection_type\": connection_type\n }\n return render(request, ADD_CONNECTION_TEMPLATE_NAME, context)\n elif request.POST[\"Operation\"] == \"Test\":\n return test_database_connection(request, form)\n elif request.POST[\"Operation\"] == \"Save\":\n return save_connection(request, form)\n else:\n form = ConnectionTypeForm()\n return render(request, ADD_CONNECTION_TEMPLATE_NAME, {'form': form})\n","sub_path":"saefportal/saef/views/connection_view.py","file_name":"connection_view.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"307107843","text":"\nfrom alpha_vantage.timeseries import TimeSeries\n\nimport sqlite3\n\nfrom datetime import datetime as dt\nimport time\n\nimport pandas as pd\n\n# pd.core.common.is_list_like = pd.api.types.is_list_like\n\n# from pandas_datareader import data as pdr\n# import fix_yahoo_finance as yf\n\n\ndb = '/Users/setor/PycharmProjects/PortfolioManager/PortfolioApplication/Portfolio Data'\n\n\ndef sym_to_name(symbol):\n conn = sqlite3.connect(db)\n\n sql = \"SELECT * FROM stocklist WHERE symbol='{}'\".format(symbol)\n\n c = conn.cursor()\n c.execute(sql)\n\n row = c.fetchone()\n\n conn.commit()\n conn.close()\n\n return row[0]\n\n\n# uses the alphavantae API to receive the stock Data daily\n# returns a pandas DataFRame Object\ndef get_prices(symbol):\n ts = TimeSeries(key='NM4PJC65CNJLYGGU', output_format='pandas')\n data, meta_data = ts.get_daily(symbol=symbol, outputsize='full')\n data = data.reset_index()\n data = data[data['1. 
open'] != 0]\n data = data.rename(index=str, columns={\"date\": \"Date\", \"1. open\": \"Open\", \"2. high\": \"High\", \"3. low\": 'Low',\n '4. close': 'Close', '5. volume': 'Volume'})\n return data\n\n\n# stores the DataFrame with the given name ino the database as table\ndef store_data(df, name):\n conn = sqlite3.connect(db)\n df.to_sql(name, conn)\n\n conn.commit()\n conn.close()\n\n\ndef get_last_date(table_name):\n conn = sqlite3.connect(db)\n\n sql = 'SELECT * FROM \"{}\" ORDER BY Date DESC LIMIT 1;'.format(table_name)\n\n c = conn.cursor()\n c.execute(sql)\n\n row = c.fetchone()\n\n conn.commit()\n conn.close()\n\n return row[0]\n\n\ndef find_new_values(df, date):\n # creates a Dataframe with values all more recent than the entered date\n df['Date'] = pd.to_datetime(df.Date)\n df = df[df.Date > date]\n\n return df\n\n\ndef update(symbol):\n conn = sqlite3.connect(db)\n\n # receive the corresponding name and the last date in the column\n # name and ticker have to be in stocklist crucial actually\n\n name = sym_to_name(symbol)\n\n last_date = pd.to_datetime(get_last_date(name))\n today = str(dt.today())\n today = today[:10]\n today = dt.strptime(today, '%Y-%m-%d')\n\n print(last_date, today)\n if last_date == today:\n print('Already up to date')\n time.sleep(15)\n return\n else:\n # get new data to append\n new_data = get_prices(symbol)\n try:\n new_data = new_data.set_index('Date')\n store_data(new_data, name)\n except ValueError:\n new_data = new_data.reset_index()\n needed_values = find_new_values(new_data, last_date)\n\n sql = 'INSERT INTO \"{}\" VALUES (?,?,?,?,?,?)'.format(name)\n needed_values['Date'] = needed_values.loc[:, 'Date'].apply(lambda x: (str(x)[:10]))\n conn.executemany(sql, needed_values.values)\n\n conn.commit()\n\n conn.close()\n\n\ndef update_all():\n symbols = get_all_symbols()\n for symbol in symbols:\n name = sym_to_name(symbol)\n print('Updating', name)\n update(symbol)\n print('Finished', name)\n\n\n# need the symbols\n# return a list of the tickers\n\n# returns a list of all symbols in stocklist\ndef get_all_symbols():\n conn = sqlite3.connect(db)\n df = pd.read_sql_query(\"select * from stocklist\", conn)\n\n conn.commit()\n conn.close()\n\n return df['symbol'].values\n\n\nif __name__ == '__main__':\n update_all()\n","sub_path":"PortfolioApplication/DailyUpdate.py","file_name":"DailyUpdate.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"644229776","text":"from time import sleep as sl\nfrom Bio import SeqIO\n\n##search the sequences from BPGA core seq file output in references sequence file. Convert to protein ID.\norg_dict = {}\norg1_seq = SeqIO.parse('/Users/liam/Desktop/analysis_D1a_BPGA_45%true/Org1_ref_core_seq.txt', 'fasta')\nfor i in org1_seq:\n org_dict[str(i.id)] = str(i.seq)\n\nref_dict = {}\nref_seq = SeqIO.parse('/Users/liam/Desktop/analysis_D1a_BPGA_45%true/GCA_000006745.1.fas', 'fasta')\nfor j in ref_seq:\n ref_dict[str(j.seq)] = str(j.id)\n\nbpga_cor_pro_id = []\nfor k in org_dict.values():\n bpga_cor_pro_id.append(ref_dict[k])\n\n## reading in reference .gpff file to convert the protein IDs into VC locus tags. 
Also find cds region and search in ref .gff to get 3 columns of protein, locus and cds tags.\nfrom time import sleep as sl\nfrom Bio import SeqIO\n\ninfile = open('/Users/liam/Desktop/analysis_D1a_BPGA_45%true/GCA_000006745.1_ASM674v1_protein.gpff', 'r').readlines()\noutfile = open('/Users/liam/Desktop/analysis_D1a_BPGA_45%true/protein_locus_cds.txt', 'w')\n\nmy_dict = {}\nfor i in infile:\n if \"VERSION\" in i:\n protein_ID = i.split(' ')[1]\n protein_ID = protein_ID.rstrip('\\n')\n my_dict[protein_ID] = 'x'\n if \"locus_tag\" in i:\n locus_tag = i.split(' /locus_tag=\"')[1].rstrip('\"\\n')\n ocus_tag = locus_tag.replace('VC_A', 'VCA').replace('VC_', 'VC')\n my_dict[protein_ID] = locus_tag\n # if '/coded_by=' in i:\n # cds_cor = i.split(':')[1].rstrip(')\"\\n').split('..')\n # ref_gff_in = open('/Users/liam/Desktop/analysis_D1a_BPGA_45%true/GCF_000006745.1_ASM674v1_genomic.gff','r').readlines()\n # for line in ref_gff_in:\n # if str(cds_cor[1]) in line:\n\n#searching the protein_ID from bgpga in the reference strain and converting to locus_tags.\n# for l in bpga_cor_pro_id:\n# print(my_dict[l])\n\n#taking cds to locus info from excel file\ninfile1 = open('/Users/liam/Desktop/ref_cds_to_locus.txt','r').read().splitlines()\n\nref_info_dict = {}\nfor i in infile1:\n col = i.split('\\t')\n ref_info_dict[col[0]] = col[1]\n\n#read in roary cds and conert to locus_tag using ref_info_dict\nroary_cds_in = open('/Users/liam/Desktop/roary_cds.txt','r').read().splitlines()\n\nfor cds in roary_cds_in:\n print(ref_info_dict[cds])","sub_path":"Vibrio/2018-05-23-BPGA_analyse_genes.py","file_name":"2018-05-23-BPGA_analyse_genes.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"580361468","text":"\"\"\"\n1584. Min Cost to Connect All Points\n\n\nYou are given an array points representing integer coordinates of some points on a 2D-plane, where points[i] = [xi, yi].\n\nThe cost of connecting two points [xi, yi] and [xj, yj] is the manhattan distance between them: |xi - xj| + |yi - yj|, where |val| denotes the absolute value of val.\n\nReturn the minimum cost to make all points connected. 
All points are connected if there is exactly one simple path between any two points.\n\n \n\nExample 1:\n\n\n\nInput: points = [[0,0],[2,2],[3,10],[5,2],[7,0]]\nOutput: 20\nExplanation:\n\nWe can connect the points as shown above to get the minimum cost of 20.\nNotice that there is a unique path between every pair of points.\nExample 2:\n\nInput: points = [[3,12],[-2,5],[-4,1]]\nOutput: 18\nExample 3:\n\nInput: points = [[0,0],[1,1],[1,0],[-1,1]]\nOutput: 4\nExample 4:\n\nInput: points = [[-1000000,-1000000],[1000000,1000000]]\nOutput: 4000000\nExample 5:\n\nInput: points = [[0,0]]\nOutput: 0\n \n\nConstraints:\n\n1 <= points.length <= 1000\n-10^6 <= xi, yi <= 10^6\nAll pairs (xi, yi) are distinct.\n\n\n\"\"\"\n\n\nclass MinCostConnectPoints:\n\n    \"\"\"\n    O(n^2) Prim's Minimum Spanning Tree\n    \"\"\"\n    def doit_prims(self, points: list) -> int:\n\n        from heapq import heappush, heappop\n        N = len(points)\n        visited = [False for _ in range(N)]\n        heap = []\n        i, res = 0, 0\n        \n        def manhattan_distance(a, b):\n            return abs(points[a][0] - points[b][0]) + abs(points[a][1] - points[b][1])\n        \n        # an MST on N points has exactly N - 1 edges, so N - 1 rounds suffice\n        for _ in range(N - 1):\n            \n            visited[i] = True\n            \n            for j in range(N):\n                if not visited[j]:\n                    heappush(heap, (manhattan_distance(i, j), j))\n            \n            # discard stale entries that point at already-visited nodes\n            while heap and visited[heap[0][1]]:\n                heappop(heap)\n            \n            c, i = heappop(heap)\n            res += c\n        \n        return res\n\n    \"\"\"\n    There are many articles online describing this O(nlogn) approach, the basic idea is that for each point we don't need to consider all other points as our candidates to send edges to.\n\n    In fact, for each point, we can use it as the origin to establish a cartesian coordinate system, and then divide each quadrant into two 45 degree regions (using either line y=x or y=-x). It can be proven that in each region we only need to consider the point closest to the origin (current point of interest) to send our edge to, thus reducing the number of edge candidates from O(n^2) to O(n). 
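(Each point therefore contributes at most eight candidate edges, one per region.) 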
We can then use Kruskal algorithm to build the MST in O(nlogn) time.\n\n    Let's consider the region enclosed by x>x0 and x-y=1:\n            if self.tree[k][0] < x:\n                self.tree[k]=(x,i)\n            k+=k&-k\n\n    class UF:\n        def __init__(self,n):\n            self.p = list(range(n))\n            self.s = [1]*n\n\n        def find(self,x):\n            if self.p[x] != x:\n                self.p[x] = self.find(self.p[x])\n            return self.p[x]\n\n        def union(self,x,y):\n            xr, yr = self.find(x), self.find(y)\n            if xr == yr:\n                return False\n            if self.s[xr] < self.s[yr]:\n                xr, yr = yr, xr\n            self.p[yr] = xr\n            self.s[xr] += self.s[yr]\n            return True\n\n    def doit_kruskal(self, points: list) -> int:\n        n = len(points)\n        edges = []\n\n        # transform the x,y coordinates so 4 of the 8 regions are considered\n        for dirc in range(4):\n            if dirc == 1 or dirc == 3:\n                for i in range(n): points[i].reverse() # swap x,y coordinates\n\n            elif dirc == 2:\n                for i in range(n): points[i][0] *= -1 # flip x coordinates\n\n        # sort by x values (descending x0 0:\r\n        print(\"Positions must be entered in the format: x,y\")\r\n        for i in range(nb_pylons):\r\n            xy = input(f\"Position of pylon {i+1}: \")\r\n            pylons.append([int(i) for i in xy.split(',')])\r\n    else:\r\n        pylons = []\r\n\r\n    database.addValue(start, end, step, avg, pylons)\r\n\r\n# place the pylons\r\nfor pylon in pylons:\r\n    x,y = pylon\r\n    c_library.preaddPylones(x, y)\r\n\r\n\r\nnb_points = (end-start)//step+1\r\nlist_result = Manager().list([])\r\n\r\n\r\n\"\"\"Multiprocessing\"\"\"\r\nprocess_list = []\r\nfor i in range(avg):\r\n    process_list.append(Process(target = run))\r\n\r\nt0 = time()\r\nfor process in process_list:\r\n    process.start()\r\nfor process in process_list:\r\n    process.join()\r\nprint(f\"Execution time: {time()-t0}\")\r\n\r\n\r\nprint(\"Generating the graph...\")\r\nplt.title(\"Simulation result\")\r\nplt.plot(afficher(list_result),list(range(start,end,step)))\r\nplt.xlabel(\"Number of iterations\")\r\nplt.ylabel(\"Number of individuals\")\r\n\r\nplt.show()\r\n","sub_path":"Exploitation/Courbes/Python/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"3486538","text":"# function dict for register\nfrom fgo_bluetooth_helper.fgo_function.script.CBA_3T import cba_3t_battle_script, cba_3t_load_parameters\nfrom fgo_bluetooth_helper.fgo_function.script.AOE_3T import aoe_3t_load_parameters, aoe_3t_battle_script\n\n\ndef load_script(script):\n    script_dict = {\n        \"CBA_3T\": cba_3t_load_parameters,\n        \"AOE_3T\": aoe_3t_load_parameters\n    }\n    method = script_dict.get(script, \"\")\n    if method:\n        print(\"script:{} loaded, loading parameters now!\".format(script))\n        result = method()\n        return result\n    else:\n        print(\"script:{} cannot be loaded, please check the script name and register it in script.script_register.py\".format(\n            script))\n\n\ndef load_battle_script(script, mouse_instance):\n    script_dict = {\n        \"CBA_3T\": cba_3t_battle_script,\n        \"AOE_3T\": aoe_3t_battle_script\n    }\n    method = script_dict.get(script, \"\")\n    if method:\n        print(\"script:{} loaded, running battle script now!\".format(script))\n        method(mouse_instance)\n    else:\n        print(\"script:{} cannot be loaded, please check the script name and register it in script.script_register.py\".format(\n            script))\n","sub_path":"fgo_bluetooth_helper/fgo_function/script/script_register.py","file_name":"script_register.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
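The script_register record above is a textbook dict-dispatch registry: script names map to callables, lookups go through .get(), and unknown names fall through to a message instead of raising KeyError. A minimal standalone sketch of the same pattern (the names below are illustrative, not part of that module):

def dispatch(script, registry):
    # registry maps a script name to a zero-argument callable
    method = registry.get(script)
    if method is None:
        print("script:{} is not registered".format(script))
        return None
    return method()

print(dispatch("CBA_3T", {"CBA_3T": lambda: "parameters loaded"}))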
+{"seq_id":"606420192","text":"from GlobalVariable import *\r\nfrom LocalDriver import LocalDriver\r\n\r\n\r\nprint(\"Start to run\")\r\nprint(\"Logger File is : \" + LoggerFile)\r\nprint(\"Logger Level is : \" + LoggerLevel)\r\n\r\n\r\n#logger = logging.getLogger()\r\n\r\n\r\n\r\n\r\nlocalDriver = LocalDriver() \r\n\r\n\r\nlocalDriver.setWebDriverType(\"Chrome\")\r\n\r\nlocalDriver.initDriver()","sub_path":"src/RunTime.py","file_name":"RunTime.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"252374992","text":"#!/usr/bin/env python\n#coding:utf-8\n\nimport os\nimport sys\n\nifile_uniprot_gi = open(\"uniprot_to_gi.txt\",\"r\")\nifile_gi_seqid = open(\"modbase_gi_sequenceID.txt\",\"r\")\nifile_model_sum = open(\"select_model.sum\",\"r\")\n\nofile = open(\"map_model.sum\",\"w\")\n\nd_gi = {}\nfor line in ifile_uniprot_gi:\n line = line.rstrip(\"\\n\")\n l_line = line.split(\"\\t\")\n gi = l_line[1]\n uniprot = l_line[0]\n if gi not in d_gi:\n d_gi[gi] = [uniprot]\n else:\n d_gi[gi].append(uniprot)\n\nd_seqid = {}\nfirstline = ifile_gi_seqid.readline()\nfor line in ifile_gi_seqid:\n line = line.rstrip(\"\\n\")\n l_line = line.split(\"\\t\")\n seqid = l_line[2]\n gi = l_line[1]\n if seqid not in d_seqid:\n d_seqid[seqid] = [gi]\n else:\n d_seqid[seqid].append(gi)\n\nfirstline = ifile_model_sum.readline()\nofile.write(\"uniprotID\\t\"+firstline)\nfor line in ifile_model_sum:\n line = line.rstrip(\"\\n\")\n l_line = line.split(\"\\t\")\n seqid = l_line[2]\n if seqid in d_seqid:\n gi = d_seqid[seqid]\n uniprotID = []\n for i in gi:\n if i in d_gi:\n uniprot = d_gi[i]\n uniprotID = list(set(uniprotID+uniprot))\n \n print >> ofile,\"/\".join(uniprotID)+\"\\t\"+line\n\nifile_uniprot_gi.close()\nifile_gi_seqid.close()\nifile_model_sum.close()\n\n \n","sub_path":"map_download.py","file_name":"map_download.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"594508732","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport requests\nfrom lib.config.conf import settings\nfrom src.plugins import PluginsManager\nfrom concurrent.futures import ThreadPoolExecutor\nimport os\n\nclass Base():\n def postInfo(self,res):\n print('--------------------------')\n requests.post(settings.API_URL,json=res)\n\n\nclass Agent(Base):\n\n def collect(self):\n\n res = PluginsManager().execute()\n hostname = res['basic']['data']['hostname']\n info = open(os.path.join(settings.BASEDIR,'conf/cert'),'r',encoding='utf8').read()\n\n if not info.strip():\n with open(os.path.join(settings.BASEDIR,'conf/cert'),'w',encoding='utf8') as fp:\n fp.write(hostname)\n else:\n res['basic']['data']['hostname'] = info\n\n\n for k, v in res.items():\n print(k, v)\n\n self.postInfo(res)\n\nclass SSHSalt(Base):\n def get_hostnames(self):\n hostnames = requests.get(settings.API_URL)\n\n return hostnames\n\n def run(self,hostname):\n res = PluginsManager(hostname).execute()\n self.postInfo(res)\n\n\n\n def collect(self):\n hostnames = self.get_hostnames()\n\n\n ### 一台一台去执行 串行\n p = ThreadPoolExecutor(10)\n\n\n for hostname in hostnames:\n # res = PluginsManager(hostname).execute()\n # self.postInfo(res)\n 
p.submit(self.run, hostname)\n","sub_path":"docs/cmdb-3/autoclient/src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"509067105","text":"import os\nfrom dsk.base.db_helper.basemedia_info_db import BaseMediaInfoDb\n\n\nSHOT_FIELDS = ['code',\n               'sg_sequence']\n\n\nFRAME_RANGE_SHOT = ['sg_cut_order',\n                    'sg_cut_in',\n                    'sg_cut_out',\n                    'sg_head_in',\n                    'sg_tail_out']\n\nclass ShotInfoDb(BaseMediaInfoDb):\n    \"\"\" helper shot info db query\n    \"\"\"\n    @staticmethod\n    def compare(a,b):\n        if a.shot_order == -1 and b.shot_order == -1:\n            return cmp(a.code, b.code)\n        return cmp(a.shot_order,b.shot_order)\n\n    def __init__(self):\n        super(ShotInfoDb, self).__init__()\n        self.reset()\n\n    def reset(self):\n        super(ShotInfoDb, self).reset()\n        self.code = \"\"\n        self.id = -1\n        self.shot_order = -1\n\n    def setdata(self,code, idi = -1, cut_order = -1):\n        self.code = code\n        self.id = idi\n        self.shot_order = cut_order\n        if self.shot_order == None:\n            self.shot_order = -1\n\n    def is_valid_code(self):\n        sp = self.code.split(\"_\")\n        if len(sp) == 3:\n            return True\n        else:\n            return False\n\n    def sequence_name(self):\n        sp = self.code.split(\"_\")\n        if len(sp) > 1:\n            return sp[1]\n        else:\n            return self.code\n\n    def get_code(self):\n        return self.code\n\n    def short_name(self):\n        \"\"\"\n        return the code name without the show\n        \"\"\"\n        sp = self.code.split(\"_\")\n        if len(sp) > 1:\n            return \"_\".join(sp[1:])\n        else:\n            return self.code\n\n    def shot_number(self):\n        \"\"\"\n        return the code name without the show and seq as a string\n        \"\"\"\n        sp = self.code.split(\"_\")\n        if len(sp) > 2:\n            return \"_\".join(sp[2:])\n        else:\n            return self.code\n\n    def get_fields(self,conn,field_names = FRAME_RANGE_SHOT):\n        sg_filters = [['code', 'is', self.code]]\n        return conn.find('Shot', sg_filters, field_names)\n\n    def get_thumbnail_file(self, root_path, dept,version):\n        verfile = \"Shot_%s_%s_v%03d.jpg\" % (self.code, dept, version)\n        p = os.path.join(root_path, self.code, dept, verfile)\n        return p","sub_path":"dsk/base/db_helper/shot_info_db.py","file_name":"shot_info_db.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"547283053","text":"#\n#\n# Name: EDIToXML1.py\n# Author: Neal Walters\n# Date: 6/28/2020\n# Description: Parse EDI File Directly to XML\n# Part of Udemy EDI course\n#\n\nimport sys\nfrom lxml import etree\nfrom io import StringIO\nfrom xml_marshaller import xml_marshaller  # used by the serialization demo at the end\n\n#\n# Start Reading and Parsing the EDI File\n#\n\ninEdiFilename = \"Samples/Sample_850_01_Orig.edi\"\nwith open(inEdiFilename, 'r') as file:\n    fileContents = file.read()\n\nfileContents = fileContents.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\nelementSeparator = fileContents[103:104]\nrowSeparator = fileContents[105:106]\n\nprint(\"ElementSeparator=\" + elementSeparator)\nprint(\"    RowSeparator=\" + rowSeparator)\n\n\n#print(fileContents)\n\nrows = fileContents.split(rowSeparator)\n#print(rows)\n\n\n#init temp variables\n# NOTE: po850 is assumed to be a PurchaseOrder850 instance; the PurchaseOrder850 and\n# PurchaseOrder850LineItem classes are referenced below but defined in a companion file.\npo850 = PurchaseOrder850()\nlastRefCode = \"\"\nfirstTime = True\nfoundLineItem = False\n\nfor row in rows:\n    print(row)\n    elements = row.split(elementSeparator)\n    for idx, el in enumerate(elements):\n        elementIDNum = str.format(\"{0:0=2d}\", idx)\n        if idx == 0:\n            segmentName = el\n        elementID = segmentName + elementIDNum\n        print (\"   \", elementID, el)\n        if elementID == \"BEG02\":\n            po850.POType = el\n        if elementID == \"BEG03\":\n            po850.PONum = el\n        if elementID == \"BEG05\":\n            po850.PODate = el\n\n        
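# REF and N1 segments are qualifier/value pairs: the 01 element names the qualifier\n        # (VR, AB, ST, ...) and the 02 element carries the value, so the parser remembers\n        # the qualifier in lastRefCode before reading the next element.\n        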
if elementID == \"REF01\":\n            lastRefCode = el\n\n        if lastRefCode == \"VR\" and elementID == \"REF02\" :\n            po850.VendorRefNum = el\n        if lastRefCode == \"AB\" and elementID == \"REF02\" :\n            po850.RefDemo = el\n\n        if elementID == \"N101\":\n            lastRefCode = el\n        if lastRefCode == \"ST\" and elementID == \"N102\" :\n            po850.ShipToName = el\n        if lastRefCode == \"ST\" and elementID == \"N103\" :\n            po850.ShipToCode = el\n\n        if elementID == \"PO100\":\n            if not firstTime:\n                po850.add_lineitem(po850Line)\n            po850Line = PurchaseOrder850LineItem()\n            firstTime = False\n            foundLineItem = True\n\n        if elementID == \"PO101\":\n            po850Line.LineNum = el\n\n        if elementID == \"PO102\":\n            po850Line.Qty = el\n\n        if elementID == \"PO103\":\n            po850Line.UOM = el\n\n        if elementID == \"PO104\":\n            po850Line.Price = el\n\n        if elementID == \"PO105\":\n            po850Line.Basis = el\n\n        if elementID == \"PO107\":\n            po850Line.PartNum = el\n\n        if elementID == \"PID05\":\n            po850Line.Descr = el\n\n        if elementID == \"DTM02\":\n            po850Line.DateRequested = el\n\n        if elementID == \"CTT01\":\n            # Handle the last hanging PO line item that has not been written out yet\n            if foundLineItem:\n                po850.add_lineitem(po850Line)\n\n\nprint(\"\\n================ End of parsing ====================\")\npo850.print()\n\n#\n# Now you have a PO object in memory; what do you do with it?\n# Basically, the PO needs to be loaded into your system (ERP or database)\n# 1) Serialize it to disk as XML or JSON for some other program to process\n# 2) Build SQL statements, or call Stored Procedures to store in a database\n# 3) Call an API/Library or a Web Service to add the PO to your system\n#\n\n# Demo - Serialize the PO to an XML string with xml_marshaller\n# (the pickle equivalents would be pickle.dumps for a string, pickle.dump for a file)\n\noutXMLFilename = \"Samples/Sample_850_01_Orig_Python.xml\"\nfileObj = StringIO()\nstrXML850 = xml_marshaller.dumps(po850)\nprint(strXML850)\n","sub_path":"Archivos/EDIClass_2021_02_17/PythonEDI/EDIToXML2.py","file_name":"EDIToXML2.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"287157785","text":"import CTR_main1 as main\nimport sys\n\ndef initPath(ortho,ortho_data,pickle_file,image_path,output):\n    paths = {}\n    paths['ortho'] = ortho.replace(\"'\", '')\n    paths['ortho_data'] = ortho_data.replace(\"'\", '')\n    paths['pickle_file'] = pickle_file.replace(\"'\", '')\n    paths['image_path'] = image_path.replace(\"'\", '')\n    paths['output'] = output.replace(\"'\", '')\n    return paths\ndef run(mode, ortho,ortho_data,pickle_file,image_path,output):\n    paths = initPath(ortho,ortho_data,pickle_file,image_path,output)\n    main.main_flow(mode, paths)\n\n\nif __name__ == '__main__':\n    run(sys.argv[1][1:-1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5],sys.argv[6])\n\n# python runMe.py 'ortho+images' 'C:/Users/mdwq87/Downloads/project/resizedOrtho.tif' 'C:/Users/mdwq87/Downloads/project/resized.tfw' '\n# ' 'C:/Users/mdwq87/Downloads/project/input' 'C:/Users/mdwq87/Downloads/project/output'","sub_path":"CTR_proj_main/runMe.py","file_name":"runMe.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"246426554","text":"import itertools\nucnum = int(input())\nans = list()\nfor i in range(ucnum):\n    n,k = map(int,input().split())\n    strs = input().split()\n    lists = [int(s) for s in strs]\n    temp = list()\n    for t in itertools.combinations(lists,k):\n        index=0\n        for j in t:\n            index+=j\n        
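# record the sum of this k-element combination; ans keeps the largest\n        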
temp.append(index)\n    ans.append(max(temp))\nfor i in ans:\n    print(i)\n","sub_path":"Code/CodeRecords/2676/60716/271409.py","file_name":"271409.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"402764263","text":"import requests\nfrom bs4 import BeautifulSoup\n\nrequest = requests.get('https://en.wikipedia.org/wiki/List_of_2019_albums')\ncontent = request.content\n\nsoup = BeautifulSoup(content, 'html.parser')\ntables = soup.find_all('table',{'class':'wikitable'})\n\nfor item in tables:\n    header = item.find('th')\n    print(header)\n\nprint(soup.prettify())\n","sub_path":"Web Scrape/scrappy.py","file_name":"scrappy.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"574133159","text":"from gestione.models import Carico,Saldo\nfrom django.db.models import Q,F\n \nclass MRim:\n    def ModPeso(self,node,colli):\n        cll=int(colli)\n        rec=Saldo.objects.get(idcod=node)\n        saldo=rec.q-cll\n        rec.q=saldo\n        rec.save()\n        self.PushCarico(node,cll) \n        return 1\n    \n    def PushCarico(self,cod,cassa):\n        lotto=Carico.objects.filter(Q(idcod__id=cod),cassa__gt=F(\"cassaexit\")).order_by(\"id\")\n        ltid=lotto.first()\n        num=ltid.cassa-(cassa+ltid.cassaexit)\n        if(num>=0):\n            ltid.cassaexit=cassa+ltid.cassaexit\n            ltid.save()\n        else:\n            ltid.cassaexit=ltid.cassa\n            ltid.save()\n            res=self.Rec(lotto,ltid.id,num*(-1),0)\n        return \n    \n    def Rec(self,lotti,lotto,casse,i):\n        num=0\n        try:\n            num=lotti[i].cassa-(lotti[i].cassaexit+casse)\n        except IndexError:\n            return num\n        if(num>=0 and lotti[i].id!=lotto):\n            lt1=lotti.get(id=lotti[i].id)\n            #lotti[i].cassaexit=lotti[i].cassaexit+casse\n            #lotti[i].save()\n            lt1.cassaexit=lotti[i].cassaexit+casse\n            lt1.save()\n#            return bl\n            return 0\n        else:\n            if(lotti[i].id!=lotto):\n                lt1=lotti.get(id=lotti[i].id)\n                lt1.cassaexit=lotti[i].cassa\n                lt1.save()\n                #lotti[i].cassaexit=lotti[i].cassa\n                #lotti[i].save()\n            else:\n                num=casse*(-1) \n            i=i+1\n            res=self.Rec(lotti,lotto,num*(-1),i)\n            return 0 ","sub_path":"gestione/Rimanenza/Modifica/RModifica.py","file_name":"RModifica.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"141804051","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 31 11:53:56 2019\r\n\r\n@author: warrenc\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.optimize import curve_fit\r\nfrom scipy.integrate import trapz\r\n\r\ndef histogram_data(data, bins=50, range=None):\r\n    \"\"\"\r\n    Takes in an array of data and produces an array of counts for a histogram\r\n    with the specified number of bins. If range is None it will auto \r\n    determine the size of the bins. 
By specifying a range you bin within an\r\n    interval specified by range=(min,max).\r\n    \r\n    This method also outputs an array containing the center of the bins to\r\n    integrate along so as to normalize the data.\r\n    \r\n    Inputs:\r\n        data: 1D array of data\r\n        bins: The total number of bins to sort counts into\r\n        range: A tuple (bmin,bmax) containing the bounds to form bins over \r\n    \r\n    Returns:\r\n        hist: A 1-D array containing the counts of the histogram\r\n        center: A 1-D array containing the center of the bins of the data\r\n        norm: The normalization of the histogram\r\n    \"\"\"\r\n    \r\n    hist, histbins = np.histogram(data, bins=bins, range=range)\r\n    center = (histbins[:-1] + histbins[1:])/2\r\n    norm = trapz(hist,x=center)\r\n    \r\n    return hist, center, norm\r\n\r\ndef gaussian(x, A, mu, std):\r\n    return A*np.exp(-(x-mu)**2/(2.0*std**2))\r\n\r\ndef double_gauss(x, A1, mu1, std1, A2, mu2, std2):\r\n    return gaussian(x,A1,mu1,std1) + gaussian(x,A2,mu2,std2)\r\n\r\ndef fit_histogram(xdata,ydata, bounds=None):\r\n    \"\"\"\r\n    Fit the histogram to a double Gaussian \r\n    \"\"\"\r\n    # bounds = ([A1_min, mu1_min, std1_min, A2_min, mu2_min, std2_min],\r\n    #           [A1_max, mu1_max, std1_max, A2_max, mu2_max, std2_max])\r\n\r\n    if bounds is None:\r\n        xmin = min(xdata)\r\n        xmax = max(xdata)\r\n        ymax = max(ydata)\r\n        bounds = ([0, xmin, 0, 0, xmin, 0],\r\n                  [1.1*ymax, xmax, xmax-xmin, 1.1*ymax, xmax, xmax-xmin])\r\n    \r\n    # pass bounds by keyword (the third positional argument of curve_fit is p0)\r\n    popt,pcov = curve_fit(double_gauss, xdata, ydata, bounds=bounds)\r\n    return popt\r\n\r\ndef find_intersections(hist1,hist2):\r\n    \"\"\"\r\n    hist1 and hist2 must have the same size\r\n    \"\"\"\r\n    return np.argwhere(np.diff(np.sign(hist1-hist2))).flatten()\r\n\r\ndef calculation_separation_fidelity(centerbins,hist1,hist2):\r\n    \"\"\"\r\n    hist1 should always be oriented such that its main central Gaussian peak is\r\n    always oriented to the left of hist2's main peak\r\n    \"\"\"\r\n    # Find the intersection of the histograms\r\n    idx = find_intersections(hist1,hist2)\r\n    idx = idx[len(idx) // 2]  # use the middle intersection point\r\n    # Normalize the histograms (may already be normalized)\r\n    norm1 = trapz(hist1,x=centerbins)\r\n    norm2 = trapz(hist2,x=centerbins)\r\n    hist1 = hist1/norm1\r\n    hist2 = hist2/norm2\r\n    # Integrate the area under the gaussian corresponding to each side of the\r\n    # separation axis\r\n    err_1 = trapz(hist1[idx:])\r\n    err_2 = trapz(hist2[:idx+1])\r\n    # Return the separation fidelity\r\n    return 1.0-err_1-err_2","sub_path":"Separation Fidelity/separation_fidelity.py","file_name":"separation_fidelity.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"394456919","text":"# encoding: utf-8\n\nDEFAULT_LOGGING = {\n    \"version\": 1,\n    \"incremental\": False,  # Replaces the existing configuration\n    \"root\": {\n        \"level\": \"DEBUG\",\n        \"propagate\": 0,\n        \"handlers\": [\"file\"],\n        \"filters\": [],\n    },\n    \"loggers\": {\n        \"default\": {\n            \"level\": \"DEBUG\",\n            \"propagate\": 0,\n            \"handlers\": [\"file\"],\n            \"filters\": [],\n        }\n    },\n    \"formatters\": {\n        \"simple\": {\n            \"format\": \"%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s\",\n            \"datefmt\": \"%Y-%m-%d %H:%M:%S\",\n        }\n    },\n    \"filters\": {\n        \"all\": {\n            \"name\": \"\"\n        }\n    },\n    \"handlers\": {\n        \"null\": {\n            \"level\": \"DEBUG\",\n            \"class\": \"logging.NullHandler\",\n        },\n        \"console\": {\n            \"level\": \"DEBUG\",\n            \"class\": \"logging.StreamHandler\",\n            \"formatter\": \"simple\",\n            \"filters\": [],\n        },\n        \"file\": {\n            \"level\": 
\"INFO\",\n \"class\": \"logging.handlers.TimedRotatingFileHandler\",\n \"formatter\": \"simple\",\n \"filters\": [],\n\n # Arguments of handler\n \"filename\": \"/var/log/{PROJECT}/{PROJECT}.log\",\n \"when\": \"midnight\", # Backup once each day.\n \"interval\": 1,\n \"backupCount\": 31, # The total number to backup.\n }\n }\n}\n\n\ndef get_config(project, config=None, filepath=None, console=False):\n import copy\n _config = copy.deepcopy(DEFAULT_LOGGING)\n\n f = _config[\"handlers\"][\"file\"]\n f[\"filename\"] = filepath if filepath else f[\"filename\"].format(PROJECT=project)\n\n if console:\n _config[\"root\"][\"handlers\"].append(\"console\")\n _config[\"loggers\"][\"default\"][\"handlers\"].append(\"console\")\n _config[\"loggers\"][project] = _config[\"loggers\"][\"default\"]\n\n if not isinstance(config, dict):\n return _config\n\n data1 = (\"version\", \"incremental\", \"root\")\n for d in data1:\n v = config.get(d, None)\n if v is not None:\n _config[d] = v\n\n data2 = (\"loggers\", \"formatters\", \"handlers\", \"filters\")\n for d in data2:\n for k, v in config.get(d, {}).items():\n if d in _config:\n _config[d].update(v)\n else:\n _config[d][k] = copy.deepcopy(v)\n\n return _config\n\n\ndef setup_logging(project, config=None, filepath=None, console=False):\n from logging.config import dictConfig\n config = get_config(project, config, filepath, console)\n dictConfig(config)\n","sub_path":"xutils/logging_dict_config.py","file_name":"logging_dict_config.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"350575430","text":"# Python code to demonstrate working of iskeyword()\n\n# importing \"keyword\" for keyword operations\nimport keyword\n\n# initializing strings for testing\ns = \"for\"\ns1 = \"geeksforgeeks\"\ns2 = \"elif\"\ns3 = \"elseif\"\ns4 = \"nikhil\"\ns5 = \"assert\"\ns6 = \"shambhavi\"\ns7 = \"True\"\ns8 = \"False\"\n\n# checking which are keywords\nif keyword.iskeyword(s):\n print( s + \" is a python keyword\")\nelse:\n print(s + \" is not a python keyword\")\n\nif keyword.iskeyword(s1):\n print( s1 + \" is a python keyword\")\nelse:\n print(s1 + \" is not a python keyword\")\n\n# Python code to demonstrate working of kwlist()\nprint (\"The list of keywords is : \")\nprint (keyword.kwlist) # new extra keywords await and async\n","sub_path":"Basics/how_to_check_if_a_string_is_valid_keyword_in_python.py","file_name":"how_to_check_if_a_string_is_valid_keyword_in_python.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"429683608","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('param', '0008_auto_20150916_1706'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='param',\n name='nome',\n field=models.CharField(unique=True, max_length=300, verbose_name=b'Parametro'),\n ),\n ]\n","sub_path":"weather/proj/param/migrations/0009_auto_20150916_2005.py","file_name":"0009_auto_20150916_2005.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"454402281","text":"import pygame\npygame.init()\nwhite=(255,255,255)\nblack=(0,0,0)\nred=(255,0,0)\ngreen=(0,255,0)\nblue=(0,0,255)\naqua =( 0, 255, 255)\nnavy_blue= ( 0, 0, 128)\nyellow =(255, 255, 
0)\nlightblue=(157,255,255)\ndw=1024\ndh=640\ntilesize=128\nplatformimg=[pygame.image.load('platform'+str(i)+'.png') for i in range(1,10)]\nlenemyimg=[pygame.image.load('L'+str(i)+'E.png') for i in range(1,12)]\nrenemyimg=[pygame.image.load('R'+str(i)+'E.png') for i in range(1,12)]\nrightwalkimg=[pygame.image.load('R'+str(i)+'.png') for i in range(1,10)]\nleftwalkimg=[pygame.image.load('L'+str(i)+'.png') for i in range(1,10)]\nwaterimg=[pygame.image.load('w'+str(i)+'.png') for i in range(1,18)]\nlaserimg=pygame.image.load('laser.png')\nenemylaserimg=pygame.image.load('elaser.png')\nscreen=pygame.display.set_mode([dw,dh])\npygame.display.set_caption(\"Platformer template\")\nclock=pygame.time.Clock()\nclass Map:\n def __init__(self,filename):\n self.data=[]\n with open(filename,'r+') as f:\n for line in f:\n self.data.append(line.strip())\n self.tilewidth=len(self.data[0])\n self.tileheight=len(self.data)\n self.width=self.tilewidth*tilesize\n self.height=self.tileheight*tilesize\nclass Camera:\n def __init__(self,width,height):\n self.camera=pygame.Rect(0,0,width,height)\n self.width=width\n self.height=height\n def apply(self,entity):\n return entity.rect.move(self.camera.topleft)\n def update(self,target):\n x=-target.rect.x+int(dw/2)\n x=min(0,x)\n x=max(-(self.width-dw),x)\n self.camera=pygame.Rect(x,0,self.width,self.height)\nclass Player(pygame.sprite.Sprite):\n def __init__(self,x,y,game):\n super().__init__()\n self.game=game\n self.image=rightwalkimg[0]\n self.rect=self.image.get_rect()\n self.rect.x=x*tilesize\n self.rect.y=y*tilesize\n self.x=x\n self.y=y\n self.leftbullet=0\n self.rightbullet=1\n self.vx=0\n self.vy=0\n self.lc=0\n self.rc=0\n self.jc=0\n self.bc=0\n self.health=100\n self.lastshoot=pygame.time.get_ticks()\n def gravity(self):\n if self.vy==0:\n self.vy=5\n else:\n self.vy+=0.35\n def shoot(self):\n if self.rightbullet:\n self.bullet=Bullet(self,self.rect.right,self.rect.top+30,10)\n self.game.bullets.add(self.bullet)\n self.game.all_sprites.add(self.bullet)\n if self.leftbullet:\n self.image=leftwalkimg[0]\n self.bullet=Bullet(self,self.rect.left,self.rect.top+30,-10)\n self.game.bullets.add(self.bullet)\n self.game.all_sprites.add(self.bullet)\n def jump(self):\n self.rect.y+=2\n hits=pygame.sprite.spritecollide(self,self.game.platforms,False)\n self.rect.y-=2\n if len(hits) or self.rect.bottom>=dh:\n self.vy=-10\n def update(self):\n self.gravity()\n now=pygame.time.get_ticks()\n self.vx=0 \n keys=pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.vx=-5\n self.leftbullet=1\n self.rightbullet=0\n if self.lc+1<27:\n self.image=leftwalkimg[self.lc//3]\n self.lc+=1\n else:\n self.lc=0\n elif keys[pygame.K_RIGHT]:\n self.vx=5\n self.leftbullet=0\n self.rightbullet=1\n if self.rc+1<27:\n self.image=rightwalkimg[self.rc//3]\n self.rc+=1\n else:\n self.rc=0\n else:\n if self.rightbullet:\n self.image=rightwalkimg[0]\n if self.leftbullet:\n self.image=leftwalkimg[0]\n if keys[pygame.K_UP]:\n self.jump()\n if keys[pygame.K_SPACE]: \n if now-self.lastshoot>500:\n self.lastshoot=now\n self.shoot()\n self.rect.x+=self.vx\n hits1=pygame.sprite.spritecollide(self,self.game.platforms,False)\n for hit in hits1:\n if self.vx>0:\n self.rect.right=hit.rect.left\n elif self.vx<0:\n self.rect.left=hit.rect.right\n self.vx=0\n self.rect.y+=self.vy\n hits2=pygame.sprite.spritecollide(self,self.game.platforms,False)\n for hit in hits2:\n if self.vy>0:\n self.rect.bottom=hit.rect.top\n elif self.vy<0:\n self.rect.top=hit.rect.bottom\n self.vy=0\n if self.rect.bottom>=dh:\n 
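# keep the sprite on screen: clamp to the window edges and the map width\n            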
self.rect.bottom=dh\n if self.rect.left<=0:\n self.rect.left=0\n if self.rect.top<=0:\n self.rect.top=0\n if self.rect.right>=self.game.map.width:\n self.rect.right=self.game.map.width\nclass EBullet(pygame.sprite.Sprite):\n def __init__(self,enemy,x,y,vx):\n super().__init__()\n self.enemy=enemy\n self.image=enemylaserimg\n self.image=pygame.transform.rotate(self.image,90)\n self.rect=self.image.get_rect()\n self.rect.x=x\n self.rect.y=y\n self.vx=vx\n self.last=pygame.time.get_ticks()\n def update(self):\n now=pygame.time.get_ticks()\n if now-self.last>300:\n self.last=now\n self.kill()\n self.rect.x+=self.vx\n hits=pygame.sprite.spritecollide(self,self.enemy.game.platforms,0)\n if hits:\n self.kill()\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self,player,x,y,vx):\n super().__init__()\n self.player=player\n self.image=laserimg\n self.image=pygame.transform.rotate(self.image,90)\n self.rect=self.image.get_rect()\n self.rect.x=x\n self.rect.y=y\n self.vx=vx\n self.last=pygame.time.get_ticks()\n def update(self):\n now=pygame.time.get_ticks()\n self.rect.x+=self.vx\n hits=pygame.sprite.spritecollide(self,self.player.game.platforms,0)\n if now-self.last>300 or hits:\n self.kill()\nclass Platform(pygame.sprite.Sprite):\n def __init__(self,x,y,i):\n super().__init__()\n self.image=platformimg[i]\n self.rect=self.image.get_rect()\n self.rect.x=x*tilesize\n self.rect.y=y*tilesize\nclass MovingPlatform(pygame.sprite.Sprite):\n def __init__(self,x,y):\n super().__init__()\n self.image=platformimg[8]\n self.rect=self.image.get_rect()\n self.rect.x=x*tilesize\n self.rect.y=y*tilesize\n self.temp=x*tilesize\n self.leftlimit=(x-1)*tilesize\n self.rightlimit=(x+1)*tilesize\n self.vx=2\n def update(self):\n if self.rect.x==self.leftlimit:\n self.vx=2\n elif self.rect.x==self.rightlimit:\n self.vx*=-1\n self.rect.x+=self.vx\n\nclass Water(pygame.sprite.Sprite):\n def __init__(self,x,y):\n super().__init__()\n self.image=waterimg[0]\n self.rect=self.image.get_rect()\n self.rect.x=x*tilesize\n self.rect.y=y*tilesize\n self.wc=0\n def update(self):\n if self.wc+1<34:\n self.image=waterimg[self.wc//2]\n self.wc+=1\n else:\n self.wc=0\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self,x,y,game):\n super().__init__()\n self.game=game\n self.image=renemyimg[0].copy()\n self.rect=self.image.get_rect()\n self.rect.x=x*tilesize\n self.rect.y=y*tilesize\n self.lc=0\n self.rc=0\n self.leftlimit=(x-1)*tilesize\n self.rightlimit=(x+1)*tilesize\n self.vx=2\n self.vy=0\n self.health=60\n self.last=pygame.time.get_ticks()\n def shoot(self):\n now=pygame.time.get_ticks()\n if now-self.last>1000:\n self.last=now\n if self.vx>0:\n self.image=renemyimg[0].copy()\n self.ebullet=EBullet(self,self.rect.right,self.rect.top+30,10)\n self.game.ebullets.add(self.ebullet)\n self.game.all_sprites.add(self.ebullet)\n if self.vx<0:\n self.image=lenemyimg[0].copy()\n self.ebullet=EBullet(self,self.rect.left,self.rect.top+30,-10)\n self.game.ebullets.add(self.ebullet)\n self.game.all_sprites.add(self.ebullet)\n \n def gravity(self):\n if self.vy==0:\n self.vy=5\n else:\n self.vy+=0.35\n def draw_health(self):\n if self.health>40:\n col=green\n elif self.health>20:\n col=yellow\n else:\n col=red\n if self.health<60:\n pygame.draw.rect(self.image,col,(0,0,self.health,7))\n if self.health>0 and self.health<60:\n pygame.draw.rect(self.image,white,(0,0,60,7),3)\n def update(self):\n self.gravity()\n self.rect.x+=self.vx\n if int(self.rect.x)==int(self.rightlimit):\n self.vx*=-1 \n if int(self.rect.x)==int(self.leftlimit):\n 
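# back at the left patrol limit: walk right again\n            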
self.vx=2\n if self.vx<0:\n if self.lc+1<30:\n self.image=lenemyimg[self.lc//3].copy()\n self.lc+=1\n else:\n self.lc=0\n if self.vx>0:\n if self.rc+1<30:\n self.image=renemyimg[self.rc//3].copy()\n self.rc+=1\n else:\n self.rc=0\n self.rect.y+=self.vy\n hits2=pygame.sprite.spritecollide(self,self.game.platforms,False)\n for hit in hits2:\n if self.vy>0:\n self.rect.bottom=hit.rect.top+6\n elif self.vy<0:\n self.rect.top=hit.rect.bottom\n self.vy=0\n self.shoot()\n if self.health<=0:\n self.kill()\n self.health=60\nclass Game:\n def __init__(self):\n self.health=100\n def drawgrid(self):\n for i in range(0,dw,tilesize):\n pygame.draw.line(screen,white,(i,0),(i,dh))\n for j in range(0,dh,tilesize):\n pygame.draw.line(screen,white,(0,j),(dw,j))\n def new(self):\n self.map=Map('map2.txt')\n self.player=Player(2,4,self)\n self.all_sprites=pygame.sprite.Group()\n self.all_sprites.add(self.player)\n self.platforms=pygame.sprite.Group()\n self.enemies=pygame.sprite.Group()\n self.bullets=pygame.sprite.Group()\n self.ebullets=pygame.sprite.Group()\n self.waters=pygame.sprite.Group()\n for row,tiles in enumerate(self.map.data):\n for col,tile in enumerate(tiles):\n if tile=='e':\n self.enemy=Enemy(col,row,self)\n self.enemies.add(self.enemy)\n self.all_sprites.add(self.enemy)\n if tile=='1':\n self.platform=Platform(col,row,0)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='2':\n self.platform=Platform(col,row,1)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='3':\n self.platform=Platform(col,row,2)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='4':\n self.platform=Platform(col,row,3)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='5':\n self.platform=Platform(col,row,4)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='6':\n self.platform=Platform(col,row,5)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='7':\n self.platform=Platform(col,row,6)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='8':\n self.platform=Platform(col,row,7)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='9':\n self.platform=Platform(col,row,8)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='m':\n self.platform=MovingPlatform(col,row)\n self.platforms.add(self.platform)\n self.all_sprites.add(self.platform)\n if tile=='w':\n self.water=Water(col,row)\n self.waters.add(self.water)\n self.all_sprites.add(self.water)\n self.camera=Camera(self.map.width,self.map.height)\n def message(self,text,x,y,size,color):\n self.font=pygame.font.SysFont('arial',size,1)\n self.msg=self.font.render(text,1,color)\n self.msgrect=self.msg.get_rect()\n self.msgrect.x=x\n self.msgrect.y=y\n screen.blit(self.msg,(self.msgrect.x,self.msgrect.y))\n def events(self):\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n pygame.quit()\n quit()\n def update(self):\n self.all_sprites.update()\n self.camera.update(self.player)\n hits1 = pygame.sprite.spritecollide(self.player,self.ebullets,1)\n if hits1:\n self.health-=10\n if self.health<=0:\n self.player.kill()\n hits2 = pygame.sprite.groupcollide(self.enemies,self.bullets,False,True)\n for hit in hits2:\n hit.health-=10\n def playerhealthbar(self):\n if self.health>60:\n col=green\n elif self.health>30:\n col=yellow\n else:\n col=red\n if self.health>0:\n 
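# draw the label and bar only while the player is alive\n            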
self.message('Player HP',20,5,20,red)\n pygame.draw.rect(screen,col,(20,30,self.health,30))\n pygame.draw.rect(screen,white,(20,30,100,30),3) \n def draw(self):\n screen.fill(lightblue)\n for sprite in self.all_sprites:\n if isinstance(sprite,Enemy):\n sprite.draw_health()\n screen.blit(sprite.image,self.camera.apply(sprite)) \n self.playerhealthbar()\n def run(self):\n while 1:\n clock.tick(60)\n self.events()\n self.update()\n self.draw()\n pygame.display.flip()\ng=Game()\nwhile g.run:\n g.new()\n g.run()\n","sub_path":"platformer.py","file_name":"platformer.py","file_ext":"py","file_size_in_byte":13637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"294451814","text":"import numpy as np\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\nvsigmoid = np.vectorize(sigmoid)\n\n# The cost function is in the form: \n# J = class * log(hypothesis) + (1-class) * log(1 - hypothesis)\n# If the class is 1 the cost will be given by log(hypothesis), so the cost will be close to 0 if the \n# hypothesis is close to 1 and close to infinite if hypothesis is too far from 1. When class is 0, \n# the cost will be log(1-hypothesis), then the cost will be low when hypothesis approaches 0.\ndef cost_function(training, classes, theta):\n m = len(classes)\n\n hypothesis = vsigmoid(np.dot(training,theta))\n\n j_1 = np.dot(np.transpose(classes), np.log(hypothesis))\n j_2 = np.dot(np.transpose(np.subtract(1, classes)), np.log(np.subtract(1,hypothesis)))\n J = -1/m * (j_1[0][0] + j_2[0][0])\n \n return J\n\n# The regularized cost funtion adds a regularization term lambda to the previous cost function \n# in order to avoid overfitting\ndef cost_function_reg(training, classes, theta, regLambda):\n m = len(classes)\n\n # theta2 excludes the first parameter in orther to be regularized\n theta2 = theta[range(1,theta.size)]\n\n hypothesis = vsigmoid(np.dot(training,theta))\n\n j_1 = np.dot(np.transpose(classes), np.log(hypothesis))\n j_2 = np.dot(np.transpose(np.subtract(1,classes)), np.log(np.subtract(1,hypothesis)))\n j_3 = (regLambda/(2*m)) * np.sum(theta2**2)\n J = (1/m) * (-1) * np.add(j_1, j_2) + j_3 \n\n return J\n\n# gradient_descent updates the value of theta by subtracting a gradient of the cost from the \n# previous value of theta \ndef gradient_descent(training, classes, theta, alpha, num_iterations):\n m = len(classes)\n\n for i in range(0,num_iterations):\n\n hypothesis = vsigmoid(np.dot(training,theta))\n \n gradient = np.multiply(1/m, np.dot(np.transpose(np.subtract(hypothesis, classes)),training))\n\n theta = np.subtract(theta, np.transpose(np.multiply(alpha, gradient)))\n\n return theta\n\n# Regularized gradient descent\ndef gradient_descent_reg(training, classes, theta, alpha, num_iterations, regLambda):\n m = len(classes)\n\n for i in range(0,num_iterations):\n\n hypothesis = vsigmoid(np.dot(training,theta))\n\n gradient_1 = np.multiply(1/m, np.dot(np.transpose(np.subtract(hypothesis, classes)),training))\n gradient_2 = np.multiply((regLambda/m), theta)\n gradient = np.transpose(gradient_1) + gradient_2\n gradient[0] = (1/m) * np.dot(np.transpose(np.subtract(hypothesis, classes)),np.array(training[:,0]))\n\n theta = np.subtract(theta, np.multiply(alpha, gradient))\n\n return theta\n\ndef lr_classify(testing, theta, threshold):\n hypothesis = vsigmoid(np.dot(testing,theta))\n return ['positive' if h >= threshold else 'negative' for h in 
hypothesis]\n","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"27678365","text":"from blog import db\nfrom blog.models import User\n#Create DB and tables\ndb.drop_all()\ndb.create_all()\n\nkyle = User(\"Kyle\",\"wilcox32ahs@gmail.com\", \"hong0322\")\n#INSERT User\ndb.session.add(kyle)\ndb.session.add(User(\"admin\",\"admin@gmail.com\", \"admin\"))\n\ndb.session.commit()\n","sub_path":"db_create_users.py","file_name":"db_create_users.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"615943375","text":"from src.diff_patcher import DiffPatcher\nfrom src import muniverse_mysql as m8sql\nfrom src import utility\n\nclass MuniverseDiffPatcher(object):\n    def __init__(self, type):\n        self.type = type\n\n    def __patch(self, chain):\n        storage_dir = utility.getStorageDir()\n        path_chain = []\n        \n        for entry in chain:\n            path_chain.append(storage_dir + entry)\n\n        diff_patcher = DiffPatcher(path_chain)\n        diff_patcher.patch()\n\n        return diff_patcher.tmp.getFilename()\n\n    def patchWithNoteId(self, note_id):\n        if self.type != \"note_id\": raise Exception('patchWithNoteId called when type is rep_id')\n        chain = m8sql.getFileChain(note_id)\n        return self.__patch(chain)\n\n    def patchWithRepId(self, rep_id):\n        if self.type != \"rep_id\": raise Exception('patchWithRepId called when type is note_id')\n        chain = m8sql.getFileChainWithRepId(rep_id)\n        return self.__patch(chain)","sub_path":"src/muniverse_diff_patcher.py","file_name":"muniverse_diff_patcher.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"359834417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 23 17:06:05 2020\n\n@author: safa\n\"\"\"\n\nfrom CreateConfData import CreateConfData\nfrom NewConfPar import AVLExecution\nimport threading as th\nimport time\nimport numpy as np\n\n\n## References\nRef = {'S':8,'B':10,'C':1}\nSpanRange = np.arange(7,11,1)\nTaperRatioRange = np.arange(0,1.1,0.1)\nConfSearchData = CreateConfData(Ref['S'],SpanRange,TaperRatioRange)\n\nStartTime = time.time()\nBaseAVLInputFileName = 'test.avl'\n\nInDirectory = r'C:\\Users\\Dell\\Documents\\GitHub\\Athena-Vortex-Lattice-Python\\Windows\\AVL Files' \nAlphaRange = [0,3,6,9,12,15] \n\n\nthreads = list()\nif __name__ == '__main__':\n    for conf in ConfSearchData.index:\n        for alpha in range(len(AlphaRange)):\n            ## creating threads\n            GeomData = {'Span':ConfSearchData.Span[conf],\\\n                        'TaperRatio':ConfSearchData.TaperRatio[conf],\\\n                        'Croot':ConfSearchData.Croot[conf],\\\n                        'Ctip':ConfSearchData.Ctip[conf],\\\n                        'Xtip':ConfSearchData.Xtip[conf]}\n            threads.append(th.Thread(target=AVLExecution,args=(conf,alpha,BaseAVLInputFileName,InDirectory,GeomData,Ref)))\n    \n    # ceil division so the final partial chunk of threads also runs\n    for i in range((len(threads) + 7) // 8): \n        ThreadChunk = threads[8*i:8*(i+1)] \n        ## starting threads\n        for thread in ThreadChunk:\n            thread.start()\n        \n        ## waiting until all threads finish\n        for thread in ThreadChunk:\n            thread.join()\n    ","sub_path":"Windows/ConfSearch/AVLWinParThread.py","file_name":"AVLWinParThread.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"453458397","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr  9 18:22:20 
2019\n\n@author: JunbinZhang\n\"\"\"\n\"\"\"\nDNL error is defined as the difference between an actual step width and the ideal value of 1 LSB.\nFor an ideal ADC, in which the differential nonlinearity coincides with DNL = 0 LSB, each analog step equals\n1 LSB = VFSR/2^N, where VFSR is the full-scale range, N is the resolution of the ADC, and the transition values\nare spaced exactly 1 LSB apart. A DNL error specification of less than or equal to 1 LSB guarantees a monotonic transfer function with\nno missing codes. An ADC's monotonicity is guaranteed when its digital output increases (or remains constant) with an increasing\ninput signal, thereby avoiding sign changes in the slope of the transfer curve. DNL is specified after the\nstatic gain error has been removed. It is defined as follows:\n    DNL = |(V(d+1) - Vd)/VLSB-IDEAL - 1|, where 0 < d < 2^N - 2.\n    Vd is the physical value corresponding to the digital output code d, N is the ADC resolution, and VLSB-IDEAL is the ideal spacing\n    for two adjacent digital codes.\n\"\"\"\n\n\"\"\"\nINL error is described as the deviation, in LSB or percent of full-scale range (FSR), of an actual transfer\nfunction from a straight line. The INL-error magnitude then depends directly on the position chosen for\nthis straight line. At least two definitions are common: \"best straight-line INL\" and \"end-point INL\".\n\nBest straight-line INL provides information about offset (intercept) and gain (slope) error, plus the\nposition of the transfer function (discussed below). It determines, in the form of a straight line, the\nclosest approximation to the ADC's actual transfer function. The exact position of the line is not\nclearly defined, but this approach yields the best repeatability, and it serves as a true\nrepresentation of linearity. \nEnd-point INL passes the straight line through the end points of the converter's transfer function,\nthereby defining a precise position for the line. Thus, the straight line for an N-bit ADC is defined by\nits zero (all zeros) and its full-scale (all ones) outputs.\n\nThe best straight-line approach is generally preferred, because it produces better results. The INL\nspecification is measured after both static offset and gain errors have been nullified, and can be\ndescribed as follows:\n    INL = |(Vd - Vzero)/VLSB-IDEAL - d|, where 0 < d < 2^N - 1.\n    Vd is the analog value represented by the digital output code d, N is the ADC's resolution, Vzero is the minimum analog input\n    corresponding to an all-zero output code, 
and Vlsb-IDEAL is the ideal spacing for two adjacent output codes\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nimport math\nfrom brd_config import Brd_Config\nfrom frame import Frames\n\nchns = [[],[],[],[],[],[],[],[]]\ncounts = [[],[],[],[],[],[],[],[]]\n\nwith open('D:\\python_workspace\\DUNE_COLDADC\\data\\ADC0_all.csv') as csvfile:\n readCSV = csv.reader(csvfile,delimiter=',')\n for row in readCSV: \n #print(row) \n chns[0].append(row[0])\n counts[0].append(row[1])\n \n chns[1].append(row[2])\n counts[1].append(row[3])\n \n chns[2].append(row[4])\n counts[2].append(row[5])\n \n chns[3].append(row[6])\n counts[3].append(row[7])\n \n chns[4].append(row[8])\n counts[4].append(row[9])\n \n chns[5].append(row[10])\n counts[5].append(row[11])\n \n chns[6].append(row[12])\n counts[6].append(row[13])\n \n chns[7].append(row[14])\n counts[7].append(row[15])\ncsvfile.close()\n\nstart = 1\nstop = 4094\n\n\nfor i in range(8):\n chns[i] = chns[i][2:-1]\n counts[i] = counts[i][2:-1]\n#now we have the channels and counts, but they are all in str type\n \n#convert them to lists of int\nfor i in range(8):\n chns[i] = list(map(int,chns[i][start-1:stop]))\n counts[i] = list(map(int,counts[i][start-1:stop]))\n\n\n\n\n\n\nNu = [] #number of times the upper code is hit\nNl = [] #number of times the lower code is hit\nNs = [] #number of samples(total sum of code occurrences)\nN = 12 #is the convert resolution\nfor i in range(8):\n Nu.append(counts[i][-1])\n Nl.append(counts[i][0])\n Ns.append(sum(counts[i]))\n\nXu=[] #\nXl=[] #\n\nfor i in range(8): \n Xu_tp = math.cos(math.pi*Nu[i]/Ns[i])\n Xl_tp = math.cos(math.pi*Nl[i]/Ns[i])\n Xu.append(Xu_tp)\n Xl.append(Xl_tp)\n\n \noffset_lsbs=[] #(LSB)\n \nfor i in range(8): \n offset_lsbs_tp = ((Xl[i] - Xu[i])/(Xl[i]+Xu[i]))*(math.pow(2,(N-1))-1)\n offset_lsbs.append(offset_lsbs_tp)\n \npeak_lsbs=[] #(LSB) \nfor i in range(8): \n peak_lsbs_tp = (math.pow(2,(N-1)) - 1- offset_lsbs[i])/Xu[i]\n peak_lsbs.append(peak_lsbs_tp)\n \n# now we get the offset and amplitude, the ideal distribution of code hits can be calculated.\nIdeal_counts = [[],[],[],[],[],[],[],[]] \nfor i in range(8):\n for code in range(1,4095,1): #1->4094\n res = Ns[i]/math.pi * (math.asin((code+1 - math.pow(2,(N-1))-offset_lsbs[i])/peak_lsbs[i]) - math.asin((code - math.pow(2,(N-1))-offset_lsbs[i])/peak_lsbs[i]) )\n Ideal_counts[i].append(res)\n\nDNL_lsbs= [[],[],[],[],[],[],[],[]] \n\nfor chn in range(8):\n for code in range(len(counts[0])):\n res = (counts[chn][code]/Ideal_counts[chn][code]) -1\n DNL_lsbs[chn].append(res)\n\n\nINL_lsbs= [[],[],[],[],[],[],[],[]] \nfor chn in range(8):\n for code in range(len(DNL_lsbs[0])):# change range here\n if code == 0:\n INL_lsbs[chn].append(DNL_lsbs[chn][0])\n else:\n res = sum(DNL_lsbs[chn][0:code+1])\n INL_lsbs[chn].append(res) \n\n\n\nfig = plt.figure()\nax3 = fig.add_subplot(3,1,1)\n\nplt.ylabel('counts')\nplt.title('ADC0 DNL sine vpp=3.04V freq=11kHz, test input')\nplt.plot(counts[0])\nplt.xlim(start,stop)\nplt.text(start+100,50000,'N=%.2e samples,confident level > 99 percent with resolution = 0.1LSB,12-bit ADC'%Ns[0])\nplt.text(start+100,40000,'code range(%d~%d)'%(start,stop))\n\n\n\nax1 = 
fig.add_subplot(3,1,2)\nplt.ylabel('LSBs')\nplt.plot(DNL_lsbs[0],'r')\nplt.plot(DNL_lsbs[1],'g')\nplt.plot(DNL_lsbs[2],'b')\nplt.plot(DNL_lsbs[3],'c')\nplt.plot(DNL_lsbs[4],'m')\nplt.plot(DNL_lsbs[5],'y')\nplt.plot(DNL_lsbs[6],'k')\nplt.plot(DNL_lsbs[7],'k')\nplt.xlim(start,stop)\n#plt.ylim(-1,1)\nplt.legend(('chn0','chn1','chn2','chn3','chn4','chn5','chn6','chn7'))\nplt.text(start+100,0.8,'offset=%.3f LSB'%offset_lsbs[0],color='k')\n\n\n#plt.text(start+100,8,'linear range chn0 <0.5lsb(%d~%d)'%(min0,max0))\n#plt.text(start+100,7.5,'linear range chn1 <0.5lsb(%d~%d)'%(min1,max1))\n#plt.text(start+100,7,'linear range chn2 <0.5lsb(%d~%d)'%(min2,max2))\n#plt.text(start+100,6.5,'linear range chn3 <0.5lsb(%d~%d)'%(min3,max3))\n#plt.text(start+100,6,'linear range chn4 <0.5lsb(%d~%d)'%(min4,max5))\n#plt.text(start+100,5.5,'linear range chn5 <0.5lsb(%d~%d)'%(min6,max6))\n#plt.text(start+100,5,'linear range chn6 <0.5lsb(%d~%d)'%(min7,max7))\n\n\nax2 = fig.add_subplot(3,1,3)\nplt.xlabel('Code')\nplt.ylabel('LSBs')\nplt.title('INL')\nplt.plot(INL_lsbs[0],'r')\nplt.plot(INL_lsbs[1],'g')\nplt.plot(INL_lsbs[2],'b')\nplt.plot(INL_lsbs[3],'c')\nplt.plot(INL_lsbs[4],'m')\nplt.plot(INL_lsbs[5],'y')\nplt.plot(INL_lsbs[6],'k')\nplt.plot(INL_lsbs[7],'k')\nplt.xlim(start,stop)\n#plt.xticks(xcode,step = 5000)\n#plt.text(start+100,10,'code range(%d~%d)'%(start,stop))\nplt.legend(('chn0','chn1','chn2','chn3','chn4','chn5','chn6','chn7'))\nplt.show()\n\n\n\n#xtick = np.arange(4096) #0~4095\n\n    ","sub_path":"DNL_INL_sine.py","file_name":"DNL_INL_sine.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"176380208","text":"#longitud.py\r\ndef longitudCadena(x):\r\n    contador=0\r\n    for i in x:\r\n        contador+=1\r\n    return contador\r\ndef nombrePropio(x):\r\n    y=x.lower()\r\n    return y[0].upper()+y[1:]\r\nx=input('Enter a word: ') or 'mADRId'\r\nprint(nombrePropio(x),'has',longitudCadena(x),'characters.')\r\n","sub_path":"longitud.py","file_name":"longitud.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"207929490","text":"# coding:utf-8\n# --author-- lanhua.zhou\nfrom __future__ import print_function\n\nimport os\nimport json\nimport logging\n\nimport maya.cmds as cmds\n\nimport zfused_maya.core.menu as menu\n\n__all__ = [\"build\", \"delete\", \"rebuild\"]\n\nlogger = logging.getLogger(__name__)\n\n\ndef build():\n    \"\"\"\n    build zfused maya menu \n\n    \"\"\"\n\n    # main menu\n    cmds.menu(\"zfused_maya_menu\", parent=\"MayaWindow\",\n              label=u\"星龙传媒 zFused Maya\", tearOff=True)\n    _menu_data = menu.get_menu_data()\n\n    for _menu_title in menu.MENU_KEY:\n        cmds.menuItem(_menu_title, label=_menu_title.capitalize(),\n                      parent=\"zfused_maya_menu\", subMenu=True, tearOff=True)\n        if _menu_title in _menu_data.keys():\n            # load menu\n            category = []\n            category_cmds = {}\n            menu_data = _menu_data[_menu_title]\n            for data in menu_data:\n                cate = data[\"category\"]\n                if not cate in category:\n                    category.append(cate)\n                if not cate in category_cmds:\n                    category_cmds[cate] = []\n                category_cmds[cate].append(data)\n            for ca in category:\n                cmds.menuItem(label = ca, divider=True, parent = _menu_title)\n                for data in category_cmds[ca]:\n                    cmds.menuItem(data[\"name\"], label=data[\"title\"],\n                                  parent=_menu_title, command=data[\"cmd\"])\n    cmds.menuItem(divider=True, parent=\"zfused_maya_menu\")\n\n\ndef delete():\n    if cmds.menu(\"zfused_maya_menu\", q=True, 
exists=True):\n        cmds.deleteUI(\"zfused_maya_menu\")\n\n\ndef rebuild():\n    delete()\n    build()\n","sub_path":"zfused_maya/zfused_maya/interface/menuinterface/menubar.py","file_name":"menubar.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"170922982","text":"\"\"\"A collection of functions used by tstoolbox, wdmtoolbox, ...etc.\"\"\"\n\nfrom __future__ import division, print_function\n\nimport bz2\nimport datetime\nimport gzip\nimport os\nimport sys\nfrom textwrap import TextWrapper\nfrom functools import reduce\nfrom io import StringIO\nfrom math import gcd\nfrom urllib.parse import urlparse\n\nimport dateparser\nimport numpy as np\nfrom pint import UnitRegistry\nimport pandas as pd\nfrom scipy.stats.distributions import norm\nfrom scipy.stats.distributions import lognorm\nfrom tabulate import simple_separated_format\nfrom tabulate import tabulate as tb\n\nUREG = UnitRegistry()\n\n\nWRAPPER = TextWrapper(initial_indent=\"* \", subsequent_indent=\"* \")\n\n\ndef error_wrapper(estr):\n    \"\"\" Wrap estr into error format used by toolboxes. \"\"\"\n    nestr = [\"\", \"*\"]\n    for paragraph in estr.split(\"\\n\\n\"):\n        nestr.append(\"\\n\".join(WRAPPER.wrap(paragraph.strip())))\n        nestr.append(\"*\")\n    nestr.append(\"\")\n    return \"\\n\".join(nestr)\n\n\n_CODES = {}\n_CODES[\"SUB_D\"] = {\n    \"N\": \"Nanoseconds\",\n    \"U\": \"microseconds\",\n    \"L\": \"miLliseconds\",\n    \"S\": \"Secondly\",\n    \"T\": \"minuTely\",\n    \"H\": \"Hourly\",\n}\n_CODES[\"DAILY\"] = {\n    \"D\": \"calendar Day\",\n    \"B\": \"Business day\",\n    \"C\": \"Custom business day (experimental)\",\n}\n_CODES[\"WEEKLY\"] = {\n    \"W\": \"Weekly\",\n    \"W-SUN\": \"Weekly frequency (SUNdays)\",\n    \"W-MON\": \"Weekly frequency (MONdays)\",\n    \"W-TUE\": \"Weekly frequency (TUEsdays)\",\n    \"W-WED\": \"Weekly frequency (WEDnesdays)\",\n    \"W-THU\": \"Weekly frequency (THUrsdays)\",\n    \"W-FRI\": \"Weekly frequency (FRIdays)\",\n    \"W-SAT\": \"Weekly frequency (SATurdays)\",\n}\n_CODES[\"MONTH\"] = {\n    \"M\": \"Month end\",\n    \"MS\": \"Month Start\",\n    \"BM\": \"Business Month end\",\n    \"BMS\": \"Business Month Start\",\n    \"CBM\": \"Custom Business Month end\",\n    \"CBMS\": \"Custom Business Month Start\",\n}\n_CODES[\"QUARTERLY\"] = {\n    \"Q\": \"Quarter end\",\n    \"Q-JAN\": \"Quarterly, quarter ends end of JANuary\",\n    \"Q-FEB\": \"Quarterly, quarter ends end of FEBruary\",\n    \"Q-MAR\": \"Quarterly, quarter ends end of MARch\",\n    \"Q-APR\": \"Quarterly, quarter ends end of APRil\",\n    \"Q-MAY\": \"Quarterly, quarter ends end of MAY\",\n    \"Q-JUN\": \"Quarterly, quarter ends end of JUNe\",\n    \"Q-JUL\": \"Quarterly, quarter ends end of JULy\",\n    \"Q-AUG\": \"Quarterly, quarter ends end of AUGust\",\n    \"Q-SEP\": \"Quarterly, quarter ends end of SEPtember\",\n    \"Q-OCT\": \"Quarterly, quarter ends end of OCTober\",\n    \"Q-NOV\": \"Quarterly, quarter ends end of NOVember\",\n    \"Q-DEC\": \"Quarterly, quarter ends end of DECember\",\n    \"QS\": \"Quarter Start\",\n    \"QS-JAN\": \"Quarterly, quarter Starts end of JANuary\",\n    \"QS-FEB\": \"Quarterly, quarter Starts end of FEBruary\",\n    \"QS-MAR\": \"Quarterly, quarter Starts end of MARch\",\n    \"QS-APR\": \"Quarterly, quarter Starts end of APRil\",\n    \"QS-MAY\": \"Quarterly, quarter Starts end of MAY\",\n    \"QS-JUN\": \"Quarterly, quarter Starts end of JUNe\",\n    \"QS-JUL\": \"Quarterly, quarter Starts end of JULy\",\n    \"QS-AUG\": \"Quarterly, quarter Starts end of AUGust\",\n    \"QS-SEP\": \"Quarterly, quarter Starts end of 
SEPtember\",\n \"QS-OCT\": \"Quarterly, quarter Starts end of OCTober\",\n \"QS-NOV\": \"Quarterly, quarter Starts end of NOVember\",\n \"QS-DEC\": \"Quarterly, quarter Starts end of DECember\",\n \"BQ\": \"Business Quarter end\",\n \"BQS\": \"Business Quarter Start\",\n}\n_CODES[\"ANNUAL\"]: {\n \"A\": \"Annual end\",\n \"A-JAN\": \"Annual, year ends end of JANuary\",\n \"A-FEB\": \"Annual, year ends end of FEBruary\",\n \"A-MAR\": \"Annual, year ends end of MARch\",\n \"A-APR\": \"Annual, year ends end of APRil\",\n \"A-MAY\": \"Annual, year ends end of MAY\",\n \"A-JUN\": \"Annual, year ends end of JUNe\",\n \"A-JUL\": \"Annual, year ends end of JULy\",\n \"A-AUG\": \"Annual, year ends end of AUGust\",\n \"A-SEP\": \"Annual, year ends end of SEPtember\",\n \"A-OCT\": \"Annual, year ends end of OCTober\",\n \"A-NOV\": \"Annual, year ends end of NOVember\",\n \"A-DEC\": \"Annual, year ends end of DECember\",\n \"AS\": \"Annual Start\",\n \"AS-JAN\": \"Annual, year Starts end of JANuary\",\n \"AS-FEB\": \"Annual, year Starts end of FEBruary\",\n \"AS-MAR\": \"Annual, year Starts end of MARch\",\n \"AS-APR\": \"Annual, year Starts end of APRil\",\n \"AS-MAY\": \"Annual, year Starts end of MAY\",\n \"AS-JUN\": \"Annual, year Starts end of JUNe\",\n \"AS-JUL\": \"Annual, year Starts end of JULy\",\n \"AS-AUG\": \"Annual, year Starts end of AUGust\",\n \"AS-SEP\": \"Annual, year Starts end of SEPtember\",\n \"AS-OCT\": \"Annual, year Starts end of OCTober\",\n \"AS-NOV\": \"Annual, year Starts end of NOVember\",\n \"AS-DEC\": \"Annual, year Starts end of DECember\",\n \"BA\": \"Business Annual end\",\n \"BA-JAN\": \"Business Annual, business year ends end of JANuary\",\n \"BA-FEB\": \"Business Annual, business year ends end of FEBruary\",\n \"BA-MAR\": \"Business Annual, business year ends end of MARch\",\n \"BA-APR\": \"Business Annual, business year ends end of APRil\",\n \"BA-MAY\": \"Business Annual, business year ends end of MAY\",\n \"BA-JUN\": \"Business Annual, business year ends end of JUNe\",\n \"BA-JUL\": \"Business Annual, business year ends end of JULy\",\n \"BA-AUG\": \"Business Annual, business year ends end of AUGust\",\n \"BA-SEP\": \"Business Annual, business year ends end of SEPtember\",\n \"BA-OCT\": \"Business Annual, business year ends end of OCTober\",\n \"BA-NOV\": \"Business Annual, business year ends end of NOVember\",\n \"BA-DEC\": \"Business Annual, business year ends end of DECember\",\n \"BAS\": \"Business Annual Start\",\n \"BS-JAN\": \"Business Annual Start, business year starts end of JANuary\",\n \"BS-FEB\": \"Business Annual Start, business year starts end of FEBruary\",\n \"BS-MAR\": \"Business Annual Start, business year starts end of MARch\",\n \"BS-APR\": \"Business Annual Start, business year starts end of APRil\",\n \"BS-MAY\": \"Business Annual Start, business year starts end of MAY\",\n \"BS-JUN\": \"Business Annual Start, business year starts end of JUNe\",\n \"BS-JUL\": \"Business Annual Start, business year starts end of JULy\",\n \"BS-AUG\": \"Business Annual Start, business year starts end of AUGust\",\n \"BS-SEP\": \"Business Annual Start, business year starts end of SEPtember\",\n \"BS-OCT\": \"Business Annual Start, business year starts end of OCTober\",\n \"BS-NOV\": \"Business Annual Start, business year starts end of NOVember\",\n \"BS-DEC\": \"Business Annual Start, business year starts end of DECember\",\n}\n\ndocstrings = {\n \"target_units\": r\"\"\"target_units: str\n [optional, default is None, transformation]\n\n The purpose of this 
option is to specify target units for unit\n conversion. The source units are specified in the header line\n of the input or using the 'source_units' keyword.\n\n The units of the input time-series or values are specified as\n the second field of a ':' delimited name in the header line of\n the input or in the 'source_units' keyword.\n\n Any unit string compatible with the 'pint' library can be used.\n\n This option will also add the 'target_units' string to the\n column names.\"\"\",\n \"source_units\": r\"\"\"source_units: str\n [optional, default is None, transformation]\n\n If unit is specified for the column as the second field of a ':'\n delimited column name, then the specified units and the\n 'source_units' must match exactly.\n\n Any unit string compatible with the 'pint' library can be\n used.\"\"\",\n \"names\": r\"\"\"names: str\n [optional, default is None, transformation]\n\n If None, the column names are taken from the first row after\n 'skiprows' from the input dataset.\n\n MUST include a name for all columns in the input dataset,\n including the index column.\"\"\",\n \"index_type\": r\"\"\"index_type : str\n [optional, default is 'datetime', output format]\n\n Can be either 'number' or 'datetime'. Use 'number' with index\n values that are Julian dates, or other epoch reference.\"\"\",\n \"input_ts\": r\"\"\"input_ts : str\n [optional though required if using within Python, default is '-'\n (stdin)]\n\n Whether from a file or standard input, data requires a header of\n column names. The default header is the first line of the\n input, but this can be changed using the 'skiprows' option.\n\n Most separators will be automatically detected. Most common date\n formats can be used, but the closer to ISO 8601 date/time\n standard the better.\n\n Command line:\n\n +-------------------------+------------------------+\n | Keyword Example | Description |\n +=========================+========================+\n | --input_ts=filename.csv | to read 'filename.csv' |\n +-------------------------+------------------------+\n | --input_ts='-' | to read from standard |\n | | input (stdin) |\n +-------------------------+------------------------+\n\n In many cases it is better to use redirection rather than\n use `--input_ts=filename.csv`. The following are identical:\n\n From a file:\n\n command subcmd --input_ts=filename.csv\n\n From standard input:\n\n command subcmd --input_ts=- < filename.csv\n\n The BEST way since you don't have to include `--input_ts=-`\n because that is the default:\n\n command subcmd < file.csv\n\n Can also combine commands by piping:\n\n command subcmd < filein.csv | command subcmd1 > fileout.csv\n\n As Python Library::\n\n You MUST use the `input_ts=...` option where `input_ts` can\n be one of a [pandas DataFrame, pandas Series, dict, tuple,\n list, StringIO, or file name].\n\n If result is a time series, returns a pandas DataFrame.\"\"\",\n \"columns\": r\"\"\"columns\n [optional, defaults to all columns, input filter]\n\n Columns to select out of input. Can use column names from the\n first line header or column numbers. If using numbers, column\n number 1 is the first data column. To pick multiple columns,\n separate by commas with no spaces.
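For example, assuming a hypothetical input whose data columns are named 'Flow' and 'Stage', `--columns=Flow,Stage` and `--columns=1,2` make the same selection.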
As used in `tstoolbox pick`\n command.\n\n This solves a big problem so that you don't have to create\n a data set with a certain column order; you can rearrange\n columns when data is read in.\"\"\",\n \"start_date\": r\"\"\"start_date : str\n [optional, defaults to first date in time-series, input filter]\n\n The start_date of the series in ISOdatetime format, or 'None'\n for beginning.\"\"\",\n \"end_date\": r\"\"\"end_date : str\n [optional, defaults to last date in time-series, input filter]\n\n The end_date of the series in ISOdatetime format, or 'None' for\n end.\"\"\",\n \"dropna\": r\"\"\"dropna : str\n [optional, default is 'no', input filter]\n\n Set `dropna` to 'any' to have records dropped that have NA value\n in any column, or 'all' to have records dropped that have NA in\n all columns. Set to 'no' to not drop any records. The default\n is 'no'.\"\"\",\n \"print_input\": r\"\"\"print_input\n [optional, default is False, output format]\n\n If set to 'True' will include the input columns in the output\n table.\"\"\",\n \"round_index\": r\"\"\"round_index\n [optional, default is None which will do nothing to the index,\n output format]\n\n Round the index to the nearest time point. Can significantly\n improve the performance since it can cut down on memory and\n processing requirements; however, be cautious about rounding to\n a very coarse interval from a small one. This could lead to\n duplicate values in the index.\"\"\",\n \"float_format\": r\"\"\"float_format\n [optional, output format]\n\n Format for float numbers.\"\"\",\n \"tablefmt\": r\"\"\"tablefmt : str\n [optional, default is 'csv', output format]\n\n The table format. Can be one of 'csv', 'tsv', 'plain',\n 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', 'latex',\n 'latex_raw' and 'latex_booktabs'.\"\"\",\n \"header\": r\"\"\"header : str\n [optional, default is 'default', output format]\n\n This is if you want a different header than is the default for\n this output table.
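For example, a hypothetical `--header=Datetime,Flow` would relabel the index column and a single data column.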
Pass a list of strings for each column in\n the table.\"\"\",\n \"pandas_offset_codes\": r\"\"\"+-------+---------------+\n | Alias | Description |\n +=======+===============+\n | N | Nanoseconds |\n +-------+---------------+\n | U | microseconds |\n +-------+---------------+\n | L | milliseconds |\n +-------+---------------+\n | S | Secondly |\n +-------+---------------+\n | T | Minutely |\n +-------+---------------+\n | H | Hourly |\n +-------+---------------+\n | D | calendar Day |\n +-------+---------------+\n | W | Weekly |\n +-------+---------------+\n | M | Month end |\n +-------+---------------+\n | MS | Month Start |\n +-------+---------------+\n | Q | Quarter end |\n +-------+---------------+\n | QS | Quarter Start |\n +-------+---------------+\n | A | Annual end |\n +-------+---------------+\n | AS | Annual Start |\n +-------+---------------+\n\n Business offset codes.\n\n +-------+------------------------------------+\n | Alias | Description |\n +=======+====================================+\n | B | Business day |\n +-------+------------------------------------+\n | BM | Business Month end |\n +-------+------------------------------------+\n | BMS | Business Month Start |\n +-------+------------------------------------+\n | BQ | Business Quarter end |\n +-------+------------------------------------+\n | BQS | Business Quarter Start |\n +-------+------------------------------------+\n | BA | Business Annual end |\n +-------+------------------------------------+\n | BAS | Business Annual Start |\n +-------+------------------------------------+\n | C | Custom business day (experimental) |\n +-------+------------------------------------+\n | CBM | Custom Business Month end |\n +-------+------------------------------------+\n | CBMS | Custom Business Month Start |\n +-------+------------------------------------+\n\n Weekly has the following anchored frequencies:\n\n +-------+-------------+-------------------------------+\n | Alias | Equivalents | Description |\n +=======+=============+===============================+\n | W-SUN | W | Weekly frequency (SUNdays) |\n +-------+-------------+-------------------------------+\n | W-MON | | Weekly frequency (MONdays) |\n +-------+-------------+-------------------------------+\n | W-TUE | | Weekly frequency (TUEsdays) |\n +-------+-------------+-------------------------------+\n | W-WED | | Weekly frequency (WEDnesdays) |\n +-------+-------------+-------------------------------+\n | W-THU | | Weekly frequency (THUrsdays) |\n +-------+-------------+-------------------------------+\n | W-FRI | | Weekly frequency (FRIdays) |\n +-------+-------------+-------------------------------+\n | W-SAT | | Weekly frequency (SATurdays) |\n +-------+-------------+-------------------------------+\n\n Quarterly frequencies (Q, BQ, QS, BQS) and annual frequencies\n (A, BA, AS, BAS) replace the \"x\" in the \"Alias\" column to have\n the following anchoring suffixes:\n\n +-------+----------+-------------+----------------------------+\n | Alias | Examples | Equivalents | Description |\n +=======+==========+=============+============================+\n | x-DEC | A-DEC | A | year ends end of DECember |\n | | Q-DEC | Q | |\n | | AS-DEC | AS | |\n | | QS-DEC | QS | |\n +-------+----------+-------------+----------------------------+\n | x-JAN | | | year ends end of JANuary |\n +-------+----------+-------------+----------------------------+\n | x-FEB | | | year ends end of FEBruary |\n +-------+----------+-------------+----------------------------+\n | x-MAR | | | year 
ends end of MARch |\n +-------+----------+-------------+----------------------------+\n | x-APR | | | year ends end of APRil |\n +-------+----------+-------------+----------------------------+\n | x-MAY | | | year ends end of MAY |\n +-------+----------+-------------+----------------------------+\n | x-JUN | | | year ends end of JUNe |\n +-------+----------+-------------+----------------------------+\n | x-JUL | | | year ends end of JULy |\n +-------+----------+-------------+----------------------------+\n | x-AUG | | | year ends end of AUGust |\n +-------+----------+-------------+----------------------------+\n | x-SEP | | | year ends end of SEPtember |\n +-------+----------+-------------+----------------------------+\n | x-OCT | | | year ends end of OCTober |\n +-------+----------+-------------+----------------------------+\n | x-NOV | | | year ends end of NOVember |\n +-------+----------+-------------+----------------------------+\"\"\",\n \"plotting_position_table\": r\"\"\"+------------+--------+----------------------+-----------------------+\n | Name | a | Equation | Description |\n | | | (i-a)/(n+1-2*a) | |\n +============+========+======================+=======================+\n | weibull | 0 | i/(n+1) | mean of sampling |\n | (default) | | | distribution |\n +------------+--------+----------------------+-----------------------+\n | benard | 0.3 | (i-0.3)/(n+0.4) | approx. median of |\n | | | | sampling distribution |\n +------------+--------+----------------------+-----------------------+\n | filliben | 0.3175 | (i-0.3175)/(n+0.365) | |\n +------------+--------+----------------------+-----------------------+\n | yu | 0.326 | (i-0.326)/(n+0.348) | |\n +------------+--------+----------------------+-----------------------+\n | tukey | 1/3 | (i-1/3)/(n+1/3) | approx. median of |\n | | | | sampling distribution |\n +------------+--------+----------------------+-----------------------+\n | blom | 0.375 | (i-0.375)/(n+0.25) | |\n +------------+--------+----------------------+-----------------------+\n | cunnane | 2/5 | (i-2/5)/(n+1/5) | subjective |\n +------------+--------+----------------------+-----------------------+\n | gringorton | 0.44 | (i-0.44)/(n+0.12) | |\n +------------+--------+----------------------+-----------------------+\n | hazen | 1/2 | (i-1/2)/n | midpoints of n equal |\n | | | | intervals |\n +------------+--------+----------------------+-----------------------+\n | larsen | 0.567 | (i-0.567)/(n-0.134) | |\n +------------+--------+----------------------+-----------------------+\n | gumbel | 1 | (i-1)/(n-1) | mode of sampling |\n | | | | distribution |\n +------------+--------+----------------------+-----------------------+\n | california | NA | i/n | |\n +------------+--------+----------------------+-----------------------+\n\n Where 'i' is the sorted rank of the y value, and 'n' is the\n total number of values to be plotted.\"\"\",\n \"clean\": r\"\"\"clean\n [optional, default is False, input filter]\n\n The 'clean' command will repair an input index, removing\n duplicate index values and sorting.\"\"\",\n \"skiprows\": r\"\"\"skiprows: list-like or integer or callable\n [optional, default is None which will infer header from first\n line, input filter]\n\n Line numbers to skip (0-indexed) or number of lines to skip\n (int) at the start of the file.\n\n If callable, the callable function will be evaluated against the\n row indices, returning True if the row should be skipped and\n False otherwise.
An example of a valid callable argument would\n be\n\n ``lambda x: x in [0, 2]``.\"\"\",\n \"groupby\": r\"\"\"groupby: str\n [optional, default is None, transformation]\n\n The pandas offset code to group the time-series data into.\n A special code is also available to group 'months_across_years'\n that will group into twelve monthly categories across the entire\n time-series.\"\"\",\n \"force_freq\": r\"\"\"force_freq: str\n [optional, output format]\n\n Force this frequency for the output. Typically you will only\n want to enforce a smaller interval where tstoolbox will insert\n missing values as needed. WARNING: you may lose data if not\n careful with this option. In general, letting the algorithm\n determine the frequency should always work, but this option will\n override. Use PANDAS offset codes.\"\"\",\n \"output_names\": r\"\"\"output_names: str\n [optional, output_format]\n\n The tstoolbox will change the names of the output columns to include\n some record of the operations used on each column. The `output_names`\n will override that feature. Must be a list or tuple equal to the\n number of columns in the output data.\"\"\",\n}\n\n# Decided this was inelegant, but left here in case I figure out what I want\n# and how I want it.\n# ntables = {}\n# for key in [\"SUB_D\", \"DAILY\", \"WEEKLY\", \"QUATERLY\", \"ANNUAL\"]:\n# ntables[key] = tb(_CODES[key].items(), tablefmt=\"grid\", headers=[\"Alias\", \"Description\"],)\n# ntables[key] = \" \".join(ntables[key].splitlines(True))\n# codes_table = f\"\"\"{ntables[\"SUB_D\"]}\n#\n# {ntables[\"DAILY\"]}\n# \"\"\"\n#\n# docstrings[\"pandas_offset_codes\"] = codes_table\n\n\ndef stride_and_unit(sunit):\n \"\"\"Split a stride/unit combination into component parts.\"\"\"\n if sunit is None:\n return sunit\n unit = sunit.lstrip(\"+-. 
1234567890\")\n stride = sunit[: sunit.index(unit)]\n if len(stride) > 0:\n stride = int(stride)\n else:\n stride = 1\n return unit, stride\n\n\ndef set_ppf(ptype):\n \"\"\"Return correct Percentage Point Function for `ptype`.\"\"\"\n if ptype == \"norm\":\n return norm.ppf\n elif ptype == \"lognorm\":\n return lognorm.freeze(0.5, loc=0).ppf\n elif ptype == \"weibull\":\n\n def ppf(y):\n \"\"\"Percentage Point Function for the weibull distibution.\"\"\"\n return np.log(-np.log((1 - np.array(y))))\n\n return ppf\n elif ptype is None:\n\n def ppf(y):\n return y\n\n return ppf\n\n\ndef _plotting_position_equation(i, n, a):\n \"\"\"Parameterized, generic plotting position equation.\"\"\"\n return (i - a) / float(n + 1 - 2 * a)\n\n\nPPDICT = {\n \"weibull\": 0,\n \"benard\": 0.3,\n \"filliben\": 0.3175,\n \"yu\": 0.326,\n \"tukey\": 1 / 3,\n \"blom\": 0.375,\n \"cunnane\": 2 / 5,\n \"gringorton\": 0.44,\n \"hazen\": 1 / 2,\n \"larsen\": 0.567,\n \"gumbel\": 1,\n}\n\n\ndef set_plotting_position(n, plotting_position=\"weibull\"):\n \"\"\"Create plotting position 1D array using linspace.\"\"\"\n if plotting_position == \"california\":\n return np.linspace(1.0 / n, 1.0, n)\n try:\n a = PPDICT[plotting_position]\n except KeyError:\n a = float(plotting_position)\n return _plotting_position_equation(np.arange(1, n + 1), n, a)\n\n\ndef b(s):\n \"\"\"Make sure strings are correctly represented in Python 2 and 3.\"\"\"\n try:\n return s.encode(\"utf-8\")\n except AttributeError:\n return s\n\n\ndef doc(fdict, **kwargs):\n \"\"\"Return a decorator that formats a docstring.\"\"\"\n\n def f(fn):\n fn.__doc__ = fn.__doc__.format(**fdict)\n # kwargs is currently always empty.\n # Could remove, but keeping in case useful in future.\n for attr in kwargs:\n setattr(fn, attr, kwargs[attr])\n return fn\n\n return f\n\n\ndef convert_keyword_to_postional(keyword_name, *args, **kwargs):\n \"\"\" When complete will convert keyword_name from **kwargs to end of *args. 
\"\"\"\n\n def f(fn):\n return fn\n\n return f\n\n\ndef parsedate(dstr, strftime=None, settings=None):\n \"\"\"Use dateparser to parse a wide variety of dates.\n\n Used for start and end dates.\n \"\"\"\n if dstr is None:\n return dstr\n\n # The API should boomerang a datetime.datetime instance and None.\n if isinstance(dstr, datetime.datetime):\n if strftime is None:\n return dstr\n return dstr.strftime(strftime)\n\n if dstr is None:\n return dstr\n\n try:\n pdate = pd.to_datetime(dstr)\n except ValueError:\n pdate = dateparser.parse(dstr, settings=settings)\n\n if pdate is None:\n raise ValueError(\n error_wrapper(\n \"\"\"\nCould not parse date string '{0}'.\n\"\"\".format(\n dstr\n )\n )\n )\n\n if strftime is None:\n return pdate\n return pdate.strftime(strftime)\n\n\ndef merge_dicts(*dict_args):\n \"\"\"Merge multiple dictionaries.\"\"\"\n result = {}\n for d in dict_args:\n result.update(d)\n return result\n\n\ndef about(name):\n \"\"\"Return generic 'about' information used across all toolboxes.\"\"\"\n import platform\n import pkg_resources\n\n namever = str(pkg_resources.get_distribution(name.split(\".\")[0]))\n print(\"package name = {0}\\npackage version = {1}\".format(*namever.split()))\n\n print(\"platform architecture = {0}\".format(platform.architecture()))\n print(\"platform machine = {0}\".format(platform.machine()))\n print(\"platform = {0}\".format(platform.platform()))\n print(\"platform processor = {0}\".format(platform.processor()))\n print(\"platform python_build = {0}\".format(platform.python_build()))\n print(\"platform python_compiler = {0}\".format(platform.python_compiler()))\n print(\"platform python branch = {0}\".format(platform.python_branch()))\n print(\n \"platform python implementation = {0}\".format(platform.python_implementation())\n )\n print(\"platform python revision = {0}\".format(platform.python_revision()))\n print(\"platform python version = {0}\".format(platform.python_version()))\n print(\"platform release = {0}\".format(platform.release()))\n print(\"platform system = {0}\".format(platform.system()))\n print(\"platform version = {0}\".format(platform.version()))\n\n\ndef _round_index(ntsd, round_index=None):\n \"\"\"Round the index, typically time, to the nearest interval.\"\"\"\n if round_index is None:\n return ntsd\n ntsd.index = ntsd.index.round(round_index)\n return ntsd\n\n\ndef _pick_column_or_value(tsd, var):\n \"\"\"Return a keyword value or a time-series.\"\"\"\n try:\n var = np.array([float(var)])\n except ValueError:\n var = tsd.loc[:, var].values\n return var\n\n\ndef make_list(*strorlist, **kwds):\n \"\"\"Normalize strings, converting to numbers or lists.\"\"\"\n try:\n n = kwds.pop(\"n\")\n except KeyError:\n n = None\n if n is not None:\n n = int(n)\n\n try:\n sep = kwds.pop(\"sep\")\n except KeyError:\n sep = \",\"\n\n try:\n kwdname = kwds.pop(\"kwdname\")\n except KeyError:\n kwdname = \"\"\n\n if isinstance(strorlist, (list, tuple)):\n # The following will fix ((tuples, in, a, tuple, problem),)\n strorlist = list(pd.core.common.flatten(strorlist))\n if len(strorlist) == 1:\n # Normalize lists and tuples of length 1 to scalar for\n # further processing.\n strorlist = strorlist[0]\n\n if isinstance(strorlist, (list, tuple)):\n if n is not None:\n if len(strorlist) != n:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe list {0} for \"{2}\" should have {1} members according to function requirements.\n\"\"\".format(\n strorlist, n, kwdname\n )\n )\n )\n\n try:\n strorlist = strorlist.strip()\n except AttributeError:\n 
pass\n\n if strorlist is None or isinstance(strorlist, (type(None))):\n ### None -> None\n ###\n return None\n\n if isinstance(strorlist, (int, float)):\n ### 1 -> [1]\n ### 1.2 -> [1.2]\n ###\n return [strorlist]\n\n if isinstance(strorlist, (str, bytes)) and (strorlist in [\"None\", \"\"]):\n ### 'None' -> None\n ### '' -> None\n ###\n return None\n\n if isinstance(strorlist, (str, bytes)):\n ### '1' -> [1]\n ### '5.7' -> [5.7]\n\n ### Anything other than a scalar int or float continues.\n ###\n try:\n return [int(strorlist)]\n except ValueError:\n try:\n return [float(strorlist)]\n except ValueError:\n pass\n\n try:\n strorlist = strorlist.split(sep)\n except AttributeError:\n pass\n\n if n is None:\n n = len(strorlist)\n\n # At this point 'strorlist' variable should be a list or tuple.\n if len(strorlist) != n:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe list {0} for \"{2}\" should have {1} members according to function requirements.\n\"\"\".format(\n strorlist, n, kwdname\n )\n )\n )\n\n ### [1, 2, 3] -> [1, 2, 3]\n ### ['1', '2'] -> [1, 2]\n\n ### [1, 'er', 5.6] -> [1, 'er', 5.6]\n ### [1,'er',5.6] -> [1, 'er', 5.6]\n ### ['1','er','5.6'] -> [1, 'er', 5.6]\n\n ### ['1','','5.6'] -> [1, None, 5.6]\n ### ['1','None','5.6'] -> [1, None, 5.6]\n\n ret = []\n for each in strorlist:\n if isinstance(each, (type(None), int, float)):\n ret.append(each)\n continue\n if each is None or each.strip() == \"\" or each == \"None\":\n ret.append(None)\n continue\n try:\n ret.append(int(each))\n except ValueError:\n try:\n ret.append(float(each))\n except ValueError:\n ret.append(each)\n return ret\n\n\ndef make_iloc(columns, col_list):\n \"\"\"Imitates the .ix option with subtracting one to convert.\"\"\"\n\n # [\"1\", \"Value2\"] -> [0, \"Value2\"]\n # [1, 2, 3] -> [0, 1, 2]\n col_list = make_list(col_list)\n ntestc = []\n for i in col_list:\n try:\n ntestc.append(int(i) - 1)\n except ValueError:\n ntestc.append(columns.index(i))\n return ntestc\n\n\n# NOT SET YET...\n#\n# Take `air_pressure` from df.loc[:, 1]\n# Take `short_wave_rad` from df.loc[:, 'swaverad']\n# The `temperature` keyword is set to 23.4 for all time periods\n# The `wind_speed` keyword is set to 2.4 and 3.1 in turn\n#\n# Will output two columns, one with wind_speed equal to 2.4, the next\n# with wind_speed equal to 3.1.\n#\n# API:\n# testfunction(input_ts=df,\n# air_pressure='_1',\n# short_wave_rad='swaverad',\n# temperature=23.4,\n# wind_speed=[2.4, 3.1])\n# )\n#\n# CLI:\n# mettoolbox testfunction --air_pressure=_1 \\\n# --short_wave_rad=swaverad \\\n# --temperature 23.4 \\\n# --wind_speed 2.4,3.1 < df.csv\n\n\ndef Coerce(ntype, msg=None):\n \"\"\"Coerce a value to a type.\n\n float:\n 1 -> 1.0\n '1.1' -> 1.1\n '1,' -> [1.0, None]\n int:\n 1 -> 1\n '1' -> 1\n '1,' -> [1, None]\n str:\n 1 -> '1'\n '1' -> '1'\n '1,' -> ['1', None]\n bool:\n True -> True\n False -> False\n 1 -> True\n 0 -> False\n '' -> False\n 'a' -> True\n '1,' -> [True, False]\n \"\"\"\n\n def f(v):\n if v is None or v == \"\":\n return None\n if isinstance(v, str):\n if \",\" in v:\n v = v.split(\",\")\n try:\n if isinstance(v, (list, tuple)):\n rl = []\n for i in v:\n if i is None or i == \"\":\n rl.append(i)\n else:\n rl.append(ntype(i))\n return rl\n return ntype(v)\n except ValueError:\n raise ValueError(msg or (\"Cannot coerce {0} to {1}.\".format(v, ntype)))\n\n return f\n\n\ndef _vhead(funcname, argname, nargs, nvar, vlen):\n if not isinstance(nvar, list):\n nvar = [nvar]\n if vlen is not None and len(nvar) != vlen:\n items = \"item\" if vlen == 1 
else \"items\"\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe argument {argname} can only be {vlen} {items} long.\n\nYou gave {nvar}.\n\"\"\".format(\n **locals()\n )\n )\n )\n return nvar\n\n\ndef _vdomain(funcname, argname, nargs, nvar, vlen):\n nvar = _vhead(funcname, argname, nargs, nvar, vlen)\n for i in nvar:\n if i is None:\n continue\n if i not in nargs:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe argument \"{argname}\" should be one of the terms in {nargs}.\n\nYou gave \"{i}\".\n\"\"\".format(\n **locals()\n )\n )\n )\n\n\ndef _vrange(funcname, argname, nargs, nvar, vlen):\n nvar = _vhead(funcname, argname, nargs, nvar, vlen)\n for i in nvar:\n if i is None:\n continue\n if nargs[0] is None:\n if i > nargs[1]:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe argument \"{1}\" should be less than or equal to {4}.\n\nYou gave \"{2}\".\n\"\"\".format(\n funcname, argname, i, nargs[0], nargs[1]\n )\n )\n )\n continue\n if nargs[1] is None:\n if i < nargs[0]:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe argument \"{1}\" should be greater than or equal to {3}.\n\nYou gave \"{2}\".\n\"\"\".format(\n funcname, argname, i, nargs[0], nargs[1]\n )\n )\n )\n continue\n if i < nargs[0] or i > nargs[1]:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe argument \"{1}\" should be between {3} to {4}, inclusive.\n\nYou gave \"{2}\".\n\"\"\".format(\n funcname, argname, i, nargs[0], nargs[1]\n )\n )\n )\n\n\ndef _vpass(funcname, argname, nargs, nvar, vlen):\n pass\n\n\nvalidator_func = {\"domain\": _vdomain, \"range\": _vrange, \"pass\": _vpass}\n\n\ndef validator(**argchecks): # validate ranges for both+defaults\n def onDecorator(func): # onCall remembers func and argchecks\n if not __debug__: # True if \"python -O main.py args..\"\n return func # wrap if debugging else use original\n code = func.__code__\n allargs = code.co_varnames[: code.co_argcount]\n funcname = func.__name__\n\n def onCall(*pargs, **kargs):\n # all pargs match first N args by position\n # the rest must be in kargs or omitted defaults\n positionals = list(allargs)\n positionals = positionals[: len(pargs)]\n\n for (argname, comb) in argchecks.items():\n collect_errors = []\n incomb = comb\n if callable(comb[0]):\n incomb = [comb]\n for ctype, (valid, (nargs)), vlen in incomb:\n # for all args to be checked\n iffinally = True\n if argname in kargs:\n # was passed by name\n cval = kargs[argname]\n elif argname in positionals:\n # was passed by position\n position = positionals.index(argname)\n cval = pargs[position]\n else:\n iffinally = False\n\n if iffinally is True:\n try:\n nvar = Coerce(ctype)(cval)\n validator_func[valid](funcname, argname, nargs, nvar, vlen)\n collect_errors.append(None)\n break\n except ValueError as e:\n collect_errors.append(str(e))\n if len(collect_errors) > 0 and all(collect_errors) is True:\n raise ValueError(\"\\n\\n\".join(collect_errors))\n\n return func(*pargs, **kargs) # okay: run original call\n\n return onCall\n\n return onDecorator\n\n\ndef _normalize_units(ntsd, source_units, target_units):\n \"\"\"\n\n The following is aspirational and may not reflect the code.\n\n +--------------+--------------+--------------+--------------+--------------+\n | INPUT | INPUT | INPUT | RETURN | RETURN |\n | ntsd.columns | source_units | target_units | source_units | target_units |\n +==============+==============+==============+==============+==============+\n | [\"col1:cm\", | [\"ft\", | [\"m\", | ValueError | |\n | \"col2:cm\"] | \"cm\"] | \"cm\"] | | |\n 
+--------------+--------------+--------------+--------------+--------------+\n | [\"col1:cm\", | [\"cm\"] | [\"ft\"] | ValueError | |\n | \"col2:cm\"] | | | | |\n +--------------+--------------+--------------+--------------+--------------+\n | [\"col1:cm\", | [\"cm\"] | [\"ft\"] | [\"cm\", | [\"ft\", |\n | \"col2\"] | | | \"\"] | \"\"] |\n +--------------+--------------+--------------+--------------+--------------+\n | [\"col1\", | [\"\", \"cm\"] | [\"\", \"ft\"] | [\"\", | [\"\", |\n | \"col2:cm\"] | | | \"cm\"] | \"ft\"] |\n +--------------+--------------+--------------+--------------+--------------+\n | | [\"cm\"] | [\"ft\"] | [\"cm\"] | [\"ft\"] |\n +--------------+--------------+--------------+--------------+--------------+\n | [\"cm\"] | None | [\"ft\"] | [\"cm\"] | [\"ft\"] |\n +--------------+--------------+--------------+--------------+--------------+\n\n \"\"\"\n\n target_units = make_list(target_units, n=len(ntsd.columns))\n source_units = make_list(source_units, n=len(ntsd.columns))\n\n if source_units is not None:\n names = []\n for inx in list(range(len(ntsd.columns))):\n words = ntsd.columns[inx].split(\":\")\n testunits = source_units[inx]\n if len(words) > 1:\n names.append(ntsd.columns[inx])\n if words[1] != testunits:\n raise ValueError(\n error_wrapper(\n \"\"\"\nIf 'source_units' specified must match units from column name. Column\nname units are specified as the second ':' delimited field.\nYou specified 'source_units' as {0}, but column units are {1}.\n\"\"\".format(\n source_units[inx], words[1]\n )\n )\n )\n else:\n names.append(\"{0}:{1}\".format(ntsd.columns[inx], testunits))\n ntsd.columns = names\n else:\n source_units = []\n for nu in ntsd.columns:\n try:\n source_units.append(nu.split(\":\")[1])\n except (AttributeError, IndexError):\n source_units.append(\"\")\n\n if source_units is None and target_units is not None:\n raise ValueError(\n error_wrapper(\n \"\"\"\nTo specify target_units, you must also specify source_units. 
You can\nspecify source_units either by using the `source_units` keyword or by placing\nit in the name of the data column as the second ':' separated field.\n\nThe `source_units` keyword must specify units that are convertible\nto the `target_units`: {target_units}\n\"\"\".format(\n **locals()\n )\n )\n )\n\n # Convert source_units to target_units.\n if target_units is not None:\n ncolumns = []\n for inx, colname in enumerate(ntsd.columns):\n words = colname.split(\":\")\n if len(words) > 1:\n # convert words[1] to target_units[inx]\n Q_ = UREG.Quantity\n try:\n ntsd[colname] = Q_(ntsd[colname].to_numpy(), UREG(words[1])).to(\n target_units[inx]\n )\n words[1] = target_units[inx]\n except AttributeError:\n raise ValueError(\n error_wrapper(\n \"\"\"\nNo conversion between {0} and {1}.\"\"\".format(\n words[1], target_units[inx]\n )\n )\n )\n ncolumns.append(\":\".join(words))\n ntsd.columns = ncolumns\n\n return ntsd\n\n\n@validator(\n start_date=[parsedate, [\"pass\", []], 1],\n end_date=[parsedate, [\"pass\", []], 1],\n force_freq=[str, [\"pass\", []], 1],\n groupby=[str, [\"pass\", []], 1],\n dropna=[str, [\"domain\", [\"no\", \"any\", \"all\"]], 1],\n round_index=[str, [\"pass\", []], 1],\n clean=[bool, [\"domain\", [True, False]], 1],\n target_units=[str, [\"pass\", []], None],\n source_units=[str, [\"pass\", []], None],\n bestfreq=[bool, [\"domain\", [True, False]], 1],\n)\ndef common_kwds(\n input_tsd=None,\n start_date=None,\n end_date=None,\n pick=None,\n force_freq=None,\n groupby=None,\n dropna=\"no\",\n round_index=None,\n clean=False,\n target_units=None,\n source_units=None,\n bestfreq=True,\n):\n \"\"\"Process all common_kwds across sub-commands into single function.\n\n Parameters\n ----------\n input_tsd: DataFrame\n Input data which should be a DataFrame.\n\n Returns\n -------\n df: DataFrame\n DataFrame altered according to options.\n\n \"\"\"\n ntsd = input_tsd\n\n ntsd = _pick(ntsd, pick)\n\n ntsd = _normalize_units(ntsd, source_units, target_units)\n\n if clean is True:\n ntsd = ntsd.sort_index()\n ntsd = ntsd[~ntsd.index.duplicated()]\n\n ntsd = _round_index(ntsd, round_index=round_index)\n\n if bestfreq is True:\n ntsd = asbestfreq(ntsd, force_freq=force_freq)\n\n ntsd = _date_slice(ntsd, start_date=start_date, end_date=end_date)\n\n if ntsd.index.is_all_dates is True:\n ntsd.index.name = \"Datetime\"\n\n if dropna in [\"any\", \"all\"]:\n ntsd = ntsd.dropna(axis=\"index\", how=dropna)\n else:\n try:\n ntsd = asbestfreq(ntsd)\n except ValueError:\n pass\n\n if groupby is not None:\n if groupby == \"months_across_years\":\n return ntsd.groupby(lambda x: x.month)\n return ntsd.resample(groupby)\n\n return ntsd\n\n\ndef _pick(tsd, columns):\n columns = make_list(columns)\n if columns is None:\n return tsd\n ncolumns = []\n\n for i in columns:\n if i in tsd.columns:\n # if using column names\n ncolumns.append(tsd.columns.tolist().index(i))\n continue\n\n if i == tsd.index.name:\n # if wanting the index\n # making it -1 that will be evaluated later...\n ncolumns.append(-1)\n continue\n\n # if using column numbers\n try:\n target_col = int(i) - 1\n except ValueError:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe name {0} isn't in the list of column names\n{1}.\n\"\"\".format(\n i, tsd.columns\n )\n )\n )\n if target_col < -1:\n raise ValueError(\n error_wrapper(\n \"\"\"\nThe requested column \"{0}\" must be greater than or equal to 0.\nFirst data column is 1, index is column 0.\n\"\"\".format(\n i\n )\n )\n )\n if target_col >= len(tsd.columns):\n raise ValueError(\n
error_wrapper(\n \"\"\"\nThe requested column index {0} must be less than the\nnumber of columns {1}.\n\"\"\".format(\n i, len(tsd.columns)\n )\n )\n )\n\n # column names or numbers or index organized into\n # numbers in ncolumns\n ncolumns.append(target_col)\n\n if len(ncolumns) == 1 and ncolumns[0] != -1:\n return pd.DataFrame(tsd[tsd.columns[ncolumns]])\n\n newtsd = pd.DataFrame()\n for index, col in enumerate(ncolumns):\n if col == -1:\n # Use the -1 marker to indicate index\n jtsd = pd.DataFrame(tsd.index, index=tsd.index)\n else:\n try:\n jtsd = pd.DataFrame(tsd.iloc[:, col], index=tsd.index)\n except IndexError:\n jtsd = pd.DataFrame(tsd.loc[:, col], index=tsd.index)\n\n newtsd = newtsd.join(jtsd, lsuffix=\"_{0}\".format(index), how=\"outer\")\n return newtsd\n\n\ndef _date_slice(input_tsd, start_date=None, end_date=None):\n \"\"\"Private function to slice time series.\"\"\"\n if input_tsd.index.is_all_dates:\n accdate = []\n for testdate in [start_date, end_date]:\n if testdate is None:\n tdate = None\n else:\n if input_tsd.index.tz is None:\n tdate = pd.Timestamp(testdate)\n else:\n tdate = pd.Timestamp(testdate, tz=input_tsd.index.tz)\n # Is this comparison cheaper than the .join?\n if not np.any(input_tsd.index == tdate):\n # Create a dummy column at the date I want, then delete\n # Not the best, but...\n row = pd.DataFrame([np.nan], index=[tdate])\n row.columns = [\"deleteme\"]\n input_tsd = input_tsd.join(row, how=\"outer\")\n input_tsd.drop(\"deleteme\", inplace=True, axis=1)\n accdate.append(tdate)\n\n return input_tsd[slice(*accdate)]\n return input_tsd\n\n\n_ANNUALS = {\n 0: \"DEC\",\n 1: \"JAN\",\n 2: \"FEB\",\n 3: \"MAR\",\n 4: \"APR\",\n 5: \"MAY\",\n 6: \"JUN\",\n 7: \"JUL\",\n 8: \"AUG\",\n 9: \"SEP\",\n 10: \"OCT\",\n 11: \"NOV\",\n 12: \"DEC\",\n}\n\n_WEEKLIES = {0: \"MON\", 1: \"TUE\", 2: \"WED\", 3: \"THU\", 4: \"FRI\", 5: \"SAT\", 6: \"SUN\"}\n\n\ndef asbestfreq(data, force_freq=None):\n \"\"\"Test to determine best frequency to represent data.\n\n This uses several techniques.\n 0.5. If index is not DateTimeIndex, return\n 1. If force_freq is set use .asfreq.\n 2. If data.index.freq is not None, just return.\n 3. If data.index.inferred_freq is set use .asfreq.\n 4. Use pd.infer_freq - fails if any missing\n 5. Use .is_* functions to establish A, AS, A-*, AS-*, Q, QS, M, MS\n 6. Use minimum interval to establish the fixed time periods up to weekly\n 7. Gives up returning None for PANDAS offset string\n\n \"\"\"\n if not isinstance(data.index, pd.DatetimeIndex):\n return data\n\n if force_freq is not None:\n return data.asfreq(force_freq)\n\n ndiff = (\n data.index.values.astype(\"int64\")[1:] - data.index.values.astype(\"int64\")[:-1]\n )\n if np.any(ndiff <= 0):\n raise ValueError(\n error_wrapper(\n \"\"\"\nDuplicate or time reversal index entry at\nrecord {1} (start count at 0):\n\"{0}\".\n\"\"\".format(\n data.index[:-1][ndiff <= 0][0], np.where(ndiff <= 0)[0][0] + 1\n )\n )\n )\n\n if data.index.freq is not None:\n return data\n\n # Since pandas doesn't set data.index.freq and data.index.freqstr when\n # using .asfreq, this function returns that PANDAS time offset alias code\n # also.
Not ideal at all.\n #\n # This gets most of the frequencies...\n if data.index.inferred_freq is not None:\n try:\n return data.asfreq(data.index.inferred_freq)\n except ValueError:\n pass\n\n # pd.infer_freq would fail if given a large dataset\n if len(data.index) > 100:\n slic = slice(None, 99)\n else:\n slic = slice(None, None)\n try:\n infer_freq = pd.infer_freq(data.index[slic])\n except ValueError:\n infer_freq = None\n if infer_freq is not None:\n return data.asfreq(infer_freq)\n\n # At this point pd.infer_freq failed probably because of missing values.\n # The following algorithm would not capture things like BQ, BQS\n # ...etc.\n if np.alltrue(data.index.is_year_end):\n infer_freq = \"A\"\n elif np.alltrue(data.index.is_year_start):\n infer_freq = \"AS\"\n elif np.alltrue(data.index.is_quarter_end):\n infer_freq = \"Q\"\n elif np.alltrue(data.index.is_quarter_start):\n infer_freq = \"QS\"\n elif np.alltrue(data.index.is_month_end):\n if np.all(data.index.month == data.index[0].month):\n # Actually yearly with different ends\n infer_freq = \"A-{0}\".format(_ANNUALS[data.index[0].month])\n else:\n infer_freq = \"M\"\n elif np.alltrue(data.index.is_month_start):\n if np.all(data.index.month == data.index[0].month):\n # Actually yearly with different start\n infer_freq = \"AS-{0}\".format(_ANNUALS[data.index[0].month])\n else:\n infer_freq = \"MS\"\n\n if infer_freq is not None:\n return data.asfreq(infer_freq)\n\n # Use the minimum of the intervals to test a new interval.\n # Should work for fixed intervals.\n ndiff = sorted(set(ndiff))\n mininterval = np.min(ndiff)\n if mininterval <= 0:\n raise ValueError\n if len(ndiff) == 1:\n ngcd = ndiff[0]\n else:\n ngcd = reduce(gcd, ndiff)\n if ngcd < 1000:\n infer_freq = \"{0}N\".format(ngcd)\n elif ngcd < 1000000:\n infer_freq = \"{0}U\".format(ngcd // 1000)\n elif ngcd < 1000000000:\n infer_freq = \"{0}L\".format(ngcd // 1000000)\n elif ngcd < 60000000000:\n infer_freq = \"{0}S\".format(ngcd // 1000000000)\n elif ngcd < 3600000000000:\n infer_freq = \"{0}T\".format(ngcd // 60000000000)\n elif ngcd < 86400000000000:\n infer_freq = \"{0}H\".format(ngcd // 3600000000000)\n elif ngcd < 604800000000000:\n infer_freq = \"{0}D\".format(ngcd // 86400000000000)\n elif ngcd < 2419200000000000:\n infer_freq = \"{0}W\".format(ngcd // 604800000000000)\n if np.all(data.index.dayofweek == data.index[0].dayofweek):\n infer_freq = infer_freq + \"-{0}\".format(_WEEKLIES[data.index[0].dayofweek])\n else:\n infer_freq = \"D\"\n\n if infer_freq is not None:\n return data.asfreq(infer_freq)\n\n # Give up\n return data\n\n\ndef dedupIndex(idx, fmt=None, ignoreFirst=True):\n # fmt: A string format that receives two arguments:\n # name and a counter.
By default: fmt='%s.%03d'\n # ignoreFirst: Disable/enable postfixing of first element.\n idx = pd.Series(idx)\n duplicates = idx[idx.duplicated()].unique()\n fmt = \"%s.%03d\" if fmt is None else fmt\n for name in duplicates:\n dups = idx == name\n ret = [\n fmt % (name, i) if (i != 0 or not ignoreFirst) else name\n for i in range(dups.sum())\n ]\n idx.loc[dups] = ret\n return pd.Index(idx)\n\n\ndef renamer(xloc, suffix=\"\"):\n \"\"\"Print the suffix into the third \":\" separated field of the header.\"\"\"\n if suffix is None:\n suffix = \"\"\n try:\n words = xloc.split(\":\")\n except AttributeError:\n words = [str(xloc)]\n if len(words) == 1:\n words.append(\"\")\n words.append(suffix)\n elif len(words) == 2:\n words.append(suffix)\n elif len(words) == 3:\n if suffix:\n if words[2]:\n words[2] = words[2] + \"_\" + suffix\n else:\n words[2] = suffix\n return \":\".join(words)\n\n\n# Utility\ndef print_input(\n iftrue,\n intds,\n output,\n suffix=\"\",\n date_format=None,\n float_format=\"g\",\n tablefmt=\"csv\",\n showindex=\"never\",\n):\n \"\"\"Print the input time series also.\"\"\"\n return _printiso(\n return_input(iftrue, intds, output, suffix=suffix),\n date_format=date_format,\n float_format=float_format,\n tablefmt=tablefmt,\n showindex=showindex,\n )\n\n\ndef return_input(\n iftrue, intds, output, suffix=\"\", reverse_index=False, output_names=[]\n):\n \"\"\"Print the input time series also.\"\"\"\n output.columns = output_names or [renamer(i, suffix) for i in output.columns]\n if iftrue:\n return intds.join(output, lsuffix=\"_1\", rsuffix=\"_2\", how=\"outer\")\n if reverse_index is True:\n return output.iloc[::-1]\n return output\n\n\ndef _apply_across_columns(func, xtsd, **kwds):\n \"\"\"Apply a function to each column in turn.\"\"\"\n for col in xtsd.columns:\n xtsd[col] = func(xtsd[col], **kwds)\n return xtsd\n\n\ndef _printiso(\n tsd,\n date_format=None,\n sep=\",\",\n float_format=\"g\",\n showindex=\"never\",\n headers=\"keys\",\n tablefmt=\"csv\",\n):\n \"\"\"Separate so can use in tests.\"\"\"\n if isinstance(tsd, (pd.DataFrame, pd.Series)):\n if isinstance(tsd, pd.Series):\n tsd = pd.DataFrame(tsd)\n\n if tsd.columns.empty:\n tsd = pd.DataFrame(index=tsd.index)\n\n # Not perfectly true, but likely will use showindex for indices\n # that are not time stamps.\n if showindex is True:\n if not tsd.index.name:\n tsd.index.name = \"UniqueID\"\n else:\n if not tsd.index.name:\n tsd.index.name = \"Datetime\"\n\n print_index = True\n if tsd.index.is_all_dates is True:\n if not tsd.index.name:\n tsd.index.name = \"Datetime\"\n # Someone made the decision about the name\n # This is how I include time zone info by tacking on to the\n # index.name.\n elif \"datetime\" not in tsd.index.name.lower():\n tsd.index.name = \"Datetime\"\n else:\n # This might be overkill, but tstoolbox is for time-series.\n # Revisit if necessary.\n print_index = False\n\n if tsd.index.name == \"UniqueID\":\n print_index = False\n\n if showindex in [\"always\", \"default\"]:\n print_index = True\n\n elif isinstance(tsd, (int, float, tuple, np.ndarray)):\n tablefmt = None\n\n if tablefmt in [\"csv\", \"tsv\", \"csv_nos\", \"tsv_nos\"]:\n sep = {\"csv\": \",\", \"tsv\": \"\\t\", \"csv_nos\": \",\", \"tsv_nos\": \"\\t\"}[tablefmt]\n if isinstance(tsd, pd.DataFrame):\n try:\n tsd.to_csv(\n sys.stdout,\n float_format=\"%{0}\".format(float_format),\n date_format=date_format,\n sep=sep,\n index=print_index,\n )\n return\n except IOError:\n return\n else:\n tablefmt = simple_separated_format(sep)\n\n if 
tablefmt is None:\n print(str(list(tsd))[1:-1])\n return\n\n all_table = tb(\n tsd,\n tablefmt=tablefmt,\n showindex=showindex,\n headers=headers,\n floatfmt=float_format,\n )\n if tablefmt in [\"csv_nos\", \"tsv_nos\"]:\n print(all_table.replace(\" \", \"\"))\n else:\n print(all_table)\n\n\n# Make _printiso public. Keep both around until all toolboxes are converted.\nprintiso = _printiso\n\n\ndef open_local(filein):\n \"\"\"\n Open the given input file.\n\n It can decode various formats too, such as gzip and bz2.\n\n \"\"\"\n ext = os.path.splitext(filein)[1]\n if ext in [\".gz\", \".GZ\"]:\n return gzip.open(filein, \"rb\")\n if ext in [\".bz\", \".bz2\"]:\n return bz2.BZ2File(filein, \"rb\")\n return open(filein, \"r\")\n\n\ndef reduce_mem_usage(props):\n for col in props.columns:\n try:\n if props[col].dtype == object: # Exclude strings\n continue\n except AttributeError:\n continue\n\n # make variables for Int, max and min\n mx = props[col].max()\n mn = props[col].min()\n\n # test if column can be converted to an integer\n try:\n asint = props[col].astype(np.int64)\n result = all((props[col] - asint) == 0)\n except ValueError:\n # Want missing values to remain missing so\n # they need to remain float.\n result = False\n\n # Make Integer/unsigned Integer datatypes\n if result is True:\n if mn >= 0:\n if mx < np.iinfo(np.uint8).max:\n props[col] = props[col].astype(np.uint8)\n elif mx < np.iinfo(np.uint16).max:\n props[col] = props[col].astype(np.uint16)\n elif mx < np.iinfo(np.uint32).max:\n props[col] = props[col].astype(np.uint32)\n else:\n props[col] = props[col].astype(np.uint64)\n else:\n if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:\n props[col] = props[col].astype(np.int8)\n elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:\n props[col] = props[col].astype(np.int16)\n elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:\n props[col] = props[col].astype(np.int32)\n elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:\n props[col] = props[col].astype(np.int64)\n\n else:\n # Put here whatever I come up with for float types.\n # Algorithm problem because just looking at limits doesn't help\n # with precision.\n pass\n\n return props\n\n\ndef memory_optimize(tsd):\n \"\"\"Convert all columns to known types.\n\n \"infer_objects\" replaced some code here such that the \"memory_optimize\"\n function might go away.
Kept in case we want to add additional\n optimizations.\n \"\"\"\n tsd.index = pd.Index(tsd.index, dtype=None)\n tsd = tsd.infer_objects()\n tsd = reduce_mem_usage(tsd)\n if tsd.index.is_all_dates is False:\n tsd.index = reduce_mem_usage(pd.DataFrame(data=tsd.index)).iloc[:, 0]\n try:\n tsd.index.freq = pd.infer_freq(tsd.index)\n except (TypeError, ValueError):\n # TypeError: Not datetime like index\n # ValueError: Less than three rows\n pass\n return tsd\n\n\ndef is_valid_url(url, qualifying=None):\n \"\"\"Return whether \"url\" is valid.\"\"\"\n min_attributes = (\"scheme\", \"netloc\")\n qualifying = min_attributes if qualifying is None else qualifying\n token = urlparse(url)\n return all((getattr(token, qualifying_attr) for qualifying_attr in qualifying))\n\n\n@validator(\n parse_dates=[bool, [\"domain\", [True, False]], 1],\n extended_columns=[bool, [\"domain\", [True, False]], 1],\n dropna=[str, [\"domain\", [\"no\", \"any\", \"all\"]], 1],\n force_freq=[str, [\"pass\", []], 1],\n index_type=[str, [\"domain\", [\"datetime\", \"number\"]], 1],\n names=[str, [\"pass\", []], 1],\n skiprows=[int, [\"pass\", []], 1],\n)\ndef read_iso_ts(\n indat,\n parse_dates=True,\n extended_columns=False,\n dropna=None,\n force_freq=None,\n skiprows=None,\n index_type=\"datetime\",\n names=None,\n):\n \"\"\"Read the format printed by 'printiso' and maybe other formats.\n\n Parameters\n ----------\n indat: str, bytes, StringIO, file pointer, file name, DataFrame,\n Series, tuple, list, dict\n\n The input data.\n\n Returns\n -------\n df: DataFrame\n\n Returns a DataFrame.\n\n \"\"\"\n try:\n skiprows = int(skiprows)\n except (ValueError, TypeError):\n skiprows = make_list(skiprows)\n\n result = {}\n if isinstance(indat, (str, bytes, StringIO)):\n if indat in [\"-\", b\"-\"]:\n # if from stdin format must be the tstoolbox standard\n # pandas read_csv supports file like objects\n header = 0\n sep = None\n fpi = sys.stdin\n fname = \"_\"\n elif isinstance(indat, StringIO):\n header = \"infer\"\n sep = None\n fpi = indat\n fname = \"\"\n elif b\"\\n\" in b(indat) or b\"\\r\" in b(indat):\n # a string?\n header = \"infer\"\n sep = None\n fpi = StringIO(b(indat).decode())\n fname = \"\"\n elif os.path.exists(indat):\n # a local file\n header = \"infer\"\n sep = None\n fpi = open_local(indat)\n fname = os.path.splitext(os.path.basename(indat))[0]\n elif is_valid_url(indat):\n # a url?\n header = \"infer\"\n sep = None\n fpi = indat\n fname = \"\"\n else:\n raise ValueError(\n error_wrapper(\n \"\"\"\nCan't figure out what \"input_ts={0}\" is.\nI tested if it was a string or StringIO object, DataFrame, local file,\nor a URL.
If you want to pull from stdin use \"-\" or redirection/piping.\n\"\"\".format(\n indat\n )\n )\n )\n\n fstr = \"{1}\"\n if extended_columns is True:\n fstr = \"{0}.{1}\"\n\n index_col = 0\n if parse_dates is False:\n index_col = False\n\n # Would want this to be more generic...\n na_values = []\n for spc in range(20)[1:]:\n spcs = \" \" * spc\n na_values.append(spcs)\n na_values.append(spcs + \"nan\")\n\n if not result:\n if names is not None:\n header = 0\n names = make_list(names)\n if index_type == \"number\":\n parse_dates = False\n result = pd.io.parsers.read_csv(\n fpi,\n header=header,\n names=names,\n index_col=index_col,\n infer_datetime_format=True,\n parse_dates=parse_dates,\n na_values=na_values,\n keep_default_na=True,\n sep=sep,\n skipinitialspace=True,\n engine=\"python\",\n skiprows=skiprows,\n )\n first = [i.split(\":\")[0] for i in result.columns]\n first = [fstr.format(fname, i) for i in first]\n first = [[i.strip()] for i in dedupIndex(first)]\n\n rest = [i.rstrip(\".0123456789 \").split(\":\")[1:] for i in result.columns]\n\n result.columns = [\":\".join(i + j) for i, j in zip(first, rest)]\n\n tmpc = result.columns.values\n for index, i in enumerate(result.columns):\n if \"Unnamed:\" in i:\n words = i.split(\":\")\n tmpc[index] = words[0].strip() + words[1].strip()\n result.columns = tmpc\n\n elif isinstance(indat, pd.DataFrame):\n result = indat\n\n elif isinstance(indat, (pd.Series, dict)):\n result = pd.DataFrame(indat)\n\n elif isinstance(indat, (tuple, list)):\n result = pd.DataFrame({\"values\": indat})\n\n elif isinstance(indat, (int, float)):\n result = pd.DataFrame({\"values\": indat}, index=[0])\n\n else:\n raise ValueError(\n error_wrapper(\n \"\"\"\nCan't figure out what was passed to read_iso_ts.\nYou gave me {0}, of\n{1}.\n\"\"\".format(\n indat, type(indat)\n )\n )\n )\n\n result = memory_optimize(result)\n\n if result.index.is_all_dates is False:\n try:\n result.set_index(0, inplace=True)\n except KeyError:\n pass\n\n if result.index.is_all_dates is True:\n try:\n words = result.index.name.split(\":\")\n except AttributeError:\n words = \"\"\n if len(words) > 1:\n try:\n result.index = result.index.tz_localize(words[1])\n except TypeError:\n pass\n result.index.name = \"Datetime:{0}\".format(words[1])\n else:\n result.index.name = \"Datetime\"\n\n if dropna in [\"any\", \"all\"]:\n result.dropna(how=dropna, inplace=True)\n\n try:\n return asbestfreq(result, force_freq=force_freq)\n except ValueError:\n return result\n","sub_path":"tstoolbox/tsutils.py","file_name":"tsutils.py","file_ext":"py","file_size_in_byte":65984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
+{"seq_id":"321241460","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@create: 2017-10-26 14:02:55.\n\n@author: name\n\n@desc: http_base\n\"\"\"\nimport os\nimport six\nimport json\nimport time\nimport base64\nimport datetime\nimport decimal\nimport traceback\nimport hashlib\nimport binascii\nimport logging\nfrom collections import OrderedDict\nfrom tornado import gen\nfrom tornado.web import HTTPError\nfrom restkit.rest import http_app\nfrom restkit.rest import RestHandler\nfrom restkit.handle_redis import RedisHandler\nfrom restkit.handle_database import HttpDBaseHandler\nfrom restkit.transactions import Transaction\nfrom restkit.tools.field_checker import FeildOption\nfrom restkit.tools.mongo_query import QueryParames\nfrom restkit.tools.string_unit import FeildInVaild\nfrom restkit.tools.error_info import error_info\nfrom 
restkit.tools.error_info import TextString\nfrom restkit.tools.configbase import LoginLimitSettings\n\n\ntry:\n from sqlalchemy.engine import RowProxy\nexcept ImportError:\n RowProxy = dict\n\n\nIS_DEBUG = os.environ.get('IS_DEBUG', 'false').lower() == 'true'\n\n\nclass ErrorFinish(Exception):\n pass\n\n\nclass HttpBaseHandler(RestHandler,\n RedisHandler,\n HttpDBaseHandler):\n\n # def check_origin(self, origin):\n # return True\n\n def initialize(self, **kwargs): # noqa\n \"\"\"initialize.\"\"\"\n self.api_key_check = True\n self.need_logged = True\n self.need_signed = True\n self.need_otp_logged = True\n self.product_code = True\n self.product_check = True\n self.is_debug = False\n super(HttpBaseHandler, self).initialize(**kwargs)\n\n @property\n def logger(self) -> logging.Logger:\n return self.settings['logger']\n\n @property\n def gsettings(self) -> LoginLimitSettings:\n return self.settings['gsettings']\n\n @gen.coroutine\n def rest_initialize(self, api_key, rest_option, *args, **kwargs):\n yield super(HttpBaseHandler, self).rest_initialize(\n api_key, rest_option, *args, **kwargs\n )\n self.api_key = api_key\n self.rest_option = rest_option\n self.need_logged = rest_option.kwargs.get('need_logged', True)\n self.need_signed = rest_option.kwargs.get('need_signed', True)\n self.need_otp_logged = rest_option.kwargs.get('need_otp_logged', True)\n self.is_debug = rest_option.kwargs.get('is_debug', False)\n\n if self.is_debug and not IS_DEBUG:\n raise HTTPError(404)\n\n @staticmethod\n def is_has_field(dictobj, key):\n assert isinstance(dictobj, dict)\n assert isinstance(key, six.string_types)\n return key in dictobj and dictobj[key] is not None\n\n def get_remove_ip(self):\n return self.request.headers.get(\n 'X-Forwarded-For', self.request.headers.get(\n 'X-Real-Ip', self.request.remote_ip\n )\n )\n\n def get_language(self):\n if \"Accept-Language\" in self.request.headers:\n languages = self.request.headers[\"Accept-Language\"].split(\",\")\n locales = []\n for language in languages:\n parts = language.strip().split(\";\")\n if len(parts) > 1 and parts[1].startswith(\"q=\"):\n try:\n score = float(parts[1][2:])\n except (ValueError, TypeError):\n score = 0.0\n else:\n score = 1.0\n locales.append((parts[0], score))\n\n if locales:\n locales.sort(key=lambda pair: pair[1], reverse=True)\n codes = [l[0] for l in locales]\n return error_info.get_langs(*codes)\n\n return error_info.default_lang()\n\n # ----------------------------------------------\n # signed\n # ----------------------------------------------\n\n def sign_json(self, data, skey=None):\n if not isinstance(data, dict) or not data:\n return None\n\n if 'sign' in data:\n data = data.copy()\n data.pop('sign')\n\n if '@skey' in data:\n data = data.copy()\n skey = data.pop('@skey')\n\n if not skey:\n skey = self.settings.get('cookie_secret', '')\n\n if not skey:\n raise TypeError('cookie_secret not set')\n\n if 'utc' not in data:\n raise TypeError('data invalid, utc not set')\n\n if isinstance(skey, six.string_types):\n skey = six.b(skey)\n\n keys = list(data.keys())\n keys.sort()\n\n sdata = '&'.join(\n ['{0}={1}'.format(key, data[key]) for key in keys]\n )\n\n return binascii.hexlify(hashlib.pbkdf2_hmac(\n 'sha256', sdata.encode('utf8'), skey, 1024)\n ).decode('utf8')\n\n def is_sign_invaild(self, data, sign, skey=None):\n if not isinstance(data, dict) or not data:\n return True\n\n if not isinstance(sign, six.string_types) or not sign:\n return True\n\n if 'utc' not in data or \\\n not isinstance(data['utc'], six.integer_types) or \\\n 
(time.time() - data['utc']) > self.gsettings.sign_timeout:\n return True\n\n return sign != self.sign_json(data, skey)\n\n def decode_json_from_body(self):\n \"\"\"Parse the request body as JSON.\"\"\"\n if not self.request.body:\n return {}\n\n return json.loads(\n self.request.body,\n parse_float=decimal.Decimal,\n object_pairs_hook=OrderedDict\n )\n\n def base64_or_json_decode(self, data):\n try:\n # data = json.loads(data, parse_float=str, parse_int=str)\n data = json.loads(\n data, parse_float=decimal.Decimal,\n object_pairs_hook=OrderedDict\n )\n except json.JSONDecodeError:\n data = base64.b64decode(data)\n data = json.loads(\n data, parse_float=decimal.Decimal,\n object_pairs_hook=OrderedDict\n )\n return data\n\n def get_where_parames_from_json(self):\n where = self.decode_json_from_body()\n\n _where = where.get('where', {})\n _where = {\n key: val\n for key, val in _where.items()\n if val\n }\n\n return {\n 'where': _where,\n 'page': where.get('page', [0, 50]),\n 'sort': where.get('sort', {}),\n 'csv': where.get('csv', {}),\n }\n\n def get_where_parames(self):\n where = self.get_argument('where', '{}')\n page = self.get_argument('page', '[0, 50]')\n sort = self.get_argument('sort', '{}')\n csv_parame = self.get_argument('csv', '{}')\n\n _where = self.base64_or_json_decode(where)\n _where = {\n key: val\n for key, val in _where.items()\n if val\n }\n\n return {\n 'where': _where,\n 'page': self.base64_or_json_decode(page),\n 'sort': self.base64_or_json_decode(sort),\n 'csv': self.base64_or_json_decode(csv_parame),\n }\n\n def pop_where_key(self, where, key):\n try:\n value = where.pop(key)\n except KeyError:\n value = None\n\n if value and not isinstance(value, six.string_types):\n self.write_error_json_raise(\n error_info.ERROR_KEY_INVAILD,\n {'key': key}\n )\n\n return value\n\n def package_rsp(self, code, text=None, **kwargs):\n # TAG - maybe step language\n if isinstance(code, dict):\n return code\n\n req_data = {}\n try:\n append_text = kwargs.pop('append_text')\n except KeyError:\n append_text = ''\n\n if isinstance(text, dict):\n error_text = error_info.error_text(code, **text)\n elif isinstance(text, (six.string_types, TextString)):\n error_text = text\n else:\n error_text = error_info.error_text(code)\n\n req_data.update({\n 'error': code,\n 'error_text': error_text + append_text\n })\n req_data.update(kwargs)\n return req_data\n\n def json_serial(self, obj):\n \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n if isinstance(obj, datetime.datetime):\n return obj.strftime(\"%Y%m%dT%H%M%S\")\n\n elif isinstance(obj, datetime.date):\n return obj.strftime(\"%Y%m%d\")\n\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n\n elif isinstance(obj, FeildOption):\n return obj.to_dict()\n\n elif isinstance(obj, TextString):\n return obj.to_string(self.get_language())\n\n elif isinstance(obj, RowProxy):\n return dict(obj)\n\n else:\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n def json_dumps(self, data, **kwargs):\n if 'default' not in kwargs:\n kwargs['default'] = self.json_serial\n return json.dumps(data, **kwargs)\n\n def json_loads(self, data):\n return json.loads(\n data, parse_float=decimal.Decimal,\n object_pairs_hook=OrderedDict\n )\n\n def json_serial_reload(self, data):\n return self.json_loads(self.json_dumps(data))\n\n def write_error_json(self, code, text=None, **kwargs):\n req_data = self.package_rsp(code, text, **kwargs)\n self.write(self.json_dumps(req_data))\n\n def write_error_json_raise(self, code, text=None, 
**kwargs):\n self.write_error_json(code, text, **kwargs)\n raise ErrorFinish()\n\n def write(self, data):\n if not isinstance(data, (six.string_types, six.binary_type)):\n data = self.json_dumps(data)\n super(HttpBaseHandler, self).write(data)\n\n # ----------------------------------------------\n # filters\n # ----------------------------------------------\n\n @staticmethod\n def filter_keys(field_dict, filter_keys):\n \"\"\"filter_keys.\"\"\"\n assert isinstance(field_dict, dict)\n return {key: val for key, val in field_dict.items()\n if key not in filter_keys}\n\n @staticmethod\n def filter_whitelist(field_dict, filter_keys):\n \"\"\"filter_whitelist.\"\"\"\n assert isinstance(field_dict, dict)\n return {key: val for key, val in field_dict.items()\n if key in filter_keys}\n\n @gen.coroutine\n def trans_spawn(self, trans):\n assert isinstance(trans, Transaction)\n try:\n yield trans.spawn()\n except HTTPError:\n raise\n except ErrorFinish:\n pass\n except (FeildInVaild, QueryParames) as ex:\n if len(ex.args) == 1 and isinstance(ex.args[0], TextString):\n self.write_error_json(\n error_info.ERROR_FIELD_CHECK_ERROR,\n ex.args[0]\n )\n else:\n self.write_error_json(\n error_info.ERROR_FIELD_CHECK_ERROR,\n str(ex)\n )\n # except DBError as ex:\n # except_trac = str(traceback.format_exc())\n # APP_LOGGER.warning(except_trac)\n\n # if IS_DEBUG:\n # self.write_error_json(\n # error_info.ERROR_DN_EXCEPT,\n # append_text='[{}: {}]'.format(type(ex).__name__, str(ex))\n # )\n # else:\n # self.write_error_json(error_info.ERROR_DN_EXCEPT)\n except Exception as ex:\n except_trac = str(traceback.format_exc())\n self.logger.warning(except_trac)\n\n # if run in pyinstaller\n if IS_DEBUG:\n self.write_error_json(\n error_info.ERROR_EXCEPTION,\n '{}[{}]'.format(type(ex).__name__, str(ex))\n )\n else:\n self.write_error_json(error_info.ERROR_EXCEPTION,\n {'desc': 'unknown error'})\n\n # TODO - grpc error\n # grpc._channel._Rendezvous: <_Rendezvous of RPC\n # that terminated with (StatusCode.UNAVAILABLE, Connect Failed)>\n finally:\n try:\n yield self.dbase_rollback()\n except Exception:\n pass\n try:\n yield self.redis_rollback()\n except Exception:\n pass\n","sub_path":"restkit/handlers/http_mrg_conns/http_base.py","file_name":"http_base.py","file_ext":"py","file_size_in_byte":12368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"583213024","text":"import cv2\n\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\nfor _ in range(10):\n _, frame = cap.read()\n\nframe = cv2.resize(frame, (640, 480))\nframe = cv2.flip(frame, 1)\ngray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\nbackground = gray\n# cv2.imshow(\"BG Frame\", gray)\n\nwhile True:\n # Reading, Resizing, Flipping the Frame\n _, frame = cap.read()\n frame = cv2.resize(frame, (640, 480))\n frame = cv2.flip(frame, 1)\n\n # Processing the Frame\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n foreground = gray - background\n _, mask = cv2.threshold(foreground, 100, 255, cv2.THRESH_BINARY)\n\n abs_diff = cv2.absdiff(gray, background)\n _, abs_mask = cv2.threshold(abs_diff, 45, 255, cv2.THRESH_BINARY)\n\n cv2.imshow(\"Mask\", mask)\n cv2.imshow(\"Abs Diff\", abs_mask)\n\n if cv2.waitKey(1) == ord('q'):\n 
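# NOTE (added comment): \"gray - background\" a few lines up is plain uint8 subtraction,\n    # which wraps around on negative differences (e.g. 10 - 50 becomes 216 for uint8),\n    # so the cv2.absdiff() based mask is generally the more reliable of the two shown.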
break\n\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"MotionDetect/webcam_motion.py","file_name":"webcam_motion.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"248719437","text":"def traffic():\n with open('traffic.txt', 'r') as f:\n traffic = [data.strip().split() for data in f]\n \n \n d = {}\n for client, room_number, direction, time in traffic:\n if not room_number in d: # only first time room number appears \n d[room_number] = [-int(time)]\n else:\n d[room_number].append(-int(time) if direction == 'I' else int(time))\n for number in d.items():\n visitor = int(len(number[1])//2)\n print('Room {0}, {1} minute average visit, {2} visitor(s) total' \n .format(number[0], int(sum(number[1]))//visitor, visitor))\n \n \n \ntraffic()","sub_path":"read_write_files/weds-2/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"420145954","text":"from django.views import generic\nfrom django.http import HttpResponse\nfrom django.views.generic.edit import CreateView,UpdateView,DeleteView\nfrom django.shortcuts import render, redirect\nfrom django.template import loader\nfrom django.contrib.auth import authenticate,login\nfrom django.urls import reverse_lazy\nfrom django.views.generic import View\nfrom .models import Department, Student, Subject, History, LecturerInSection, Register\nfrom .models import Lecturer\nfrom .models import LecturerResearch\nfrom login.forms import UserForm\nfrom django.db.models import Count\n\n\nclass Home(generic.ListView):\n template_name = 'student/home.html'\n def get_queryset(self):\n return Department.objects.all()\n\nclass StudentManageView(generic.ListView):\n template_name = 'student/studentmanage.html'\n context_object_name = 'all_student'\n\n def get_queryset(self):\n return Student.objects.all()\n\ndef AVGdepartment(request):\n header_str = Student.objects.all()\n template = loader.get_template('student/avg-department.html')\n b = Department.objects.all()\n count = b.count()\n sum = 0\n current = 0\n list = []\n divide = []\n departmentname = []\n\n for a in range(count):\n list.append(0)\n divide.append(0)\n departmentname.append(0)\n\n for department in Department.objects.all():\n for student in header_str:\n departmentname[current] = department.DepName\n if student.stu_dep_FK.DepName == department.DepName :\n list[current] = list[current] + student.GPAX\n divide[current] = divide[current] + 1\n current = current + 1\n\n for a in range(count):\n list[a] = list[a] / divide[a]\n\n context = {\n 'count': count,\n 'department_list': Department.objects.all(),\n 'departmentname': departmentname,\n 'list': list,\n 'header_str': header_str,\n }\n\n return HttpResponse(template.render(context, request))\n\ndef totalCostDep(request):\n header_str = Student.objects.all()\n regis_str = Register.objects.all()\n template = loader.get_template('student/totalCost-form.html')\n b = Department.objects.all()\n count = b.count()\n sum = 0\n current = 0\n debugger = 0\n list = []\n departmentname = []\n debugger = []\n\n for a in range(count):\n list.append(0)\n departmentname.append(0)\n debugger.append(0)\n\n for department in Department.objects.all():\n for regis in regis_str:\n departmentname[current] = department.DepName\n if regis.reg_stu_FK.stu_dep_FK.DepName == department.DepName:\n list[current] = list[current] + regis.Cost\n\n 
debugger[current] = debugger[current] + 1\n current = current + 1\n\n context = {\n 'debug': debugger,\n 'count': count,\n 'department_list': Department.objects.all(),\n 'departmentname': departmentname,\n 'list': list,\n 'header_str': header_str,\n }\n\n return HttpResponse(template.render(context, request))\n\ndef totalCostLec(request):\n header_str = Student.objects.all()\n lec_str = Lecturer.objects.all()\n template = loader.get_template('student/totalCost-lec.html')\n b = Department.objects.all()\n count = b.count()\n sum = 0\n current = 0\n debugger = 0\n list = []\n departmentname = []\n debugger = []\n\n for a in range(count):\n list.append(0)\n departmentname.append(0)\n debugger.append(0)\n\n for department in Department.objects.all():\n for lecturer in lec_str:\n departmentname[current] = department.DepName\n if lecturer.lec_dep_FK.DepName == department.DepName:\n list[current] = list[current] + 1\n current = current + 1\n\n context = {\n 'count': count,\n 'department_list': Department.objects.all(),\n 'departmentname': departmentname,\n 'list': list,\n 'header_str': header_str,\n }\n\n return HttpResponse(template.render(context, request))\n\ndef ResearchCount(request):\n research_str = LecturerResearch.objects.all()\n template = loader.get_template('student/researchcount-form.html')\n extractResearch = LecturerResearch.objects.values('Category').annotate(dcount=Count('Category'))\n lex = list(extractResearch)\n\n nR = list()\n cR = list()\n\n for d in lex:\n nR.append(d['dcount'])\n for d in lex:\n cR.append(d['Category'])\n\n context = {\n 'count' :nR,\n 'Rname' :cR,\n }\n\n return HttpResponse(template.render(context, request))\n\n\ndef StudentFormView(request):\n reg_str = Register.objects.order_by('RegNo')\n template = loader.get_template('student/complex-form.html')\n count = reg_str.count()\n forboss = 0\n loop_str = ''\n looping = 0\n buttonbool = True\n liststudentID = []\n listregNo = []\n listdep = []\n listprice = []\n listdiscount = []\n liststatus = []\n lstudentID = []\n lregNo = []\n ldep = []\n lprice = []\n ldiscount = []\n lstatus = []\n\n username = None\n if request.user.is_authenticated():\n username = request.user.username\n\n for a in range(count):\n liststudentID.append(0)\n listregNo.append(0)\n listdep.append(0)\n listprice.append(0)\n listdiscount.append(0)\n liststatus.append(0)\n lstudentID.append(0)\n lregNo.append(0)\n ldep.append(0)\n lprice.append(0)\n ldiscount.append(0)\n lstatus.append(0)\n\n for reg in reg_str:\n liststudentID[looping] = reg.reg_stu_FK.StudentID\n listregNo[looping] = reg.RegNo\n listdep[looping] = reg.reg_stu_FK.stu_dep_FK.DepName\n listprice[looping] = reg.Cost\n listdiscount[looping] = reg.Discount\n liststatus[looping] = reg.PaymentStatus\n looping = looping + 1\n\n for reg in reg_str:\n if username == reg.reg_stu_FK.StudentID:\n lstudentID.append(reg.reg_stu_FK.StudentID)\n lregNo.append(reg.RegNo)\n ldep.append(reg.reg_stu_FK.stu_dep_FK.DepName)\n lprice.append(reg.Cost)\n ldiscount.append(reg.Discount)\n lstatus.append(reg.PaymentStatus)\n if reg.Semester == reg.reg_stu_FK.CurrentSemester and reg.PaymentStatus == 'Y': #find current semester in his/her history\n buttonbool = False\n forboss = forboss + 1\n\n for a in range(count):\n lstudentID.remove(0)\n lregNo.remove(0)\n ldep.remove(0)\n lprice.remove(0)\n ldiscount.remove(0)\n lstatus.remove(0)\n\n for a in range(forboss):\n loop_str = loop_str + 'x'\n\n context = {\n 'loop_str':loop_str,\n 'count':count,\n 'buttonbool':buttonbool,\n 'liststudentID':lstudentID,\n 
'listregNo':lregNo,\n 'listdep':ldep,\n 'listprice':lprice,\n 'listdiscount':ldiscount,\n 'liststatus':lstatus,\n }\n\n return HttpResponse(template.render(context, request))\n\n\nclass IndexView(generic.ListView):\n template_name = 'student/index.html'\n context_object_name = 'all_departments'\n\n def get_queryset(self):\n return Department.objects.all()\n\n\ndef StudentIndex(request):\n all_students = Student.objects.all()\n\n query = request.GET.get(\"q\")\n if query:\n all_students = all_students.filter(name__icontains=query)\n\n return render(request, 'student/Sindex.html',{'all_students': all_students})\n\n\nclass StudentCreate(CreateView):\n model = Student\n fields = ['stu_dep_FK', 'name', 'Degree', 'DOB', 'Tel','StudentProfile']\n success_url = reverse_lazy('student:index')\n\nclass StudentUpdate(UpdateView):\n model = Student\n fields = ['stu_dep_FK', 'name', 'Degree', 'DOB', 'Tel','StudentProfile']\n success_url = reverse_lazy('student:index')\n\nclass StudentDelete(DeleteView):\n model = Student\n success_url = reverse_lazy('student:Sindex')\n\nclass DetailView(generic.DetailView):\n model = Department\n template_name = 'student/detail.html'\n\n\nclass DepartmentCreate(CreateView):\n model = Department\n fields = ['DepName', 'DepHead', 'DepTel', 'DepEmail', 'Dep_logo']\n\n\nclass DepartmentUpdate(UpdateView):\n model = Department\n fields = ['DepName', 'DepHead', 'DepTel', 'DepEmail', 'Dep_logo']\n\n\nclass DepartmentDelete(DeleteView):\n model = Department\n success_url = reverse_lazy('student:index')\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'student/registration_form.html'\n # display blank form\n def get(self,request):\n form = self.form_class(None)\n return render(request,self.template_name,{'form':form})\n\n # process form data\n def post(self,request):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n\n user = form.save(commit=False)\n\n #cleaned (normalized) data\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n\n # return User objects if credentials are correct\n user = authenticate(username=username,password=password)\n\n if user is not None:\n\n if user.is_active:\n login(request,user)\n return redirect('student:index')\n return render(request,self.template_name,{'form':form})\n\n\n\nclass LIndexView(generic.ListView):\n template_name = 'student/Lindex.html'\n context_object_name = 'all_lecturers'\n def get_queryset(self):\n return Lecturer.objects.all()\n\n\nclass LDetailView(generic.DetailView):\n model = Lecturer\n template_name = 'student/Ldetail.html'\n\n\nclass LecturerCreate(CreateView):\n model = Lecturer\n fields = ['lec_dep_FK','LecturerID','LecturerDegree','LecturerName','LecturerAddress','LecturerTel','LecturerEmail','LecturerProfile']\n success_url = reverse_lazy('student:Lindex')\n\nclass LecturerUpdate(UpdateView):\n model = Lecturer\n fields = ['lec_dep_FK','LecturerID','LecturerDegree','LecturerName','LecturerAddress','LecturerTel','LecturerEmail','LecturerProfile']\n success_url = reverse_lazy('student:Lindex')\n\nclass LecturerDelete(DeleteView):\n model = Lecturer\n success_url = reverse_lazy('student:Lindex')\n\n\nclass ResearchCreate(CreateView):\n model = LecturerResearch\n fields = ['research_lec_FK', 'ResearchName', 'Category', 'Description']\n success_url = reverse_lazy('student:Lindex')\n\n\nclass ResearchDelete(DeleteView):\n model = LecturerResearch\n success_url = reverse_lazy('student:Lindex')\n\nclass 
ResearchUpdate(UpdateView):\n model = LecturerResearch\n fields = ['research_lec_FK', 'ResearchName', 'Category', 'Description']\n success_url = reverse_lazy('student:Lindex')\n\n\nclass CourseCreate(CreateView):\n model = Subject\n fields = ['sub_sec_FK', 'SubjectID', 'SubjectName', 'Prerequisite', 'Semester', 'Credit']\n success_url = reverse_lazy('student:Lindex')\n\nclass AssignLecturer(CreateView):\n model = LecturerInSection\n fields = ['lecsec_sub_FK', 'lecsec_sec_FK', 'lecsec_lec_FK', 'order']\n success_url = reverse_lazy('student:Lindex')\n\n\nclass CIndexView(generic.ListView):\n template_name = 'student/Courseindex.html'\n context_object_name = 'all_subjects'\n\n def get_queryset(self):\n return Subject.objects.all()\n\nclass CDetailView(generic.DetailView):\n model = Subject\n template_name = 'student/courseDetail.html'\n\n\nclass HistoryUpdate(UpdateView):\n model = History\n fields = ['Grade']\n success_url = reverse_lazy('student:Cindex')\n\n\nclass CourseUpdate(UpdateView):\n model = Subject\n fields = ['sub_sec_FK', 'SubjectID', 'SubjectName', 'Prerequisite', 'Semester', 'Credit']\n success_url = reverse_lazy('student:Cindex')\n\nclass CourseDelete(DeleteView):\n model = Subject\n success_url = reverse_lazy('student:Cindex')\n\nclass StudentHistory(generic.ListView):\n template_name = 'student/history.html'\n context_object_name = 'all_student'\n\n def get_queryset(self):\n return Student.objects.all()\n\nclass HistoryDetail(generic.DetailView):\n model = History\n template_name = 'student/history-detail.html'\n\nclass HistoryDelete(DeleteView):\n model = History\n success_url = reverse_lazy('student:Cindex')\n\nclass AddSubject(generic.ListView):\n template_name = 'student/add-subject.html'\n context_object_name = 'all_student'\n\n def get_queryset(self):\n return Student.objects.all()\n\nclass SubjectUpdate(UpdateView):\n model = History\n fields = ['StudyYet']\n success_url = reverse_lazy('student:Test')\n\nclass SDetailView(generic.ListView):\n template_name = 'student/StudentProfile.html'\n context_object_name = 'all_students'\n def get_queryset(self):\n return Student.objects.all()\n\nclass Test(generic.ListView):\n template_name = 'student/TestCart.html'\n context_object_name = 'all_student'\n\n def get_queryset(self):\n return Student.objects.all()\n\nclass RegisterUpdate(UpdateView):\n model = Register\n fields = ['PaymentMethod', 'PaymentStatus']\n success_url = reverse_lazy('student:studentform')\n","sub_path":"log/student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"10960183","text":"__author__ = 'НКТ'\n# 1 Find the number of unique elements in a list\nargs = [1, 2, 3, 1, 2, 3, 4, 5, 6, 2, 8, 3, 9]\nprint(\"1 --- \", len(set(args)), \"\\n\")\n\n# 2 Find the number of unique elements in a string\nctpp = \"retyret\"\nprint(\"2 --- \", len(set(ctpp)), \"\\n\")\n\n# 3 Find the sum of all natural numbers below 1000 that are multiples of 3 or 5\n# (x % 3 and x % 5) is falsy exactly when x is divisible by 3 or by 5\nprint(\"3 --- \", sum([x for x in range(0, 1000) if not (x % 3 and x % 5)]), \"\\n\")\n\n# 4 Find the sum of all even Fibonacci numbers that do not exceed\n# 4000000\nsuma = 0\ns1 = 1\ns2 = 0\nwhile s1 < 4000000:\n if s1 % 2 == 0:\n suma += s1\n s1, s2 = s1 + s2, s1\nprint(\"4 --- \", suma, \"\\n\")\n\n# 5 Count how many Russian letters, how many Latin letters and how many\n# other symbols a string contains, and print the other symbols\nctp = str(input())\ni = 0\nj = 0\nz = 0\nlet = len(ctp)\ndip = \"\"\nfor e in ctp:\n
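# NOTE (added comment): the Cyrillic ranges used below (\"а\"..\"я\", \"А\"..\"Я\") do not\n# include the letter \"ё\"/\"Ё\", so it ends up counted among the \"other symbols\".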
if \"a\" <= e <= \"z\" or \"A\" <= e <= \"Z\":\n j += 1\n elif \"а\" <= e <= \"я\" or \"А\" <= e <= \"Я\":\n i += 1\n else:\n z += 1\n dip += e\nprint(\"5 --- \",)\nprint(\"Russian letters coverage : {:.2%}\".format(i / let))\nprint(\"Latin letters coverage : {:.2%}\".format(j / let))\nprint(\"Other symbols : {0}({1})\".format(z, set(list(dip))))\nprint(\"\\n\")\n\n# 6 Sort a list by the text values of its sub-lists\nctp = [(100, 'сто'), (200, \"двести\"), (300, \"триста\"), (400, \"четыреста\"),\n (500, \"пятьсот\")]\nctp.sort(key=lambda i: i[1])\nprint(\"6 --- \", ctp, \"\\n\")\n\n# 7 Task http://euler.jakumo.org/problems/view/22.html\nwith open('Name.txt', 'r') as f:\n names = [s.replace('\"', '') for s in f.read().split(',')]\npoints = 0\nnames.sort()\ns = 0\ni = 0\nname = 0\nfor i, name in enumerate(names):\n points += sum([ord(s)-64 for s in name]) * (i + 1)\nprint(\"7 --- \", points, \"\\n\")\n\n# 8 Check that a string is not empty and its length is not less than 10\na = str(input())\nif a:\n print(\"good1\")\nelse:\n print(\"error1\")\nif len(a) > 10:\n print(\"good2\")\nelse:\n print(\"error2\")\n\n# 9 Check that a dict is not empty, that it has an element with key \"key\",\n# and that the element with key \"key\" is not None\na = {\"key\": 'er'}\nif a:\n print(\"good3\")\nelse:\n print(\"error3\")\nif \"key\" in a:\n print(\"good4\")\nelse:\n print(\"error4\")\nif a.get('key'):\n print(\"good5\")\nelse:\n print(\"error5\")\n\n# 10 Check that a list is not empty, that it has a 5th element, and that the 5th element is not 0\na = [1, 5, 4, 4, 6, 6, 8, 9, 0, 33]\nif a:\n print(\"good6\")\nelse:\n print(\"error6\")\nif len(a) >= 6:\n print(\"good7\")\nelse:\n print(\"error7\")\nif a[5]:\n print(\"good8\")\nelse:\n print(\"error8\")\n","sub_path":"ппк.py","file_name":"ппк.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"515526698","text":"from typing import Tuple\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import Parameter\r\nfrom torch.nn import init\r\nfrom torch import Tensor\r\nimport torch.nn.functional as F\r\nimport math\r\n\r\n\r\nclass PhyaLSTM(nn.Module):\r\n \"\"\"Implementation of the physics-aware-LSTM (PhyaLSTM)\r\n Parameters\r\n ----------\r\n input_size : int\r\n Number of dynamic features, which are those passed to the LSTM at each time step---x_t\r\n Sca_size : int\r\n Number of static catchment attributes---Sca(t)\r\n hidden_size : int\r\n Number of hidden/memory cells.\r\n \"\"\"\r\n def __init__(self, input_size: int, Sca_size: int, hidden_size: int, with_AM: bool=False):\r\n super(PhyaLSTM, self).__init__()\r\n self.input_size = input_size\r\n self.Sca_size = Sca_size\r\n self.hidden_size = hidden_size\r\n self.ME = True # Memory Enhancement\r\n self.with_am = with_AM\r\n\r\n # input gate\r\n self.w_ix = Parameter(Tensor(hidden_size, input_size))\r\n self.w_ih = Parameter(Tensor(hidden_size, hidden_size))\r\n self.w_ia = Parameter(Tensor(hidden_size, hidden_size))\r\n self.b_i = Parameter(Tensor(hidden_size, 1))\r\n\r\n # forget gate\r\n self.w_fx = Parameter(Tensor(hidden_size, input_size))\r\n self.w_fo = Parameter(Tensor(hidden_size, hidden_size))\r\n self.w_fa = Parameter(Tensor(hidden_size, hidden_size))\r\n self.b_f = Parameter(Tensor(hidden_size, 1))\r\n\r\n # output gate\r\n self.w_ox = Parameter(Tensor(hidden_size, input_size))\r\n self.w_oh = Parameter(Tensor(hidden_size, hidden_size))\r\n self.w_oa = Parameter(Tensor(hidden_size, hidden_size))\r\n self.b_o = 
Parameter(Tensor(hidden_size, 1))\r\n\r\n # cell\r\n self.w_gx = Parameter(Tensor(hidden_size, input_size))\r\n self.w_gh = Parameter(Tensor(hidden_size, hidden_size))\r\n self.b_g = Parameter(Tensor(hidden_size, 1))\r\n\r\n # sca(t)\r\n self.w_a = Parameter(Tensor(hidden_size, Sca_size))\r\n self.b_a = Parameter(Tensor(hidden_size, 1))\r\n\r\n # assimilation obs(AM)\r\n if self.with_am:\r\n self.w_sim = Parameter(Tensor(hidden_size, hidden_size))\r\n self.w_obs = Parameter(Tensor(hidden_size, hidden_size))\r\n self.b_k = Parameter(Tensor(hidden_size, 1))\r\n\r\n self.reset_weigths()\r\n\r\n def reset_weigths(self):\r\n \"\"\"reset weights\r\n \"\"\"\r\n stdv = 1.0 / math.sqrt(self.hidden_size)\r\n for weight in self.parameters():\r\n init.uniform_(weight, -stdv, stdv)\r\n\r\n def calc_corr(self, a, b):\r\n \"\"\"Calculate the correlation coefficient of two vectors: a, b\r\n \"\"\"\r\n a_avg = sum(a) / len(a)\r\n b_avg = sum(b) / len(b)\r\n # Calculate the molecule, covariance --- according to the covariance formula,\r\n # it should be divided by n, because it reduces n up and down in the correlation coefficient, so it can not be divided by n.\r\n cov_ab = sum([(x - a_avg) * (y - b_avg) for x, y in zip(a, b)])\r\n # Calculate the denominator, variance product --- variance should be divided by N, so it can not be divided by n.\r\n sq = math.sqrt(sum([(x - a_avg) ** 2 for x in a]) * sum([(x - b_avg) ** 2 for x in b]))\r\n corr_factor = cov_ab / sq\r\n return corr_factor\r\n\r\n\r\n def forward(self,\r\n x_input: Tensor,\r\n x_sca: Tensor,\r\n soil_state: Tensor,\r\n h_obs: Tensor=None,\r\n h_input:Tensor=None,\r\n c_input:Tensor=None) -> Tuple[Tensor, Tensor]:\r\n \"\"\"forward,\r\n Args:\r\n inputs: [batch_size, seq_size, input_size]\r\n input_Sca: [batch_size, seq_size, Sca_size]\r\n soil_state: [batch_size, seq_size]\r\n h_obs: [batch_size, seq_size, hidden_size]\r\n \"\"\"\r\n batch_size, seq_size, input_dim = x_input.size()\r\n\r\n h_output = torch.zeros(batch_size, seq_size, self.hidden_size)\r\n c_output = torch.zeros(batch_size, seq_size, self.hidden_size)\r\n\r\n if h_input==None:\r\n h_t = torch.zeros(batch_size, self.hidden_size).t()\r\n if c_input==None:\r\n c_t = torch.zeros(batch_size, self.hidden_size).t()\r\n\r\n r = torch.zeros(batch_size, self.hidden_size) # correlation coefficient\r\n\r\n for t in range(seq_size):\r\n x_t = x_input[:, t, :].t() # [input_size, batch_size]\r\n sca_t = x_sca[:, t, :].t() # [Sca_size, batch_size]\r\n\r\n a_t = torch.sigmoid(self.w_a @ sca_t + self.b_a)\r\n # input gate\r\n i = torch.sigmoid(self.w_ix @ x_t + self.w_ih @ h_t + self.w_ia @ a_t + self.b_i)\r\n # cell\r\n g = torch.tanh(self.w_gx @ x_t + self.w_gh @ h_t + self.b_g)\r\n # forget gate\r\n f = torch.sigmoid(self.w_fx @ x_t + self.w_fo @ h_t + self.w_fa @ a_t + self.b_f)\r\n # output gate\r\n o = torch.sigmoid(self.w_ox @ x_t + self.w_oh @ h_t + self.w_oa @ a_t + self.b_o)\r\n\r\n c_next = f * c_t + i * g # [hidden_dim, batch_size]\r\n h_next = o * torch.tanh(c_next) # [hidden_dim, batch_size]\r\n\r\n if self.ME:\r\n for i in range(batch_size):\r\n r[i, :] = self.calc_corr(c_output[i, :t], soil_state[i, :t])\r\n c_next = c_next.mul(abs(r)+1)\r\n\r\n if self.with_am:\r\n K = torch.tanh(self.w_sim @ h_next + self.w_obs @ h_obs + self.b_k)\r\n h_next = h_next + K.mul(h_obs - h_next)\r\n\r\n h_output[:, t] = h_next.t() # transpose:[batch_size, hidden_dim]\r\n c_output[:, t] = c_next.t()\r\n\r\n h_t = h_next\r\n c_t = c_next\r\n\r\n return (h_output, c_output)\r\n\r\n\r\n\r\nclass 
Obs_encoder(nn.Module):\r\n def __init__(self, x_obs_size, sca_size, hidden_size):\r\n super(Obs_encoder, self).__init__()\r\n self.x_obs_size = x_obs_size\r\n self.hidden_size = hidden_size\r\n self.sca_size = sca_size\r\n\r\n self.obs_encoder = PhyaLSTM(x_obs_size, sca_size, hidden_size, with_AM=False) # (input_size, Sca_size, hidden_size)\r\n\r\n def forward(self, x_obs, x_sca, soil_state:Tensor):\r\n h_obs, _ = self.obs_encoder(x_obs, x_sca, soil_state, None)\r\n return h_obs\r\n\r\n\r\n\r\n\r\nclass Prediction_module(nn.Module):\r\n def __init__(self, input_size, sca_size, x_obs_size, hidden_size, window_size):\r\n super(Prediction_module, self).__init__()\r\n self.x_size = input_size\r\n self.sca_size = sca_size\r\n self.x_obs_size = x_obs_size\r\n self.hidden_size = hidden_size\r\n self.window_size = window_size\r\n\r\n self.obs_encoder = PhyaLSTM(x_obs_size, sca_size, hidden_size, with_AM=False) # x_obs_size == input_size\r\n self.prediction = PhyaLSTM(input_size, sca_size, hidden_size, with_AM=True)\r\n self.relu = nn.ReLU()\r\n self.fc1 = nn.Linear(in_features=hidden_size, out_features=1)\r\n self.w_omega = Parameter(Tensor(input_size, input_size))\r\n\r\n\r\n def forward(self, x_input:Tensor,\r\n x_sca:Tensor,\r\n soil_state:Tensor,\r\n x_obs:Tensor):\r\n\r\n h_obs, _ = self.obs_encoder(x_obs, x_sca, soil_state)\r\n h_, c_ = self.prediction(x_input[:,:-1], x_sca[:,:-1], soil_state[:,:-1], h_obs=h_obs[:,:-1])\r\n h_t = h_[:,-1:,:]\r\n\r\n # Attention Net\r\n x_i = x_input[:, -self.window_size:, :] # [batch_size, window_size, input_size]\r\n x_t = x_input[:, -1:, :].transpose(1,2) # [batch_size, input_size, 1]\r\n u = torch.tanh(torch.matmul(x_i, self.w_omega)) # [batch_size, window_size, input_size]\r\n att = torch.matmul(u, x_t) # [batch_size, window_size, 1]\r\n att_score = F.softmax(att, dim=1)\r\n\r\n scored_c = c_[:, -self.window_size:, :] * att_score # [batch_size, window_size, hidden_size]\r\n c_t = torch.sum(scored_c, dim=1) # [batch_size, 1, hidden_size]\r\n\r\n h_t, _ = self.prediction(x_input[:,-1:], x_sca[:,-1:], soil_state[:,-1:], h_obs[:,-1:], h_input=h_t, c_input=c_t)\r\n\r\n pred = self.fc1(self.relu(h_t)) # [batch_size, 1, 1]\r\n pred = pred.squeeze(2) # [batch_size, 1]\r\n\r\n return pred\r\n\r\n\r\n\r\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":8250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"30036365","text":"def get_move(board: list, x: int) -> int:\n if board[x:x+2] == ['b', '_']:\n return 1\n elif board[x:x+3] == ['b', 'w', '_']:\n return 2\n elif board[x-1:x+1] == ['_', 'w']:\n return -1\n elif board[x-2:x+1] == ['_', 'b', 'w']:\n return -2\n return 0\n\ndef solve(board, solution, level=0) -> bool:\n print(' ' * level + ''.join(board))\n if board == solution:\n return True\n\n for x in range(len(board)):\n m = get_move(board, x)\n if m != 0:\n board[x], board[x + m] = board[x + m], board[x]\n\n if solve(board, solution, level + 1):\n return True\n\n # no luck: backtrack\n board[x], board[x + m] = board[x + m], board[x]\n\n return False\n\ndef main():\n n = int(input('n? 
'))\n board = ['b'] * n + ['_'] + ['w'] * n\n solution = ['w'] * n + ['_'] + ['b'] * n\n print(solve(board, solution))\n\nmain()","sub_path":"Python/esercizi/8.7.py","file_name":"8.7.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"156990008","text":"import pyaudio\nimport wave\nimport time\nimport sys\nfrom pkg_resources import resource_filename\n\nfilepath = resource_filename(__name__, 'sounds/' + 'alarm-clock-elapsed.wav')\n\n\nclass Playme():\n def __init__(self):\n self.wf = wave.open(filepath, 'rb')\n\n def play(self):\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(self.wf.getsampwidth()),\n channels=self.wf.getnchannels(),\n rate=self.wf.getframerate(),\n output=True,\n stream_callback=self.callback)\n\n stream.start_stream()\n\n while stream.is_active():\n time.sleep(0.1)\n\n stream.stop_stream()\n stream.close()\n self.wf.close()\n\n p.terminate()\n\n\n def callback(self, in_data, frame_count, time_info, status):\n data = self.wf.readframes(frame_count)\n return (data, pyaudio.paContinue)\n","sub_path":"bzoing/playme.py","file_name":"playme.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"468725241","text":"# Copyright 2014 Rackspace\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom datetime import datetime\n\nfrom trove.common import cfg\nfrom trove.common import configurations\nfrom trove.common import utils\nfrom trove.common import exception\nfrom trove.common.exception import ModelNotFoundError\nfrom trove.datastore.models import DatastoreVersion\nfrom trove.db import models as dbmodels\nfrom trove.openstack.common import log as logging\nfrom trove.openstack.common.gettextutils import _\nfrom trove.taskmanager import api as task_api\n\nfrom trove.configuration import models\nfrom trove.instance import models as instances_models\n\nfrom trove.common import configurations\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\nclass ConfigurationType(object):\n TEMPLATE_CONFIG = \"1\"\n INSTANCE_CONFIG = \"0\"\n ALL_CONFIG = \"10\"\n\nclass KSC_Configurations(models.Configurations):\n @staticmethod\n def load(context, config_type=ConfigurationType.TEMPLATE_CONFIG):\n if context is None:\n raise TypeError(\"Argument context not defined.\")\n elif id is None:\n raise TypeError(\"Argument is not defined.\")\n\n if context.is_admin:\n if config_type == ConfigurationType.ALL_CONFIG:\n db_info = KSC_DBConfiguration.find_all(deleted=False)\n else:\n db_info = KSC_DBConfiguration.find_all(config_type=config_type, deleted=False)\n if db_info is None:\n LOG.debug(\"No configurations found\")\n else:\n if config_type == ConfigurationType.ALL_CONFIG:\n db_info = KSC_DBConfiguration.find_all(tenant_id=context.tenant,\n deleted=False)\n else:\n db_info = KSC_DBConfiguration.find_all(config_type=config_type, tenant_id=context.tenant,\n deleted=False)\n if db_info is None:\n LOG.debug(\"No configurations found for tenant % s\"\n % context.tenant)\n\n limit = int(context.limit or KSC_Configurations.DEFAULT_LIMIT)\n if limit > KSC_Configurations.DEFAULT_LIMIT:\n limit = KSC_Configurations.DEFAULT_LIMIT\n\n data_view = KSC_DBConfiguration.find_by_pagination('configurations',\n db_info,\n \"foo\",\n limit=limit,\n marker=context.marker)\n next_marker = data_view.next_page_marker\n return data_view.collection, next_marker\n \n @staticmethod\n def load_default_templates(context):\n if context is None:\n raise TypeError(\"Argument context not defined.\")\n\n db_info = KSC_DBConfiguration.find_all(config_type=ConfigurationType.TEMPLATE_CONFIG, tenant_id=CONF.default_template_tenant_id,\n deleted=False)\n if db_info is None:\n LOG.debug(\"No configurations found for tenant % s\"\n % context.tenant)\n\n limit = int(context.limit or KSC_Configurations.DEFAULT_LIMIT)\n if limit > KSC_Configurations.DEFAULT_LIMIT:\n limit = KSC_Configurations.DEFAULT_LIMIT\n\n data_view = KSC_DBConfiguration.find_by_pagination('configurations',\n db_info,\n \"foo\",\n limit=limit,\n marker=context.marker)\n next_marker = data_view.next_page_marker\n return data_view.collection, next_marker\n\nclass KSC_Configuration(models.Configuration):\n\n @staticmethod\n def create(name, description, tenant_id, datastore, datastore_version, config_type=ConfigurationType.TEMPLATE_CONFIG):\n if config_type not in [ConfigurationType.TEMPLATE_CONFIG, ConfigurationType.INSTANCE_CONFIG]:\n raise TypeError(\"Argument config_type=%s is error.\" % config_type)\n \n configurationGroup = KSC_DBConfiguration.create(\n name=name,\n description=description,\n tenant_id=tenant_id,\n datastore_version_id=datastore_version,\n config_type=config_type)\n return configurationGroup\n \n @staticmethod\n def delete(context, 
group):\n if not context.is_admin and group.tenant_id == CONF.default_template_tenant_id:\n raise exception.Forbidden\n \n if group.config_type == ConfigurationType.INSTANCE_CONFIG:\n instances = instances_models.DBInstance.find_all(\n configuration_id=group.id,\n deleted=False).all()\n if instances:\n return\n \n deleted_at = utils.utcnow()\n KSC_Configuration.remove_all_items(context, group.id, deleted_at)\n group.deleted = True\n group.deleted_at = deleted_at\n group.save()\n\n @staticmethod\n def load(context, id):\n try:\n if context.is_admin:\n config_info = KSC_DBConfiguration.find_by(id=id,\n deleted=False)\n else:\n try:\n config_info = KSC_DBConfiguration.find_by(id=id,\n tenant_id=context.tenant,\n deleted=False)\n except ModelNotFoundError:\n config_info = KSC_DBConfiguration.find_by(id=id,\n tenant_id=CONF.default_template_tenant_id,\n deleted=False) \n except ModelNotFoundError:\n msg = _(\"Configuration group with ID %s could not be found.\") % id\n raise ModelNotFoundError(msg)\n return config_info\n \n @staticmethod\n def validate_dynamic_params(context, configuration, instances, overrides, dynamic_param=False):\n '''validate if the dynamic params can be used on the instance'''\n if configuration.config_type == ConfigurationType.INSTANCE_CONFIG:\n \n from trove.patch.configuration.service import KSC_ConfigurationsController\n \n result = KSC_ConfigurationsController.validate_configuration(\n context, overrides, instances=instances, dynamic_param=dynamic_param)\n \n if len(result):\n return result\n \n \n @staticmethod\n def save(context, configuration, configuration_items, instances):\n if not context.is_admin and configuration.tenant_id == CONF.default_template_tenant_id:\n raise exception.Forbidden\n \n KSC_DBConfiguration.save(configuration)\n for item in configuration_items:\n item[\"deleted_at\"] = None\n models.ConfigurationParameter.save(item)\n \n if configuration.config_type == ConfigurationType.INSTANCE_CONFIG:\n items = KSC_Configuration.load_items(context, configuration.id)\n \n for instance in instances:\n LOG.debug(\"applying to instance: %s\" % instance.id)\n overrides = {}\n for i in items:\n overrides[i.configuration_key] = i.configuration_value\n \n task_api.API(context).update_overrides(instance.id, overrides)\n \n @staticmethod\n def ksc_create_instance_config_from_template(context, template_config_id):\n if template_config_id is None:\n raise TypeError(\"Argument template_config_id not defined.\")\n template_config = KSC_Configuration.load(context, template_config_id)\n \n template_overrides = KSC_Configuration.get_configuration_overrides(context, template_config_id)\n datastore = KSC_Configuration.load_configuration_datastore_version(context, template_config_id)\n instance_config = KSC_Configuration.create(template_config.name, template_config.description, \\\n context.tenant, datastore.datastore_name, \\\n template_config.datastore_version_id, ConfigurationType.INSTANCE_CONFIG)\n KSC_Configuration.create_items(instance_config.id, template_overrides)\n return instance_config\n \n @staticmethod\n def get_items_by_overrides(context,config_id,overrides):\n items = []\n for k, v in overrides.iteritems():\n items.append(models.ConfigurationParameter(configuration_id=config_id,\n configuration_key=k,\n configuration_value=v,\n deleted=False))\n return items\n \n\nclass KSC_DBConfiguration(models.DBConfiguration):\n _data_fields = ['name', 'description', 'tenant_id', 'datastore_version_id',\n 'deleted', 'deleted_at', 
'config_type']\n","sub_path":"trove/patch/configuration/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"182877235","text":"import numpy as np\nimport pandas as pd\n\nprint('Import csvs...\\n')\n\ndf1 = pd.read_csv('submits/lgbm0_sub.csv')\ndf2 = pd.read_csv('submits/lgbm1_sub.csv')\ndf3 = pd.read_csv('submits/lgbm2_sub.csv')\ndf4 = pd.read_csv('submits/lgbm3_sub.csv')\ndf5 = pd.read_csv('submits/lgbm4_sub.csv')\n#df6 = pd.read_csv('submits/panji_lgbm.csv')\n#df7 = pd.read_csv('submits/simple_averaging.csv')\n#df8 = pd.read_csv('submits/lgbm_50m_submit.csv')\n\nmodels = { 'df1': {\n 'name': 'mylgbm',\n 'score': 97.98,\n 'df':df1},\n 'df2': {\n 'name': 'mylgbm_w_nowday',\n 'score': 97.98,\n 'df': df2},\n 'df3': {\n 'name': 'asrafuls_lgbm',\n 'score': 97.8,\n 'df': df3},\n 'df4': {\n 'name': 'fm_ftrl',\n 'score': 97.72,\n 'df': df4},\n 'df5': {\n 'name': 'baris',\n 'score': 97.98,\n 'df': df5},\n# 'df6': {\n# 'name': 'panji',\n# 'score': 97.98,\n# 'df': df6},\n# 'df7': {\n# 'name': 'averging',\n# 'score': 97.98,\n# 'df': df7},\n# 'df8': {\n# 'name': 'mylgbm_50m',\n# 'score': 97.98,\n# 'df': df8},\n }\nisa_lg = 0\nisa_hm = 0\nisa_am = 0\n\nfor df in models.keys():\n isa_lg += np.log(models[df]['df'].is_attributed)\n isa_hm += 1/(models[df]['df'].is_attributed)\n isa_am += models[df]['df'].is_attributed\n\nisa_lg = np.exp(isa_lg/5)\nisa_hm = 5/isa_hm\nisa_am = isa_am/5\n\nsub_am = pd.DataFrame()\nsub_am['click_id'] = df1['click_id']\nsub_am['is_attributed'] = isa_am\nprint(df1.head(), df2.head(), sub_am.head())\n\nisa_fin = (isa_am + isa_hm + isa_lg)/3\n\nsub_fin = pd.DataFrame()\nsub_fin['click_id'] = df1['click_id']\nsub_fin['is_attributed'] = isa_fin\n\nprint('Saving...')\nsub_fin.to_csv('submits/blended.csv', index=False, float_format='%.9f')\n","sub_path":"talkingdata/blend.py","file_name":"blend.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"382073718","text":"#for accessing websites\nfrom selenium import webdriver\n#for structuring data\nimport pandas as pd\n#for timings and delays\nimport time\n# Connecting to chrome\ndriver=webdriver.Chrome(executable_path=\"/Users/khushibansal/Documents/chromedriver-2\")\n# connecting to thw website\ndef search_cheap_ticket(city_from,city_to,start_date,end_date):\n kayak='https://www.kayak.co.in/flights/'+city_from+'-'+city_to+'/'+start_date+'/'+end_date+'?sort=price_a'\n driver.get(kayak)\n print('loading...')\n time.sleep(20)\n print('Data got fetched!!')\n print('\\n')\n#fetching and storing data from the website\ndf=pd.DataFrame()\ndef compile_data():\n global airlines_list\n global price_list\n global details_list\n #fetching airline names\n airlines = driver.find_elements_by_xpath(\"//span[@class='codeshares-airline-names']\")\n airlines_list = [value.text for value in airlines]\n print(airlines_list)\n print('\\n')\n #fetching airline prices\n prices = driver.find_elements_by_xpath(\"//a[@class='book-direct-text']\")\n price_list = [int((value.text.split()[1].replace(\"'\",\"\")).replace(\",\",\"\")) for value in prices]\n print(price_list)\n print('\\n')\n #fetching airline details\n details= driver.find_elements_by_xpath(\"//p[@style='display: none']\")\n details_list = [value.text for value in details]\n\n\n for i in range(len(airlines_list)):\n try:\n df.loc[i, 'Airline_Name'] =airlines_list[i]\n except 
Exception as e:\n pass\n try:\n df.loc[i, 'Airline_price'] =price_list[i]\n except Exception as e:\n pass\n#searching cheapest flight among the stored list of flights\ndef search_cheapest():\n min_price=min(price_list)\n index=price_list.index(min_price)\n return index\n#getting start date & end date (format [yyyy-mm-dd)]) and from city & to city from user\nstart_date=input('enter the start date:')\nend_date=input('enter the end date:')\ncity_from=input('enter the city from:')\ncity_to=input('enter the city to:')\nsearch_cheap_ticket(city_from,city_to,start_date,end_date)\ncompile_data()\n#printing the result\nprint('the cheapest ticket available from {} to {} is:'.format(city_from,city_to))\nprint('\\n')\nindex_value=search_cheapest()\nprint(df.loc[index_value])\nprint('\\n')\nprint(\"The complete flight information is here :\")\nprint(details_list[index_value])\nprint('\\n')\ndriver.quit()\nprint('THANK YOU!!!')","sub_path":"flightScrapper.py","file_name":"flightScrapper.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"152806681","text":"import dace\nimport numpy as np\n\n\n@dace.program\ndef transients(A: dace.float32[10]):\n ostream = dace.define_stream(dace.float32, 10)\n oscalar = dace.define_local_scalar(dace.int32)\n oarray = dace.define_local([10], dace.float32)\n oarray[:] = 0\n oscalar = 0\n for i in dace.map[0:10]:\n if A[i] >= 0.5:\n A[i] >> ostream(-1)\n oscalar += 1\n ostream >> oarray\n return oscalar, oarray\n\n\ndef test_transients():\n A = np.random.rand(10).astype(np.float32)\n scal, arr = transients(A)\n if scal[0] > 0:\n assert((arr[0:scal[0]] >= 0.5).all())\n assert((arr[scal[0]:] == 0).all())\n\n\nif __name__ == \"__main__\":\n test_transients()\n","sub_path":"tests/python_frontend/transients_test.py","file_name":"transients_test.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"395249672","text":"import dwavebinarycsp\nfrom dwave.system.samplers import DWaveSampler\nfrom dwave.system.composites import EmbeddingComposite\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nimport dimod\n\n# Represent the map as the nodes and edges of a graph\nfaces = ['213', '314', '423', '142', '243', '124', '413', '312']\npieces = [2, 1, 3 , 3, 1, 4 , 4, 2, 3 , 1, 4, 2 , 2, 4, 3 , 2, 4, 1 , 4, 1, 3 , 3, 1, 2]\npieces = [2,1,3, 4,3,1, 4,2,3, 4,2,1, 4,3,2, 2,4,1, 1,3,4, 1,2,3]\n\ndef createNames(base, count):\n return [base+str(i)+'_' for i in range(count)]\n\n# Function for the constraint that two nodes with a shared edge not both select one color\ndef not_all_4(i, j, v, u):\n return not (i and j and v and u)\n\ndef not_all_2(i, v):\n return not (i and v)\n\ndef if_a_and_b_then_c(a, b, c):\n return not a or not b or c\n\ndef createToupleSet(nval):\n return { ((0,)*i) + (1,) + ((0,)*(nval - i - 1)) for i in range(nval) }\n result = set()\n for i in range(nval):\n value = ((0,)*i) + (1,) + ((0,)*(nval - i - 1))\n result.add(value)\n return result\n\n# Valid configurations for the constraint that each node select a single color\nnrotval = 3\nrotval = createToupleSet(nrotval)\nrotnames = createNames('rot', 8)\n\nnindval = 7 # 8\nindval = createToupleSet(nindval)\nindnames = createNames('ind', 8)\n\nnfaceval = 4\nfaceval = createToupleSet(nfaceval)\nfacenames = createNames('face', 12)\n\n# Create a binary constraint satisfaction problem\ncsp = 
dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n\n# Add constraint that each node (province) select a single color\nfor name in rotnames:\n variables = [name+str(i) for i in range(nrotval)]\n csp.add_constraint(rotval, variables)\nfor name in indnames:\n variables = [name+str(i) for i in range(nindval)]\n csp.add_constraint(indval, variables)\nfor name in facenames:\n variables = [name+str(i+1) for i in range(nfaceval)]\n csp.add_constraint(faceval, variables)\n\ndef matchingFace(face):\n return (4 - face) if (face == 1 or face == 3) else face\n\ndef faceMatch(iFace, jFace):\n return jFace == matchingFace(iFace)\n# return (jFace + iFace == 4) if (iFace == 1 or iFace == 3) else (jFace == iFace)\n\ndef face(i, iRot):\n return pieces[3 * (i % 8) + iRot % 3]\n\ndef match(i, iRot, j, jRot):\n return faceMatch(face(i, iRot), face(j, jRot))\n\ncountI = [0 for i in range(len(indnames))]\n\ndef addConstraints(i, iRot, j, jRot, constraintIndex):\n facename = facenames[constraintIndex]\n indi = indnames[i]\n v = rotnames[i]\n indj = indnames[j]\n u = rotnames[j]\n countI[i] += 1\n countI[j] += 1\n for ii in range(nindval):\n for ri in range(nrotval):\n fi = face(ii, ri + iRot)\n fj = matchingFace(face(ii, ri + jRot))\n variables = [indi+str(ii), v+str(ri), facename+str(fi)]\n csp.add_constraint(if_a_and_b_then_c, variables)\n variables = [indj+str(ii), u+str(ri), facename+str(fj)]\n csp.add_constraint(if_a_and_b_then_c, variables)\n\nconstraintIndex = 0\nfor j in range(8):\n if (j & 1):\n i = ((j & 2) >> 1) ^ ((j & 4) >> 2)\n addConstraints(j - 1, 2 - i, j, 1 + i, constraintIndex)\n constraintIndex += 1\n if (j & 2):\n i = (j & 1) ^ ((j & 4) >> 2)\n addConstraints(j - 2, 1 + i, j, 2 - i, constraintIndex)\n constraintIndex += 1\n if (j & 4):\n addConstraints(j - 4, 0, j, 0, constraintIndex)\n constraintIndex += 1\n\n# Add constraint that each pair of nodes with a shared edge not both select one color\n# for neighbor in neighbors:\n# v, u = neighbor\n# for i in range(colors):\n# variables = [v+str(i), u+str(i)]\n# csp.add_constraint(not_both_1, variables)\n\n# Convert the binary constraint satisfaction problem to a binary quadratic model\nprint(len(csp.constraints))\n\nbqm = dwavebinarycsp.stitch(csp)\n\n# Set up a solver using the local system’s default D-Wave Cloud Client configuration file\n# and sample 50 times\n\n# bqm = dimod.BinaryQuadraticModel.\n\nprint(len(bqm))\n\n# sampler = dimod.reference.samplers.RandomSampler()\n# response = sampler.sample(bqm, num_reads=10)\n\nsampler = dimod.reference.samplers.SimulatedAnnealingSampler()\nresponse = sampler.sample(bqm)\n\n# sampler = EmbeddingComposite(DWaveSampler()) # doctest: +SKIP\n# response = sampler.sample(bqm, num_reads=4) # doctest: +SKIP\n\nsample = next(response.samples()) # doctest: +SKIP\nif not csp.check(sample): # doctest: +SKIP\n print(\"Failed to color map\")\nelse:\n print(\"Solved!\")\n\n\n# Function that plots a returned sample\ndef plot_map(sample):\n color_map = {}\n for name in indnames:\n for i in range(nindval):\n if sample[name+str(i)]:\n color_map[name] = i\n print('ind: ' + str(color_map))\n color_map = {}\n for name in rotnames:\n for i in range(nrotval):\n if sample[name+str(i)]:\n color_map[name] = i\n print('rot: ' + str(color_map))\n\nplot_map(sample)\n","sub_path":"diabolik1.py","file_name":"diabolik1.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"593388431","text":"import requests\nimport 
json\nimport time\nimport os\nimport subprocess\nfrom random import randrange\n\ndef get_video(url, file_name):\n r = requests.get(url, stream=True) # create HTTP response object\n with open(str(file_name), 'wb') as f:\n for chunk in r.iter_content(chunk_size=2048):\n if chunk:\n f.write(chunk)\n\n return file_name\n\n\ndef convert_time(minutes):\n return time.strftime('%H:%M:%S', time.gmtime(minutes))\n\n\ndef splitVideo(file_name, length_video, time_need):\n stt_from = 0\n stt = 1\n is_break = False\n\n while True:\n from_time = convert_time(stt_from)\n to_time = convert_time(time_need)\n\n if stt_from + time_need >= length_video:\n to_time = convert_time(time_need + length_video - stt_from)\n is_break = True\n\n string = 'ffmpeg -noaccurate_seek -ss ' + from_time + ' -i ' + file_name + ' -to ' + to_time + ' -c copy input/input' + str(stt) + '.mp4'\n print(string)\n os.system(string)\n\n if is_break:\n return stt\n\n stt_from = stt_from + time_need + 1\n stt = stt + 1\n\n\ndef get_data_file(file_name):\n path_file = file_name\n fo = open(path_file, \"r\")\n lines = fo.readlines()\n fo.close()\n stt_video = ''\n\n if len(lines) > 0:\n stt_video = lines[0]\n\n return stt_video\n\n\ndef getLengthVideo(input_video):\n string = 'ffprobe -i ' + input_video + ' -show_entries format=duration -v quiet -of csv=\"p=0\"'\n\n result = subprocess.getoutput(string)\n\n return round(float(result), 0)\n\n\ndef getSourceVideo(id):\n access_token = get_data_file(\"access_token.txt\")\n url = \"https://graph.facebook.com/v3.3/\" + str(id) + \"?fields=source&access_token=\" + access_token\n req = requests.get(url)\n\n datas = json.loads(req.content)\n return datas['source']\n\n\ndef getSourceVideoByPage(id):\n access_token = get_data_file(\"access_token.txt\")\n url = \"https://graph.facebook.com/v3.3/\" + str(id) + \"/videos?fields=source&access_token=\" + access_token\n req = requests.get(url)\n\n datas = json.loads(req.content)\n\n return datas['data']\n\n\n\ndef rename_ts():\n for i in range(3, 4):\n pwd = os.getcwd() + '/input'\n\n filelist = os.listdir(pwd)\n list_file = []\n\n for ficher in filelist[:]:\n if (ficher.endswith('.ts')):\n list_file.append(ficher)\n\n stt = 1\n for file in list_file:\n item = pwd + '/' + file\n os.rename(item, pwd + '/' + str(stt) + '.ts')\n stt += 1\n\n\ndef convert_ts():\n for i in range(3, 4):\n pwd = os.getcwd() + '/input'\n\n filelist = os.listdir(pwd)\n list_file_delete_1 = []\n for fichier in filelist[:]:\n if (fichier.endswith('.mp4')):\n list_file_delete_1.append(fichier)\n\n stt = randrange(0, 1000, 2)\n\n for file in list_file_delete_1:\n item = pwd + '/' + file\n\n string = \"ffmpeg -i \" + item + \" -c copy -bsf:v h264_mp4toannexb -f mpegts input/\" + str(stt) + \".ts\"\n\n os.system(string)\n stt = randrange(0, 1000, 2)\n\n\nif __name__ == '__main__':\n # ffprobe -i ' + input_video + ' -show_entries format=duration -v quiet -of csv=\"p=0\"\n\n option = str(input(\"Find by id(0) - page(1) ? 
\"))\n\n if option == \"1\":\n i = 0\n id = str(input(\"Id page: \"))\n sources = getSourceVideoByPage(id)\n\n if len(sources) > 0:\n print(\"Downloading video...\")\n\n for i in range(len(sources)):\n source = sources[i][\"source\"]\n file_name = get_video(source, 'input/input' + str(i + 1) + '.mp4')\n\n else:\n id = str(input(\"Id: \"))\n source = getSourceVideo(id)\n\n print(\"Downloading video...\")\n file_name = get_video(source, 'input.mp4')\n file_name = 'input.mp4'\n length_video = getLengthVideo(file_name)\n\n splitVideo(file_name, length_video, 400)\n\n convert_ts()\n os.system(\"rm -rf input.mp4\")\n os.system(\"rm -rf input/*.mp4\")\n\n option = str(input(\"Rename ? \"))\n\n if option == \"1\":\n rename_ts()\n","sub_path":"download-video-from-fb/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"521911312","text":"from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Site modules\n url(r'^', include('home.urls')),\n url(r'^japanese', include('japanese.urls')),\n url(r'^codekata', include('codekata.urls')),\n url(r'^photos', include('photos.urls')),\n url(r'^sketches', include('sketches.urls')),\n url(r'^reading', include('reading.urls')),\n\n # Uncomment the next line to enable the admin:\n # when changing the base context path for admin be sure to modify app_context_processors.py\n url(r'^manage/', include(admin.site.urls)),\n)\n\nhandler404 = \"views.handle404\"\nhandler500 = \"views.handle500\"\nhandler403 = \"views.handle403\"\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"300194131","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nfrom dataset.mnist import load_mnist\r\nfrom two_layers_net_with_backpropagation import TwoLayerNet\r\nimport matplotlib.pyplot as plt\r\n\r\n# loading dataset\r\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)\r\n\r\ntrain_loss_list = []\r\ntrain_acc_list = []\r\ntest_acc_list = []\r\n\r\n# hyper-parameters\r\niters_num = 10000\r\ntrain_size = x_train.shape[0]\r\nbatch_size = 100\r\nlearning_rate = 0.1\r\niter_per_epoch = max(train_size / batch_size, 1)\r\n\r\n# create network\r\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\r\n\r\n# training\r\nfor i in range(iters_num):\r\n \r\n # get mini-batch\r\n batch_mask = np.random.choice(train_size, batch_size)\r\n x_batch = x_train[batch_mask]\r\n t_batch = t_train[batch_mask]\r\n \r\n # calculate gradient with backpropagation\r\n grad = network.gradient(x_batch, t_batch)\r\n \r\n # updating gradient\r\n for key in (\"W1\", \"b1\", \"W2\", \"b2\"):\r\n network.params[key] -= learning_rate * grad[key]\r\n \r\n # recording training results\r\n loss = network.loss(x_batch, t_batch)\r\n train_loss_list.append(loss)\r\n \r\n # check accuracy by 1 epoch\r\n if i % iter_per_epoch == 0:\r\n train_acc = network.accuracy(x_train, t_train)\r\n test_acc = network.accuracy(x_test, t_test)\r\n train_acc_list.append(train_acc)\r\n test_acc_list.append(test_acc)\r\n print(\"step : \", i)\r\n 
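# NOTE (added, illustrative sketch only): backprop gradients are commonly\r\n        # sanity-checked once against numerical gradients before training, e.g.\r\n        # assuming TwoLayerNet also exposes a numerical_gradient() helper:\r\n        #   grad_num = network.numerical_gradient(x_batch, t_batch)\r\n        #   grad_bp = network.gradient(x_batch, t_batch)\r\n        #   diff = np.average(np.abs(grad_bp[\"W1\"] - grad_num[\"W1\"]))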
print(\"train acc : \", train_acc)\r\n print(\"test acc : \", test_acc)\r\n print()\r\n \r\n# visualizing training results by step\r\nfig, axes = plt.subplots(1, 3, figsize=(20, 5))\r\n\r\n# train loss graph\r\nx1 = np.arange(0, iters_num)\r\ny1 = train_loss_list\r\naxes[0].plot(x1, y1)\r\naxes[0].set_title(\"train_loss\")\r\n\r\n# train accuracy graph\r\nx2 = np.arange(0, len(train_acc_list))\r\ny2 = train_acc_list\r\naxes[1].plot(x2, y2)\r\naxes[1].set_title(\"train_accuracy\")\r\n\r\n# test accuracy graph\r\nx3 = np.arange(0, len(test_acc_list))\r\ny3 = test_acc_list\r\naxes[2].plot(x3, y3)\r\naxes[2].set_title(\"test_accuracy\")\r\n","sub_path":"01. Tensorflow & Deep Learning Basics/0. Deep Learning From Scratch/neural_net_implementation/train_neural_net_with_backpropagation.py","file_name":"train_neural_net_with_backpropagation.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"117520333","text":"import requests\nimport database\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef extract():\n ''' Extract data from API '''\n req = requests.get('http://dataeng.quero.com:5000/caged-data')\n req.raise_for_status()\n try:\n return req.json()['caged']\n except KeyError:\n raise KeyError('Conteúdo do retorno da API desconhecido')\n\n\ndef transform(json_data):\n ''' Data transformation stage '''\n transformed_data = pd.DataFrame(json_data)\n float_columns = ['salario', 'saldo_movimentacao']\n for column in float_columns:\n transformed_data[column] = pd.to_numeric(\n transformed_data[column].str.replace(',', ''))\n return transformed_data.to_dict(orient='records')\n\n\ndef load(data):\n ''' Insert or update data in the target database '''\n db_engine = database.initialize()\n db_session = sessionmaker(bind=db_engine)()\n generator_data = (database.models.Caged(**caged) for caged in data)\n for caged in tqdm(generator_data, total=len(data)):\n db_session.merge(caged)\n db_session.commit()\n\n\ndef run():\n try:\n print('[+] Extraindo dados')\n data = extract()\n except Exception as err:\n raise Exception('[-] Não foi possível acessar a API: ' + str(err))\n try:\n print('[+] Tratando dados coletados')\n data = transform(data)\n except Exception as err:\n raise Exception('[-] Não foi possível tratar os dados: ' + str(err))\n try:\n print('[+] Gravando os dados tratados')\n load(data)\n except Exception as err:\n raise Exception('[-] Não foi possível gravar no banco de dados: ' + str(err))\n\n\nif __name__ == '__main__':\n try:\n run()\n print('[+] Processo finalizado com sucesso!')\n except Exception as err:\n print(err)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"150407802","text":"import tensorflow as tf\r\n\r\nfrom models import base_model\r\nfrom models.actor_critic.base_actor_critic import BaseActorCritic\r\nfrom modelHelpers import tensorflow_reward_manager\r\nimport numpy as np\r\n\r\n\r\nclass PolicyGradient(BaseActorCritic):\r\n max_gradient = 1.0\r\n total_loss_divider = 1.0\r\n\r\n def __init__(self, session,\r\n state_dim,\r\n num_actions,\r\n player_index=-1,\r\n action_handler=None,\r\n is_training=False,\r\n optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01),\r\n summary_writer=None,\r\n summary_every=100,\r\n config_file=None,\r\n discount_factor=0.99, # discount future 
rewards\r\n ):\r\n self.reward_manager = tensorflow_reward_manager.TensorflowRewardManager(state_dim)\r\n\r\n super().__init__(session, state_dim, num_actions, player_index, action_handler, is_training, optimizer,\r\n summary_writer, summary_every, config_file, discount_factor)\r\n\r\n def printParameters(self):\r\n super().printParameters()\r\n print('policy gradient parameters:')\r\n print('max gradient allowed:', self.max_gradient)\r\n print('amount to squash total loss:', self.total_loss_divider)\r\n\r\n def load_config_file(self):\r\n super().load_config_file()\r\n try:\r\n self.max_gradient = self.config_file.getint(base_model.MODEL_CONFIGURATION_HEADER,\r\n 'max_gradient')\r\n except:\r\n print('unable to load max_gradient')\r\n try:\r\n self.max_gradient = self.config_file.getint(base_model.MODEL_CONFIGURATION_HEADER,\r\n 'total_loss_divider')\r\n except:\r\n print('unable to load total_loss_divider')\r\n\r\n def get_input(self, model_input=None):\r\n if model_input is None:\r\n return super().get_input(self.input)\r\n else:\r\n return super().get_input(model_input)\r\n\r\n def create_training_op(self, logprobs, taken_actions):\r\n critic_gradients, critic_loss, critic_reg_loss = self.create_critic_gadients()\r\n actor_gradients, actor_loss, actor_reg_loss = self.create_actor_gradients(logprobs, taken_actions)\r\n\r\n tf.summary.scalar(\"total_reg_loss\", critic_reg_loss + actor_reg_loss)\r\n\r\n return self._compute_training_op(actor_gradients, critic_gradients)\r\n\r\n def create_advantages(self):\r\n # compute advantages A(s) = R - V(s)\r\n return tf.reduce_sum(self.discounted_rewards - self.estimated_values, name='advantages')\r\n\r\n def create_actor_gradients(self, logprobs, taken_actions):\r\n advantages = self.create_advantages()\r\n\r\n actor_reg_loss = self.get_regularization_loss(self.all_but_last_actor_layer, prefix=\"actor_hidden\")\r\n indexes = np.arange(0, len(self.action_handler.get_action_sizes()), 1).tolist()\r\n\r\n result = self.action_handler.run_func_on_split_tensors([indexes,\r\n logprobs,\r\n taken_actions,\r\n advantages,\r\n self.last_row_variables],\r\n self.create_split_actor_loss,\r\n return_as_list=True)\r\n\r\n merged_gradient_list = []\r\n total_loss = 0\r\n for item in result:\r\n merged_gradient_list += item[0]\r\n total_loss += item[1]\r\n\r\n tf.summary.scalar(\"total_actor_loss\", tf.reduce_mean(total_loss))\r\n\r\n total_loss = total_loss / self.total_loss_divider\r\n\r\n total_loss += actor_reg_loss\r\n\r\n # total_loss = tf.Print(total_loss, [total_loss], 'total_loss')\r\n\r\n total_loss = tf.identity(total_loss, 'total_actor_loss_with_reg')\r\n\r\n all_but_last_row = self.all_but_last_actor_layer\r\n\r\n total_loss = tf.check_numerics(total_loss, 'nan loss is being created')\r\n # total_loss = tf.Print(total_loss, [total_loss], 'total_loss')\r\n\r\n actor_gradients = self.optimizer.compute_gradients(total_loss,\r\n all_but_last_row)\r\n\r\n merged_gradient_list += actor_gradients\r\n\r\n return merged_gradient_list, total_loss, actor_reg_loss\r\n\r\n def create_split_actor_loss(self, index, logprobs, taken_actions, advantages, actor_network_variables):\r\n if len(taken_actions.get_shape()) == 2:\r\n taken_actions = tf.squeeze(taken_actions, axis=[1])\r\n\r\n # calculates the entropy loss from getting the label wrong\r\n cross_entropy_loss, wrongNess, reduced = self.calculate_loss_of_actor(logprobs, taken_actions, index)\r\n if not reduced:\r\n tf.summary.histogram('actor_wrongness', wrongNess)\r\n with 
tf.name_scope(\"compute_pg_gradients\"):\r\n pg_loss = cross_entropy_loss * (wrongNess * wrongNess)\r\n\r\n pg_loss = tf.check_numerics(pg_loss, 'nan pg_loss')\r\n\r\n if reduced:\r\n pg_loss = tf.reduce_mean(pg_loss, name='pg_loss')\r\n tf.summary.scalar(\"actor_x_entropy_loss\", cross_entropy_loss)\r\n else:\r\n tf.summary.scalar(\"actor_x_entropy_loss\", tf.reduce_mean(cross_entropy_loss))\r\n\r\n actor_reg_loss = self.get_regularization_loss(actor_network_variables, prefix=\"actor\")\r\n\r\n actor_loss = pg_loss + actor_reg_loss * self.reg_param\r\n\r\n # compute actor gradients\r\n actor_gradients = self.optimizer.compute_gradients(actor_loss,\r\n [actor_network_variables])\r\n\r\n # compute policy gradients\r\n for i, (grad, var) in enumerate(actor_gradients):\r\n if grad is not None:\r\n actor_gradients[i] = (grad * advantages, var)\r\n\r\n if reduced:\r\n tf.summary.scalar(\"actor_loss\", actor_loss)\r\n else:\r\n tf.summary.scalar(\"actor_loss\", tf.reduce_mean(actor_loss))\r\n return [actor_gradients, actor_loss]\r\n\r\n def create_critic_gadients(self):\r\n critic_reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in self.critic_network_variables],\r\n name='critic_reg_loss')\r\n\r\n tf.summary.scalar(\"critic_reg_loss\", critic_reg_loss)\r\n\r\n # compute critic gradients\r\n mean_square_loss = tf.reduce_mean(tf.square(self.discounted_rewards - self.estimated_values), name='mean_square_loss')\r\n\r\n critic_loss = mean_square_loss + self.reg_param * critic_reg_loss\r\n tf.summary.scalar(\"critic_loss\", critic_loss)\r\n critic_gradients = self.optimizer.compute_gradients(critic_loss, self.critic_network_variables)\r\n return (critic_gradients, critic_loss, critic_reg_loss)\r\n\r\n def add_histograms(self, gradients):\r\n # summarize gradients\r\n for grad, var in gradients:\r\n tf.summary.histogram(var.name, var)\r\n if grad is not None:\r\n tf.summary.histogram(var.name + '/gradients', grad)\r\n\r\n # emit summaries\r\n tf.summary.histogram(\"estimated_values\", self.estimated_values)\r\n\r\n def _compute_training_op(self, actor_gradients, critic_gradients):\r\n # collect all gradients\r\n gradients = actor_gradients + critic_gradients\r\n\r\n # clip gradients\r\n for i, (grad, var) in enumerate(gradients):\r\n # clip gradients by norm\r\n if grad is not None:\r\n gradients[i] = (tf.clip_by_norm(grad, self.max_gradient), var)\r\n\r\n self.add_histograms(gradients)\r\n # training update\r\n with tf.name_scope(\"train_actor_critic\"):\r\n # apply gradients to update actor network\r\n return self.optimizer.apply_gradients(gradients)\r\n\r\n def create_reward(self):\r\n return None\r\n\r\n def discount_rewards(self, input_rewards, input):\r\n return self.reward_manager.create_reward_graph(input)\r\n\r\n #def parse_actions(self, taken_actions):\r\n # return tf.cast(self.action_handler.create_indexes_graph(taken_actions), tf.int32)\r\n\r\n def run_train_step(self, calculate_summaries, input_states, actions, rewards=None):\r\n # perform one update of training\r\n if self.batch_size > self.mini_batch_size:\r\n self.sess.run([self.input, self.taken_actions, self.iterator.initializer],\r\n feed_dict={self.input_placeholder: input_states, self.taken_actions_placeholder: actions})\r\n\r\n counter = 0\r\n while True:\r\n try:\r\n result, summary_str = self.sess.run([\r\n self.train_op,\r\n self.summarize if calculate_summaries else self.no_op\r\n ])\r\n # emit summaries\r\n if calculate_summaries:\r\n self.summary_writer.add_summary(summary_str, self.train_iteration)\r\n 
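# the iteration counter below advances once per processed mini-batch, whether or not summaries were written\r\n                    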
self.train_iteration += 1\r\n                    counter += 1\r\n                except tf.errors.OutOfRangeError:\r\n                    #print(\"End of training dataset.\")\r\n                    break\r\n            print('batch amount:', counter)\r\n        else:\r\n            result, summary_str = self.sess.run([\r\n                self.train_op,\r\n                self.summarize if calculate_summaries else self.no_op\r\n            ],\r\n                feed_dict={\r\n                    self.input_placeholder: input_states,\r\n                    self.taken_actions_placeholder: actions\r\n                })\r\n            # emit summaries\r\n            if calculate_summaries:\r\n                self.summary_writer.add_summary(summary_str, self.train_iteration,\r\n                                                )\r\n            self.train_iteration += 1\r\n\r\n        return None, None\r\n\r\n\r\n    def get_model_name(self):\r\n        return 'a_c_policy_gradient' + ('_split' if self.action_handler.is_split_mode else '') + str(self.num_layers) + '-layers'\r\n\r\n    def calculate_loss_of_actor(self, logprobs, taken_actions, index):\r\n        \"\"\"\r\n        Calculates the cross-entropy loss of the actor for the actions that were taken.\r\n        :param logprobs: the logits produced by the policy network\r\n        :param taken_actions: the actions that were actually taken\r\n        :param index: index of the action split this loss belongs to\r\n        :return: (loss tensor, wrongness weight, True if the result is already reduced to a scalar)\r\n        \"\"\"\r\n        return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logprobs,\r\n                                                              labels=taken_actions), 1.0, True\r\n\r\n","sub_path":"models/actor_critic/policy_gradient.py","file_name":"policy_gradient.py","file_ext":"py","file_size_in_byte":10724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"189599852","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb  1 20:00:44 2016\n\nFunctions in Python\n\n@author: Kirby Urner\n\"\"\"\n\ndef section(title):\n    \"\"\"center in a string 60 wide padded with =\"\"\"\n    return \"\\n{:=^60}\\n.\".format(title)\n\nprint(section(\"<<<< local, nonlocal, global variables >>>>\"))\n\n# globals\nSTAR = \"Sirius\"  # Polaris\nFavorites = [ ]\nX = 100\n\ndef setStar(name):\n    global STAR\n    STAR = name\n    Favorites.append(name)\n\nprint(\"STAR: \", STAR)\nsetStar(\"Polaris\")\nprint(\"STAR: \", STAR)\n\ndef outerF():\n    X = 10\n    print(\"Inside outerF:\", X)\n    def innerF():\n        nonlocal X  # global or nonlocal?\n        print(\"Inside InnerF, X:\", X)  # expect 10\n        X = \"Infinity\"\n        print(\"Inside InnerF, X:\", X)\n    innerF()\n    return X\n\nprint(\"Returned from outerF: \", outerF())\nprint(\" Global X: \", X)\n\nprint(section(\"<<<< Scattering and Gathering with * and ** >>>>\"))\n\ndef eats(*foods):  # gather positional args in a tuple \n    print(\"foods: \", foods)  # foods is a tuple now\n\nprint(\"Tuple of positional arguments to eats():\")\n\n# open-ended number of positional arguments passed in...\neats(\"Spaghetti\", \"Oysters\", \"Chili\", \"Crackers\", \"Rice\")\n \ndef pretty(*names):\n    for name in names:  # looping over the tuple\n        print(\"Name: {:>20}\".format(name))\n\nprint(\"Some US presidential candidates:\")\npretty(\"Bernie Sanders\", \"Donald Trump\", \"Hillary Clinton\",\n       \"Ted Cruz\")  # pretty() prints and returns None, so no need to wrap it in print()\n \ndef example(*args, **kwargs):  # gather keyword args in a dict\n    \"\"\"\n    (* ) convert positionals --> tuple\n    (**) convert keyword args --> dict\n    \"\"\"\n    for arg in args:  # loop over the tuple\n        print(arg, sep=\", \", end=\"\")\n    print()\n    for key, value in kwargs.items():  # ...now the dict\n        print(\"Arg name:\",key,\"Value: \", value)\n\n# positional + keyword (named) arguments\nexample( 1,2,3,4, on_vacation=True, at_work=False )\n\n# same thing using \"exploders\" * and **\nexample( *(1,2,3,4), **dict(on_vacation=False, at_work=True) )\n\nprint(section(\"<<<< GNU Math section >>>>\"))\n\n# note: the int( ) type *is* \"base aware\"\nprint(\"======= int as multi-base ========\")\nprint(\"Convert from Base 2:\", 
int(\"1001010\",2))\nprint(\"Convert from Base 16:\", int(\"AFF2\", 16))\n\nprint(\"======= Top-Level Functions ========\")\n#=== functions that return and/or eat functions...\ndef addLetter(letters): # <-- pass in a string\n \"\"\"\n A function factory that builds and returns \n function objects. L is a function that will\n add whatever letters are passed in to be the\n ending letters.\n \"\"\"\n def L(s):\n return s + letters # <--- concatenation!\n return L\n \nadd_s = addLetter(\"s\")\nadd_ed = addLetter(\"ed\")\n\nprint(\"add_s('cat')\", add_s('cat'))\nprint(\"add_ed('show')\", add_ed('show'))\n\ndef compose(g, f):\n \"\"\"Take two functions as inputs and return a\n function that's their composition\"\"\"\n def newfunc(x):\n return g(f(x))\n return newfunc\n\n# input function\ndef G(n):\n return n + 2\n\n# input function\ndef F(n):\n return n * 2\n\n# compare:\nH = compose(G, F) # build a 3rd function from 1 & 2\nprint(\"G(F(x)):\", H(100)) # G(F(x))\n\n# ... now with \nH = compose(F, G)\nprint(\"F(G(x)):\", H(100)) # F(G(x))\n\n\nprint(\"======= Totient / Totatives ========\")\n# ====== GNU Math section ========\n#\n# (a pun on New Math ala Tom Lehrer Youtube\n# see: make_links_v2.py)\n\ndef gcd(a, b):\n \"\"\"\n Euclid's Method for finding the GCD\n \"\"\"\n while b: # <--- while loop!\n a, b = b, a%b\n return a\n\ndef totatives(N):\n # list comprehension!\n return [x for x in range(1,N) if gcd(x,N)==1]\n \ndef totient(N):\n \"\"\"\n Returns the number of numbers between (1, N) that \n have no factors in common with N: called the 'totient\n of N -- called the totient of N.\n \"\"\"\n return len(totatives(N))\n\nprint(\"Totient of 100:\", totient(100))\nprint(\"Totient of 1000:\", totient(1000))\n\n\n\n\n\n\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"381267388","text":"import cv2, os\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nimport numpy as np\n\n\nimport utils, time\nfrom base_viewer import BaseViewer\nfrom main_function import main_function\nfrom enum import Enum\n\n\nclass PhotoViewer(BaseViewer):\n\n def __init__(self, parent, window):\n super().__init__(parent, window)\n self.mask = None\n\n\n #def enterEvent(self, event):\n #cursor_coord_x, cursor_coord_y = self.widget_to_img_pos(event.pos().x(), event.pos().y())\n #start_point = (cursor_coord_x, cursor_coord_y)\n #print(start_point)\n\n\n def mousePressEvent(self, event):\n super().mousePressEvent(event)\n if event.buttons() == Qt.LeftButton:\n if self.image is not None:\n current_color = self.colors[self.window.selection_criterion.currentText()].value\n cursor_coord_x, cursor_coord_y = self.widget_to_img_pos(event.pos().x(), event.pos().y())\n target_pixel = self.image_slic[cursor_coord_y][cursor_coord_x]\n if target_pixel == 0:\n current_image_slic = np.where(self.image_slic == target_pixel, self.image_slic, 777)\n current_image_slic = np.where(current_image_slic != target_pixel, current_image_slic, 1)\n current_image_slic = np.where(current_image_slic == 1, current_image_slic, 0)\n else:\n current_image_slic = np.where(self.image_slic == target_pixel, self.image_slic, 0) // target_pixel\n temp_trimap = self.image_trimap[cursor_coord_y][cursor_coord_x]\n current_image_slic_3 = np.zeros(self.image.shape, dtype=np.uint8)\n\n if temp_trimap[0] != 255 or temp_trimap[1] != 255 or temp_trimap[2] != 255:\n self.image_trimap = 
self.image_trimap.astype(np.int32)\n for i in range(3):\n self.image_trimap[:, :, i] = self.image_trimap[:, :, i] + current_image_slic * 255\n self.image_trimap[:, :, i] = np.where(self.image_trimap[:, :, i] < 255,\n self.image_trimap[:, :, i], 255)\n self.image_trimap = self.image_trimap.astype(np.uint8)\n\n for i in range(3):\n current_image_slic_3[:, :, i] = current_image_slic\n self.image_trimap[:, :, i] = self.image_trimap[:, :, i] + current_image_slic\n\n color_mask = (current_color * current_image_slic_3).astype(np.uint8)\n self.image_trimap = self.image_trimap + color_mask\n trimap_for_stack = self.image_trimap.copy()\n self.stack.append(trimap_for_stack)\n if len(self.stack) > 10:\n self.stack = self.stack[1:]\n self.updatePhoto(self.image_orig)\n self.update()\n\n #============================================================================================\n\n #image_bytes = self.image.tobytes()\n #shape0, shape1, shape2 = self.image.shape\n #result_image_bytes = main_function(image_bytes, cursor_coord_x, cursor_coord_y, wand, antialiasing, edges, threshold, mode, criterion, shape0, shape1, shape2)\n #self.image = np.frombuffer(result_image_bytes, dtype = np.uint8).reshape(self.image.shape)\n # ============================================================================================\n try:\n\n self.start_photo.updatePhoto(self.image)\n self.start_photo.update()\n print('image ready!')\n except AttributeError:\n pass\n\n\n\n","sub_path":"src_progect/photo_viewer.py","file_name":"photo_viewer.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"627074429","text":"import numpy as np\nimport cv2\n\n# if you've come across this randomly on the internet\n# DO NOT USE THIS\n# DO NOT COPY AND PASTE THIS\n# AND THERE IS NO ERROR DETECTION. 
NOTHING.\n# IT'S NOT GOOD CODE.\n\n\nclass FaceDetection(object):\n # was having issues until i specified full path to these files.\n # could be a strange local opencv issue.\n face_cascade = cv2.CascadeClassifier(\n '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')\n eye_cascade = cv2.CascadeClassifier(\n '/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml')\n\n def __init__(self):\n pass\n\n def find_cascade(self, frame, cascade_classifier):\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Python: cv.HaarDetectObjects(image, cascade, storage, scale_factor=1.1, min_neighbors=3, flags=0, min_size=(0, 0)) detectedObjects\n # should probably add this to each cascade definition above.\n frame_cascades_found = cascade_classifier.detectMultiScale(\n frame_gray, 1.3, 5)\n return frame_cascades_found\n\n def rectangle_cascade(self, frame, cascade_finds, rectangle_color):\n for (x, y, w, h) in cascade_finds:\n cv2.rectangle(frame, (x, y), (x + w, y + h), rectangle_color, 2)\n return frame\n\n def find_and_rectangle_all_cascades(self, frames, cascade_classifier, rectangle_color):\n rectangle_color = tuple(reversed(rectangle_color))\n for frame_id in frames:\n faces = self.find_cascade(frames[frame_id], cascade_classifier)\n self.rectangle_cascade(frames[frame_id], faces, rectangle_color)\n return frames\n\n\nclass VideoCapture(object):\n _cap_list = {}\n _frames = {}\n\n def __init__(self, cam_list_cap):\n cam_count = 0\n for cam_info in cam_list_cap:\n cap = cv2.VideoCapture(cam_info['cam_id'])\n self._cap_list[cam_info['cam_id']] = {'cam_count': cam_count, 'cam_id': cam_info['cam_id'],\n 'cap': cap, 'cam_name': cam_info['cam_name']\n }\n cam_count += 1\n\n def cap_frame(self, cap):\n (ret, frame) = cap.read()\n return frame\n\n def grab_all_cam_frames(self):\n for cam_id in self._cap_list:\n cap = self._cap_list[cam_id]['cap']\n self._frames[cam_id] = self.cap_frame(cap)\n\n def release(self, cap):\n cap.release()\n\n def release_all(self):\n for cam_id in self._cap_list:\n cap = self._cap_list[cam_id]['cap']\n self.release(cap)\n\n\n# rprogram starts now.\nvco = VideoCapture([{'cam_id': 0, 'cam_name': 'left'},\n {'cam_id': 1, 'cam_name': 'right'}])\n\nfdo = FaceDetection()\n\n\nwhile True:\n vco.grab_all_cam_frames()\n vco._frames = fdo.find_and_rectangle_all_cascades(\n vco._frames, fdo.face_cascade, (0, 0, 255))\n\n for cam_id in vco._cap_list:\n cv2.imshow(vco._cap_list[cam_id]['cam_name'], vco._frames[cam_id])\n cv2.imshow(vco._cap_list[cam_id]['cam_name'], vco._frames[cam_id])\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# release all the caps when we're done\nvco.release_all()\ncv2.destroyAllWindows()\n","sub_path":"face_finder.py","file_name":"face_finder.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"654009370","text":"import opensim as osim\nfrom osim.http.client import Client\nfrom osim.env import *\nimport numpy as np\nimport argparse\n\nimport tensorflow as tf\n\nfrom baselines.common import set_global_seeds, tf_util as U\nfrom baselines import bench, logger\n\nfrom baselines.pposgd import mlp_policy, pposgd_simple\n\nimport os.path as osp\nimport gym, logging\nfrom gym import utils\n\nfrom baselines import logger\nimport sys\n\n\n# Settings\nremote_base = 'http://grader.crowdai.org:1729'\n\n# Command line parameters\nparser = argparse.ArgumentParser(description='Submit the result to crowdAI')\nparser.add_argument('--token', 
dest='token', action='store', required=True)\nparser.add_argument('--logdir', type=str, default='saves')\nparser.add_argument('--agentName', type=str, default='PPO-Agent')\nparser.add_argument('--hid_size', type=int, default=64)\nparser.add_argument('--num_hid_layers', type=int, default=2)\nparser.add_argument('--resume', type=int, default=1197)\nargs = parser.parse_args()\n\nsess = U.single_threaded_session()\nsess.__enter__()\n\ndef policy_fn(name, ob_space, ac_space):\n    return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,\n        hid_size=args.hid_size, num_hid_layers=args.num_hid_layers)\n\n\nenv = RunEnv(visualize=False)\nob_space = env.observation_space\nac_space = env.action_space\npi = policy_fn(\"pi\", ob_space, ac_space) # Construct network for the trained policy\n\nob = U.get_placeholder_cached(name=\"ob\")\nac = pi.pdtype.sample_placeholder([None])\n\nsaver = tf.train.Saver()\nif args.resume > 0:\n    saver.restore(tf.get_default_session(), os.path.join(os.path.abspath(args.logdir), \"{}-{}\".format(args.agentName, args.resume)))\nelse:\n    print(\"No weights to load!\")\n\nclient = Client(remote_base)\n\n# Create environment\nobservation = client.env_create(args.token)\n\n# Run a single step\n#\n# The grader runs 3 simulations of at most 1000 steps each. We stop after the last one\nwhile True:\n    v = np.array(observation).reshape((env.observation_space.shape[0]))\n    action, vpred = pi.act(False, v)\n    [observation, reward, done, info] = client.env_step(action.tolist())\n    print(observation)\n    if done:\n        observation = client.env_reset()\n        if not observation:\n            break\n\nclient.submit()\n","sub_path":"baselines/ppo1/submit_ppo.py","file_name":"submit_ppo.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"168946849","text":"#CH2 An array of sequences\n\n# mainly about sequence types: lists, strings, byte types, etc.\n# covers lists, tuples, arrays and queues; Unicode strings are treated in Chapter 4\n# uses the sequences provided by the standard library; implementing sequence types is Chapter 10\n\n\n# strings, lists, byte sequences, arrays, XML elements and database results all support\n# common operations such as iteration, slicing, sorting and concatenation.\n\n\n# 2.1 Built-in sequences\n# container sequence vs flat sequence\n# mutable sequence vs immutable sequence\n\nlist, tuple, collections.deque\nstr, bytes, bytearray, memoryview, array.array\n\n\n\n# 2.2 List comprehensions and generator expressions\n# list comprehension (when the target is a list) listcomp\n# generator expression (for every other kind of sequence) genexp\n\n# a list comprehension builds a list from a sequence or any other iterable by filtering and transforming its items.\n\n# a list comprehension only ever builds lists.\n# to fill any other kind of sequence, use a generator expression.\n\n\n# 2.2.1 List comprehensions and readability\n\n# Example 2-1. Build a list of Unicode codepoints from a string. (Version1)\nsymbols = '$¢£¥€¤'\ncodes = []\nfor symbol in symbols:\n    codes.append(ord(symbol))\n\ncodes\nord('$')  # 36\nord('€')  # 8364\n\n# Example 2-2. Build a list of Unicode codepoints from a string. (Version2)\nsymbols = '$¢£¥€¤'\ncodes = [ord(symbol) for symbol in symbols]\n\ncodes\n\n\n\n# another example 1 - listcomp\nnumbers = [1, 2, 3, 4, 5]\n\nsquares = [n * n for n in numbers]\nsquares = [n * n for n in numbers if n % 2 == 0]\nmatrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nsquared = [[n * n for n in row] for row in matrix]\n\n# another example 2 - listcomp? no\n# when a list comprehension would run past two lines,\n# split the code up or use a plain for statement instead\nmy_lists = [\n    [[1, 2, 3], [4, 5, 6]],\n    [[7, 8, 9], [10, 11, 12]],\n]\n# Python ignores line breaks inside [], {} and (), so no backslash (\\) is needed\n\nflat = [x for sublist1 in my_lists\n        for sublist2 in sublist1\n        for x in sublist2]\n\nflat = []\nfor sublist1 in my_lists:\n    for sublist2 in sublist1:\n        flat.extend(sublist2)\n\n\n# Python 2.x had a variable-leaking problem here\n# list comprehension, generator expression, set comprehension, dict comprehension\n# all have their own local scope, like functions.\n# variables assigned inside the expression are local, but they can still reference variables in the surrounding scope\nx = 'ABC'\ndummy = [ord(x) for x in x]\nx\ndummy\n# the value of x is preserved / the list comprehension builds the list that was expected\n\n\n\n\n# 2.2.2 Listcomps versus map() and filter()\n\n\n# everything map() and filter() can do, a list comprehension can do as well,\n# without contorting things with lambda\n\n# Example 2-3. The same list built by a listcomp and a map/filter composition.\nsymbols = '$¢£¥€¤'\nbeyond_ascii = [ord(s) for s in symbols if ord(s) > 127]\nbeyond_ascii\n\nbeyond_ascii = list(filter(lambda c: c > 127, map(ord, symbols)))\nbeyond_ascii\n# \"the map()/filter() combination is faster than a listcomp\" (X - not necessarily true)\n\n\n# listcomp_speed.py / comparing the speed of a listcomp against map()/filter() combinations\nimport timeit\n\nTIMES = 10000\n\nSETUP = \"\"\"\nsymbols = '$¢£¥€¤'\ndef non_ascii(c):\n    return c > 127\n\"\"\"\n\ndef clock(label, cmd):\n    res = timeit.repeat(cmd, setup=SETUP, number=TIMES)\n    print(label, *('{:.3f}'.format(x) for x in res))\n# timeit.repeat?\n\nclock('listcomp :', '[ord(s) for s in symbols if ord(s) > 127]')\nclock('listcomp + func :', '[ord(s) for s in symbols if non_ascii(ord(s))]')\nclock('filter + lambda :', 'list(filter(lambda c: c > 127, map(ord, symbols)))')\nclock('filter + func :', 'list(filter(non_ascii, map(ord, symbols)))')\n\n\n\n\n\n# 2.2.3 Cartesian products\n\n# Example 2-4. Cartesian product using a list comprehension.\n# building a list of T-shirts available in two colors and three sizes\n\ncolors = ['black', 'white']\nsizes = ['S', 'M', 'L']\ntshirts = [(color, size) for color in colors for size in sizes]\n# generates a list of tuples arranged by color, then by size\n\ntshirts\n\nfor color in colors:\n    for size in sizes:\n        print((color, size))\n# prints the tuples by iterating over sizes inside a loop over colors\n\ntshirts = [(color, size) for size in sizes\n           for color in colors]\n# to iterate over sizes first and colors within, just swap the order of the for clauses.\n# a line break inside the listcomp makes it easier to see how the resulting list will be ordered\n\ntshirts\n\n\n\n# 2.2.4 Generator expressions (genexp)\n\n# when initializing tuples, arrays and other sequence types,\n# a generator expression saves memory: instead of building a whole list just to pass to another constructor,\n# it yields the items one by one via the iterator protocol\n\n# use it to generate data that does not need to be kept in memory\n\n# same syntax as a list comprehension, but with parentheses instead of square brackets\n\n# how generators work is explained in detail in Chapter 14\n\n\n\n# Example 2-5. Initializing a tuple and an array from a generator expression.\n# generator expressions building a tuple and an array\n\nsymbols = '$¢£¥€¤'\ntuple(ord(symbol) for symbol in symbols)\n# if the generator expression is the single argument in a function call, there is no need to duplicate the enclosing parentheses\n\nimport array\narray.array('I', (ord(symbol) for symbol in symbols))\n# the array constructor takes two arguments, so the parentheses around the generator expression (ord ~ symbols) are mandatory\n# the first argument of the array constructor defines the storage type used for the numbers in the array\n\n\n\n# Example 2-6. Cartesian product in a generator expression.\ncolors = ['black', 'white']\nsizes = ['S', 'M', 'L']\nfor tshirt in ('%s %s' % (c, s) for c in colors for s in sizes):\n    print(tshirt)\n# unlike Example 2-4, the six-item list of T-shirts is never built in memory:\n# the generator expression feeds the for loop, producing one item at a time\n\n# if the two lists used in the Cartesian product had a thousand items each, a generator expression\n# would avoid building a million-item list just to feed the for loop\n\n\n\n\n\n# 2.3 Tuples\n# not just immutable lists\n\n# they can also be used as records with no field names.\n\n\n\n# 2.3.1 Tuples as records\n\n# a tuple holds a record.\n# each item in the tuple is one field of the record, and the position of the item gives it its meaning\n\n# thought of as a mere immutable list, the size and order of the items may or may not matter,\n# but when a tuple is used as a collection of fields, the number of items is fixed and their order is vital\n\n\n\n# Example 2-7. Tuples used as records.\n\nlax_coordinates = (33.9425, -118.408056)\n# latitude and longitude of Los Angeles International Airport\n\ncity, year, pop, chg, area = ('Tokyo', 2003, 32450, 0.66, 8014)\n# data about Tokyo (name, year, population in millions, population change, area in square kilometers)\n\ntraveler_ids = [('USA', '31195855'), ('BRA', 'CE342567'), ('ESP', 'XDA205856')]\n# a list of tuples of the form (country_code, passport_number)\n\ntraveler_ids[0]\ntraveler_ids[0][0]\n\nfor passport in sorted(traveler_ids):  # as we iterate over the list, passport is bound to each tuple\n    print('%s/%s' % passport)\n    # the percent (%) format operator understands tuples and treats each item as a separate field\n    # each slot in the format string passed to print() is assigned an item from the passport tuple\n\nfor country, _ in traveler_ids:\n    print(country)\n# the for loop knows how to retrieve the items of a tuple separately (this is called 'unpacking')\n# here we are not interested in the second item, so it is assigned to the underscore (_), a dummy variable\n\n# tuples work well as records thanks to the unpacking mechanism.\n\n\n\n\n\n\n\n# another example 1 : *args\ndef multiply(x, y):\n    print (x * y)\n\ncoords = (10, 20)\nmultiply(coords)  # TypeError: multiply() missing 1 required positional argument: 'y'\nmultiply(*coords)  ## 200\n\ndef func(*args):\n    for arg in args:\n        print(arg, )\n\nfunc(1, 2, 3)  ## 1 2 3\n\n\n# another example 2 : **kwargs\ndef print_values(**kwargs):\n    for key, value in kwargs.items():\n        print(\"The value of {} is {}\".format(key, value))\n\nprint_values(my_name=\"Sammy\", your_name=\"Casey\")\n\ndef some_kwargs(kwarg_1, kwarg_2, kwarg_3):\n    print(\"kwarg_1:\", kwarg_1)\n    print(\"kwarg_2:\", kwarg_2)\n    print(\"kwarg_3:\", kwarg_3)\n\nkwargs = {\"kwarg_1\": \"Val\", \"kwarg_2\": \"Harper\", \"kwarg_3\": \"Remy\"}\nsome_kwargs(**kwargs)\n\n\n\n\n\n# 2.3.2 Tuple unpacking / iterable unpacking\n\n# it works with any iterable object.\n# the only requirement is that the iterable yields exactly one item per variable,\n# unless a * is used to capture the excess items.\n\n\n# the most visible form of tuple unpacking is parallel assignment (assigning items from an iterable to a tuple of variables).\nlax_coordinates = (33.9425, -118.408056)\nlatitude, longitude = lax_coordinates  # tuple unpacking\nlatitude\nlongitude\n\n\n# tuple unpacking also lets you swap the values of two variables without a temporary variable.\na, b = 1, 2\nb, a = a, b\n\n\n# prefixing an argument with * when calling a function unpacks a tuple, as shown below.\ndivmod(20, 8)\n\nt = (20, 8)\ndivmod(*t)  # divmod(t) is an error\n\nquotient, remainder = divmod(*t)\nquotient, remainder\n\n\n# it also provides a simple way for a function to return multiple values to the caller.\n\n# for example, os.path.split() splits a filesystem path into the directory part and the file name.\n# using the dummy variable (_) as a placeholder lets us ignore the parts we do not care about when unpacking.\nimport os\n_, filename = os.path.split('C:/Users/OLVT/PycharmProjects/STUDY_DATA')\nfilename\n\npath, _ = os.path.split('C:/Users/OLVT/PycharmProjects/STUDY_DATA')\npath\n\n\n\n# Using * to grab excess items\n\n# * can also be used when we only care about some of the items while unpacking a tuple.\n# just as * grabs excess arguments in a function parameter list, the same idea extends to parallel assignment:\na, b, *rest = range(5)\na, b, rest\n\na, b, *rest = range(3)\na, b, rest\n\na, b, *rest = range(2)\na, b, rest\n\n\n# in parallel assignment the * prefix can be applied to exactly one variable, but it can appear in any position\na, *body, c, d = range(5)\na, body, c, d\n\n*head, b, c, d = range(5)\nhead, b, c, d\n\n\n\n\n\n\n\n# another strength: tuple unpacking also works with nested structures\n\n\n# 2.3.3 Nested tuple unpacking\n\n# given an expression like (a, b, (c, d)), Python does the right thing as long as it matches the nesting structure.\n# Example 2-8. Unpacking nested tuples to access the longitude.\n\nmetro_areas = [\n    ('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),\n    ('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),\n    ('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),\n    ('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),\n    ('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),]\n# each tuple holds a record with four fields, the last of which is a coordinate pair\n\nprint('{:15} | {:^9} | {:^9}'.format('', 'lat.', 'long.'))\nfmt = '{:15} | {:9.4f} | {:9.4f}'\n\nfor name, cc, pop, (latitude, longitude) in metro_areas:\n# unpack the coordinates by assigning the last field to a tuple.\n    if longitude <= 0:  # only print cities in the Western hemisphere, where the longitude is negative.\n        print(fmt.format(name, latitude, longitude))\n\n\n\n\n\n\n\n\n\n","sub_path":"Fluent Python/CH2_An array of sequences.py","file_name":"CH2_An array of sequences.py","file_ext":"py","file_size_in_byte":12598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"258087953","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nobj=Tk()\r\ndef cmd():\r\n    lab=Label(text='check').pack()\r\n    \r\ndef cmdd():\r\n    messagebox.showinfo(title='quit', message=\"sure\")  # showinfo takes title/message; it has no font option\r\n\r\nobj.title(\"message box\")\r\nobj.geometry(\"500x500+100+20\")\r\n\r\n   \r\n\r\nbut=Button(text=\"click me\",font=('roman',20,'italic'),command=cmd)  # pass the function itself, not the string \"cmd\"\r\nbut.pack()\r\nbut1=Button(text=\"quit button\",font=('roman',20,'italic'),command=cmdd)\r\nbut1.pack()\r\nlab12=Label(text='check').pack()\r\n\r\n\r\nobj.mainloop()\r\n","sub_path":"messagebox.py","file_name":"messagebox.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"120348280","text":"# import necessary libraries\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models import create_classes\nimport os\nfrom flask import (\n    Flask,\n    render_template,\n    jsonify,\n    request,\n    redirect)\n\n\n# 1. create a new clean env\n# python -m venv \n# 2. add this name to gitignore\n# 3. use git bash > source /Scripts/activate\n# 4. conda deactivate\n# 5. pip freeze > make sure it's clean\n# 6. pip install dependencies > make sure you will have gunicorn\n# 7. make sure you create a db on heroku > make sure 'postgresql'\n# go to heroku website > go to app > Setting > create a new key for your correct DB link\n# 8. put this new var to your code\n# 9. 
python app.py on localhost\n\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Database Setup\n#################################################\n\nurlz = os.getenv('DATABASE_URLZ') # used for Heroku deployment\n# urlz = 'postgresql://njfgtyroqyycuk:2b4d75c46d7bb5075e0146ac8e835985246903b252cc2423ad4779e9a7bb74ac@ec2-3-231-40-72.compute-1.amazonaws.com:5432/d15jf7fk1ra3kr' # local machine testing\n\nprint(f'urlz before try and except {urlz}', flush=True)\n\ntry:\n if urlz:\n print(f'try has worked, urlz is: {urlz}', flush=True)\nexcept Exception as error:\n urlz = \"postgresql://njfgtyroqyycuk:2b4d75c46d7bb5075e0146ac8e835985246903b252cc2423ad4779e9a7bb74ac@ec2-3-231-40-72.compute-1.amazonaws.com:5432/d15jf7fk1ra3kr\"\n print('except has activated', flush=True)\n\nprint(f'urlz after try and except = {urlz}')\n\napp.config['SQLALCHEMY_DATABASE_URI'] = urlz\n\n# 1. create Postgres database on Heroku\n# 2. create Database URL on Heroku with new PostgreSQL syntax\n# 3. input Database URL + NEW\n# 4. Replace sqlite with PostgreSQL Alchemy log in information\n\n# Remove tracking modifications\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\n\nPet = create_classes(db)\n\n# create route that renders index.html template\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n# Query the database and send the jsonified results\n@app.route(\"/send\", methods=[\"GET\", \"POST\"])\ndef send():\n if request.method == \"POST\":\n name = request.form[\"petName\"]\n lat = request.form[\"petLat\"]\n lon = request.form[\"petLon\"]\n\n pet = Pet(name=name, lat=lat, lon=lon)\n db.session.add(pet)\n db.session.commit()\n return redirect(\"/\", code=302)\n\n return render_template(\"form.html\")\n\n\n@app.route(\"/api/pals\")\ndef pals():\n results = db.session.query(Pet.name, Pet.lat, Pet.lon).all()\n\n hover_text = [result[0] for result in results]\n lat = [result[1] for result in results]\n lon = [result[2] for result in results]\n\n pet_data = [{\n \"type\": \"scattergeo\",\n \"locationmode\": \"USA-states\",\n \"lat\": lat,\n \"lon\": lon,\n \"text\": hover_text,\n \"hoverinfo\": \"text\",\n \"marker\": {\n \"size\": 50,\n \"line\": {\n \"color\": \"rgb(8,8,8)\",\n \"width\": 1\n },\n }\n }]\n\n return jsonify(pet_data)\n\n\nif __name__ == \"__main__\":\n db.create_all()\n app.run(debug=True)\n","sub_path":"Heroku/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"368275302","text":"class Solution:\n def longestPalindrome(self, s: str) -> str:\n if not s or len(s) == 1:\n return s\n candidates = []\n for i in range(len(s)):\n for j in range(i+1, len(s)):\n if s[i:j+1] not in candidates:\n candidates.append(s[i:j+1])\n # print(candidates)\n \n deleted = []\n \n for i in range(len(candidates)):\n l, r = 0, len(candidates[i])-1\n while l < r:\n if candidates[i][l] == candidates[i][r]:\n l += 1\n r -= 1\n else:\n deleted.append(candidates[i])\n break\n \n for item in deleted:\n candidates.remove(item)\n \n res = s[0]\n \n for candidate in candidates:\n if len(candidate) > len(res):\n res = candidate\n \n return res\n \n # TC: O(n^3)\n # this comes from the second for loop\n # as the total number of substring is (n+1)*n/2, and inner while loop is O(n)\n \n # SC: O(n^2)\n # this comes from 
the candidates list\n    \n    # this approach is naive brute force, and it fails with a TLE (Time Limit Exceeded) error.\n    # the space complexity could also be cut to O(1) by running the palindrome check\n    # directly inside the first pair of loops instead of storing every candidate substring.\n","sub_path":"5_LongestPalindromicSubstring/5_LongestPalindromicSubstring_1.py","file_name":"5_LongestPalindromicSubstring_1.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"181017426","text":"import sys\nsys.stdin = open(\"subtree_input.txt\")\n\nT = int(input())\nfor _ in range(T):\n    ea, goal = map(int,input().split())\n    temp = list(map(int,input().split()))\n    tree = []\n    for i in range(ea):\n        tree.append((temp[i*2],temp[i*2+1]))\n\n    # process\n    visit =[]\n    visit.append(goal)\n    result = []\n    while len(visit)>0:\n        a = visit.pop()\n        if a not in result:\n            result.append(a)\n            for i in range(len(tree)):\n                if tree[i][0] == a:\n                    visit.append(tree[i][1])\n\n    print(f\"#{_ + 1} {len(result)}\")","sub_path":"HJ_AL/tree/sw_subtree.py","file_name":"sw_subtree.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"46145014","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport numpy as np\n\nimport logging\n\nfrom qiskit_aqua.algorithms.many_sample.qsvm import SVM_QKernel_ABC\nfrom qiskit_aqua.algorithms.many_sample.qsvm import (get_points_and_labels, optimize_SVM, kernel_join)\n\nlogger = logging.getLogger(__name__)\n\nclass SVM_QKernel_Binary(SVM_QKernel_ABC):\n    \"\"\"\n    the binary classifier\n    \"\"\"\n\n    def __init__(self):\n        self.ret = {}\n\n    def train(self, training_input, class_labels):\n        \"\"\"\n        train the svm\n        Args:\n            training_input (dict): dictionary which maps each class to the points in the class\n            class_labels (list): list of classes. 
For example: ['A', 'B']\n \"\"\"\n training_points, training_points_labels, label_to_class = get_points_and_labels(training_input, class_labels)\n\n kernel_matrix = kernel_join(training_points, training_points, self.entangler_map,\n self.coupling_map, self.initial_layout, self.shots,\n self._random_seed, self.num_of_qubits, self._backend)\n\n self.ret['kernel_matrix_training'] = kernel_matrix\n\n [alpha, b, support] = optimize_SVM(kernel_matrix, training_points_labels)\n alphas = np.array([])\n SVMs = np.array([])\n yin = np.array([])\n for alphindex in range(len(support)):\n if support[alphindex]:\n alphas = np.vstack([alphas, alpha[alphindex]]) if alphas.size else alpha[alphindex]\n SVMs = np.vstack([SVMs, training_points[alphindex]]) if SVMs.size else training_points[alphindex]\n yin = np.vstack([yin, training_points_labels[alphindex]]\n ) if yin.size else training_points_labels[alphindex]\n\n self.ret['svm'] = {}\n self.ret['svm']['alphas'] = alphas\n self.ret['svm']['bias'] = b\n self.ret['svm']['support_vectors'] = SVMs\n self.ret['svm']['yin'] = yin\n\n def test(self, test_input, class_labels):\n \"\"\"\n test the svm\n Args:\n test_input (dict): dictionary which maps each class to the points in the class\n class_labels (list): list of classes. For example: ['A', 'B']\n \"\"\"\n\n test_points, test_points_labels, label_to_labelclass = get_points_and_labels(test_input, class_labels)\n\n alphas = self.ret['svm']['alphas']\n bias = self.ret['svm']['bias']\n SVMs = self.ret['svm']['support_vectors']\n yin = self.ret['svm']['yin']\n\n kernel_matrix = kernel_join(test_points, SVMs, self.entangler_map, self.coupling_map,\n self.initial_layout, self.shots, self._random_seed,\n self.num_of_qubits, self._backend)\n\n self.ret['kernel_matrix_testing'] = kernel_matrix\n\n success_ratio = 0\n L = 0\n total_num_points = len(test_points)\n Lsign = np.zeros(total_num_points)\n for tin in range(total_num_points):\n Ltot = 0\n for sin in range(len(SVMs)):\n L = yin[sin] * alphas[sin] * kernel_matrix[tin][sin]\n Ltot += L\n\n Lsign[tin] = np.sign(Ltot + bias)\n\n\n\n logger.debug(\"\\n=============================================\")\n logger.debug('classifying' + str(test_points[tin]))\n logger.debug('Label should be ' + str(label_to_labelclass[np.int(test_points_labels[tin])]))\n logger.debug('Predicted label is ' + str(label_to_labelclass[np.int(Lsign[tin])]))\n if np.int(test_points_labels[tin]) == np.int(Lsign[tin]):\n logger.debug('CORRECT')\n else:\n logger.debug('INCORRECT')\n\n if Lsign[tin] == test_points_labels[tin]:\n success_ratio += 1\n final_success_ratio = success_ratio / total_num_points\n\n logger.debug('Classification success for this set is %s %% \\n' % (100 * final_success_ratio))\n return final_success_ratio\n\n def predict(self, test_points):\n \"\"\"\n predict using the svm\n Args:\n test_points (numpy.ndarray): the points\n \"\"\"\n alphas = self.ret['svm']['alphas']\n bias = self.ret['svm']['bias']\n SVMs = self.ret['svm']['support_vectors']\n yin = self.ret['svm']['yin']\n\n kernel_matrix = kernel_join(test_points, SVMs, self.entangler_map, self.coupling_map,\n self.initial_layout, self.shots, self._random_seed,\n self.num_of_qubits, self._backend)\n\n self.ret['kernel_matrix_prediction'] = kernel_matrix\n\n total_num_points = len(test_points)\n Lsign = np.zeros(total_num_points)\n for tin in range(total_num_points):\n Ltot = 0\n for sin in range(len(SVMs)):\n L = yin[sin] * alphas[sin] * kernel_matrix[tin][sin]\n Ltot += L\n Lsign[tin] = np.int(np.sign(Ltot + bias))\n return 
Lsign\n\n def run(self):\n \"\"\"\n put the train, test, predict together\n \"\"\"\n if self.training_dataset is None:\n self.ret['error'] = 'training dataset is missing! please provide it'\n return self.ret\n\n num_of_qubits = self.auto_detect_qubitnum(self.training_dataset) # auto-detect mode\n if num_of_qubits == -1:\n self.ret['error'] = 'Something wrong with the auto-detection of num_of_qubits'\n return self.ret\n if num_of_qubits != 2 and num_of_qubits != 3:\n self.ret['error'] = 'You should lower the feature size to 2 or 3 using PCA first!'\n return self.ret\n self.train(self.training_dataset, self.class_labels)\n if self.test_dataset is not None:\n success_ratio = self.test(self.test_dataset, self.class_labels)\n self.ret['test_success_ratio'] = success_ratio\n if self.datapoints is not None:\n predicted_labels = self.predict(self.datapoints)\n _, _, label_to_class = get_points_and_labels(self.training_dataset, self.class_labels)\n predicted_labelclasses = [label_to_class[x] for x in predicted_labels]\n self.ret['predicted_labels'] = predicted_labelclasses\n\n return self.ret\n","sub_path":"qiskit_aqua/algorithms/many_sample/qsvm/svm_qkernel_binary.py","file_name":"svm_qkernel_binary.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"58853586","text":"# Copyright (c) 2010,2011 Roger Light \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of mosquitto nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport mosquitto\nimport os\nimport urlparse\n\n\n# Define event callbacks\ndef on_connect(mosq, obj, rc):\n print(\"rc: \" + str(rc))\n\n\ndef on_message(mosq, obj, msg):\n print(msg.topic + \" \" + str(msg.qos) + \" \" + str(msg.payload))\n\n\ndef on_publish(mosq, obj, mid):\n print(\"mid: \" + str(mid))\n\n\ndef on_subscribe(mosq, obj, mid, granted_qos):\n print(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))\n\n\ndef on_log(mosq, obj, level, string):\n print(string)\n\nmqttc = mosquitto.Mosquitto()\n# Assign event callbacks\nmqttc.on_message = on_message\nmqttc.on_connect = on_connect\nmqttc.on_publish = on_publish\nmqttc.on_subscribe = on_subscribe\n\n\n# Uncomment to enable debug messages\nmqttc.on_log = on_log\n\nif __name__ == '__main__':\n from app import create_app\n this_app = create_app(os.getenv('FLASK_CONFIG') or 'default')\n with this_app.app_context():\n # Parse CLOUDMQTT_URL (or fallback to localhost)\n url_str = this_app.config['CLOUDMQTT_URL']\n url = urlparse.urlparse(url_str)\n # Connect\n mqttc.username_pw_set(url.username, url.password)\n mqttc.connect(url.hostname, url.port)\n # Continue the network loop, exit when an error occurs\n # rc = 0\n # while rc == 0:\n # rc = mqttc.loop()\n # print(\"rc: \" + str(rc))\n","sub_path":"mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"240110755","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 9 17:57:57 2020\n\n@author: Real\n\"\"\"\nfrom propagate import propagate\n\n# GRADED FUNCTION: optimize\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs = []\n for i in range(num_iterations):\n # Cost and gradient calculation ( 1-4 lines of code)\n ### START CODE HERE ###\n grads, cost = propagate(w, b, X, Y)\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n # update rule ( 2 lines of code)\n ### START CODE HERE ###\n w = w-learning_rate*dw\n b = b-learning_rate*db\n ### END CODE HERE ###\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n # Print the cost every 100 training examples\n #if print_cost and i % 100 == 0:\n # print (\"Cost after iteration %i: %f\" %(i, cost))\n params = {\"w\": w,\"b\": b}\n grads = {\"dw\": dw,\"db\": db}\n return params, grads, costs","sub_path":"Deeplearning codes/Course 1 Neural Network and deep learning/Week 1 Logistic regression on Cat vs Non Cat/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"279916323","text":"# program to check given number is prime or not\n\nnum=int(input(\"Enter a number : \")) #9\nflag=0\nfor i in range(2,num): #2...8\n if(num%i==0): #9%2==0\n flag=1\n break\n else:\n flag=0\nif(flag>0):\n print(\"not prime\")\nelse:\n 
print(\"prime\")\n\n\n\n","sub_path":"PythonPrograms/flowcontrols/looping/primeornot.py","file_name":"primeornot.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"487580586","text":"import sys\n\nfrom finder import Finder\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n exit('Two arguments required: run.py ')\n\n filename, pattern = sys.argv[1:3]\n output = 'Not found'\n finder = Finder.load(filename)\n result = finder.search(pattern)\n\n if result:\n output = '\\n'.join(str(kls) for kls in result)\n\n exit(output)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"424500196","text":"\nimport sys\nimport time\nfrom .pages import Page\nfrom .components import Text, Number, Button, Gauge, Component\n\n\nif sys.implementation.name == \"micropython\":\n\timport machine\n\ttraceback = None\nelse:\n\timport serial\n\timport traceback\n\timport logging\n\n\n# pylint: disable=C0330,C0111\n\nclass Nextion(object):\n\t'''\n\tAbstract Nextion base class.\n\n\tSubclasses must provide implementations of\n\n\t\tlog\n\t\tprobe_set_baud()\n\t\t_write_internal(bytearr)\n\t\t_read_internal(max_read_bytes, read_timeout)\n\n\tfor concrete implementations\n\n\t'''\n\n\n\n\tERRORS = {\n\t\t0x00 : \"Invalid instruction\",\n\t\t# 0x01 : \"Successful execution of instruction\",\n\t\t0x02 : \"Component ID invalid\",\n\t\t0x03 : \"Page ID invalid\",\n\t\t0x04 : \"Picture ID invalid\",\n\t\t0x05 : \"Font ID invalid\",\n\t\t0x11 : \"Baud rate setting invalid\",\n\t\t0x12 : \"Curve control ID number or channel number is invalid\",\n\t\t0x1A : \"Variable name invalid\",\n\t\t0x1B : \"Variable operation invalid\",\n\t\t0x1C : \"Failed to assign\",\n\t\t0x1D : \"Operate EEPROM failed\",\n\t\t0x1E : \"Parameter quantity invalid\",\n\t\t0x1F : \"IO operation failed\",\n\t\t0x20 : \"Undefined escape characters\",\n\t\t0x23 : \"Too long variable name\",\n\n\t}\n\n\tMESSAGES = {\n\t\t0x65 : \"Touch event return data\",\n\t\t0x66 : \"Current page ID number returns\"\n\t}\n\n\tRED = 63488\n\tBLUE = 31\n\tGRAY = 33840\n\tBLACK = 0\n\tWHITE = 65535\n\tGREEN = 2016\n\tBROWN = 48192\n\tYELLOW = 65504\n\n\n\tdef __init__(self, page_definitions=None):\n\n\t\tassert hasattr(self, \"log\"), \"Nextion class must be subclassed with the \" \\\n\t\t\t\"subclass providing a implementation of the 'log' member!\"\n\n\t\tself.pages = []\n\t\tself.debug = False\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tself.set_cmd_response_mode(3)\n\t\t\t\tbreak\n\t\t\texcept Exception as err:\n\t\t\t\tif traceback:\n\t\t\t\t\ttraceback.print_exc()\n\t\t\t\telse:\n\t\t\t\t\t# uPython traceback output\n\t\t\t\t\tsys.print_exception(err)\n\n\t\t\t\tself.log.info(\"Trying to probe device\")\n\t\t\t\ttime.sleep(1)\n\n\t\tif page_definitions is not None:\n\t\t\tfor page_definition in page_definitions:\n\t\t\t\tself.pages.append(Page.new_page_by_definition(self, page_definition))\n\n\t########################################################\n\t# Stub functions that need to be overridden\n\n\tdef _write_internal(self, message):\n\t\traise RuntimeError(\"This must be overridden in a sublclass!\")\n\n\tdef _read_internal(self, cmax, timeout):\n\t\traise RuntimeError(\"This must be overridden in a sublclass!\")\n\n\tdef probe_set_baud(self):\n\t\traise RuntimeError(\"This must be overridden in a 
sublclass!\")\n\n\t########################################################\n\n\tdef show_page_by_name(self, name):\n\t\tresult = None\n\t\tfor page in self.pages:\n\t\t\tif page.name == name:\n\t\t\t\tresult = page\n\t\t\t\tbreak\n\t\treturn result\n\n\tdef page_reference(self, page_id):\n\t\tif len(self.pages) > page_id:\n\t\t\treturn self.pages[page_id]\n\n\t\tpage = Page(self, page_id)\n\t\tself.pages.append(page)\n\t\treturn page\n\n\tdef set_debug(self, debug):\n\t\tself.debug = debug\n\n\tdef set_cmd_response_mode(self, value):\n\t\tself.set('bkcmd', value)\n\n\tdef set_dim(self, value, save=False):\n\t\tself.set('dim' + 's' if save else '', value)\n\n\tdef set_page(self, value):\n\t\tself.nx_write('page ' + str(value))\n\n\tdef get_page(self):\n\t\tret = self.nx_write('sendme')\n\t\tif ret[0] == 0x66:\n\t\t\tif ret[1] == 0xff:\n\t\t\t\tret[1] = 0x00\n\t\t\treturn ret[1]\n\n\t\traise ValueError(Nextion.get_nx_error_message(0x00))\n\n###############################\n\n\tdef refresh(self, comp_id=\"0\"):\n\t\tself.nx_write('ref %s' % comp_id)\n\n\tdef get_text(self, comp_id):\n\t\tself.nx_write('get %s.txt' % comp_id)\n\t\tret = self.nx_read()\n\t\treturn ret\n\n\n\tdef get_value(self, comp_id):\n\t\tself.nx_write('get %s.val' % comp_id)\n\n###############################\n\n\tdef set_value(self, comp_id, value):\n\t\tprint(comp_id + '.val=\"' + str(value) + '\"')\n\t\tself.nx_write(comp_id + '.val=' + str(value))\n\n\tdef set_text(self, comp_id, value):\n\t\tself.nx_write(comp_id + '.txt=\"' + str(value) + '\"')\n\n\tdef clear(self, color):\n\t\tself.nx_write('cls %s' % color)\n\n\tdef drawPicture(self, x, y, pic, w=None, h=None):\n\t\tif w is None or h is None:\n\t\t\tself.nx_write('pic %s,%s,%s' % (x, y, pic))\n\t\telse:\n\t\t\tself.nx_write('picq %s,%s,%s,%s,%s' % (x, y, w, h, pic))\n\n\tdef drawString(self, x1, y1, x2, y2, fontid, fontcolor, backcolor, xcenter,\n\t\t\t\t ycenter, sta, string):\n\t\tself.nx_write('xstr %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' %\n\t\t\t\t\t\t (x1, y1, x2 - x1, y2 - y1, fontid, fontcolor,\n\t\t\t\t\t\t backcolor, xcenter, ycenter, sta, string))\n\n\tdef drawLine(self, x1, y1, x2, y2, color):\n\t\tself.nx_write('line %s,%s,%s,%s,%s' % (x1, y1, x2, y2, color))\n\n\tdef drawRectangle(self, x1, y1, x2, y2, color):\n\t\tself.nx_write('draw %s,%s,%s,%s,%s' % (x1, y1, x2, y2, color))\n\n\tdef drawBox(self, x1, y1, x2, y2, color):\n\t\tself.nx_write('fill %s,%s,%s,%s,%s' % (x1, y1, x2 - x1, y2 - y1, color))\n\n\tdef drawCircle(self, x, y, r, color):\n\t\tself.nx_write('cir %s,%s,%s,%s' % (x, y, r, color))\n\n##############################\n\n\tdef set(self, key, value, check_return=True):\n\n\t\tmessage = key + '=' + str(value)\n\t\tself.nx_write(message)\n\t\tret = self.nx_read(check_return=True)\n\n\t@staticmethod\n\tdef get_nx_error_message(err_code_char):\n\t\treturn Nextion.ERRORS[int(err_code_char)]\n\n\n\tdef nx_write(self, message):\n\t\tmessage = message.encode(\"ISO-8859-1\")\n\t\tmessage += b\"\\xFF\\xFF\\xFF\"\n\n\t\t# self.log.debug(\"Transmitting: %s\", message)\n\t\tself._write_internal(message)\n\n\n\tdef nx_read(self, cmax=20, timeout=None, check_return=True):\n\t\t# Packet end postfix is 3 bytes\n\t\tbytes_buf = self._read_internal(cmax + 3, timeout)\n\n\t\t# self.log.debug(\"Read response: %s\", bytes_buf)\n\n\t\tif not bytes_buf:\n\t\t\traise ValueError(\"No response from hardware!\")\n\n\t\tif not check_return:\n\t\t\treturn bytes_buf\n\t\t# 0X65\tTouch event return data\n\t\t# 0X66\tCurrent page ID number returns\n\t\t# 0X67\tTouch coordinate 
data returns\n\t\t# 0X68\tTouch Event in sleep mode\n\t\t# 0X70\tString variable data returns\n\t\t# 0X71\tNumeric variable data returns\n\t\t# 0X86\tDevice automatically enters into sleep mode\n\t\t# 0X87\tDevice automatically wake up\n\t\t# 0X88\tSystem successful start up\n\t\t# 0X89\tStart SD card upgrade\n\t\t# 0XFD\tData transparent transmit finished\n\n\t\tfbyte = bytes_buf[0]\n\t\tif fbyte in self.ERRORS:\n\t\t\traise ValueError(\"Response Error: {} -> {}\".format(\n\t\t\t\tNextion.get_nx_error_message(fbyte),\n\t\t\t\tbytes_buf\n\t\t\t\t))\n\n\t\texpected_postfix = [255, 255, 255]\n\t\tif bytes_buf[-3:] != expected_postfix:\n\t\t\traise ValueError(\"Response missing trailing bytes: {} -> {} != {}\".format(\n\t\t\t\tbytes_buf, bytes_buf[-3:], expected_postfix))\n\n\t\t# Truncate the type and postfix\n\t\tbytes_buf = bytes_buf[1:-3]\n\n\n\t\tif fbyte == 0x01:\n\t\t\treturn bytes_buf\n\t\telif fbyte == 0x65: # Touch event return data\n\t\t\tpass\n\t\telif fbyte == 0x66: # Current page ID number returns\n\t\t\tpass\n\t\telif fbyte == 0x67: # Touch coordinate data returns\n\t\t\tpass\n\t\telif fbyte == 0x68: # Touch Event in sleep mode\n\t\t\tpass\n\t\telif fbyte == 0x70: # String variable data returns\n\t\t\tstrb = \"\".join([chr(b) for b in bytes_buf])\n\t\t\treturn strb\n\t\telif fbyte == 0x71: # Numeric variable data returns\n\t\t\tprint(\"Numeric data:\", bytes_buf)\n\t\t\tpass\n\t\telif fbyte == 0x86: # Device automatically enters into sleep mode\n\t\t\tpass\n\t\telif fbyte == 0x87: # Device automatically wake up\n\t\t\tpass\n\t\telif fbyte == 0x88: # System successful start up\n\t\t\tpass\n\t\telif fbyte == 0x89: # Start SD card upgrade\n\t\t\tpass\n\t\telif fbyte == 0xfd: # Data transparent transmit finished\n\t\t\tpass\n\t\telse:\n\t\t\traise ValueError(\"Response Error with unknown code: {}\".format(bytes_buf))\n\n\nclass PySerialNextion(Nextion):\n\n\tBAUD_VALUES = {\n\t\t\t2400,\n\t\t\t4800,\n\t\t\t9600,\n\t\t\t19200,\n\t\t\t38400,\n\t\t\t57600,\n\t\t\t115200,\n\t\t}\n\n\tdef __init__(self, device_path, timeout=None, *args, **kwargs):\n\t\tself.log = logging.getLogger(\"Main.Nex.Protocol\")\n\n\t\tself.port = serial.Serial(device_path, 9600, timeout=timeout)\n\t\tself.port.flushOutput()\n\n\t\tif timeout is None:\n\t\t\tself.read_timeout = 0.1\n\t\telse:\n\t\t\tself.read_timeout = timeout\n\t\tself.probe_set_baud()\n\n\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef _autobaud(self):\n\t\tfor baudrate in self.BAUD_VALUES:\n\t\t\tself.log.info(\"Autobaud: Probing with baudrate: %s\", baudrate)\n\t\t\tself.port.baudrate = baudrate\n\t\t\tfor __ in range(2):\n\t\t\t\ttry:\n\t\t\t\t\tself.port.flush()\n\t\t\t\t\tself.set_cmd_response_mode(3)\n\t\t\t\t\tself.log.info(\"Autobaud: Connected to display with baudrate %s\", baudrate)\n\t\t\t\t\treturn\n\n\t\t\t\texcept Exception:\n\t\t\t\t\ttraceback.print_exc()\n\n\tdef probe_set_baud(self):\n\t\tself._autobaud()\n\t\tself.set_baud(115200, save=True)\n\t\tself._autobaud()\n\n\n\tdef _write_internal(self, message):\n\t\tself.port.write(message)\n\t\tself.port.flushOutput()\n\n\tdef _read_internal(self, cmax, timeout):\n\n\t\tif timeout is None:\n\t\t\ttimeout = self.read_timeout\n\t\tbytes_buf = []\n\n\t\tcount = 0\n\t\ttime_now = time.time()\n\t\twhile timeout == 0 or (time.time() - time_now) < timeout:\n\t\t\tread_byte = self.port.read()\n\t\t\tif read_byte is None or read_byte == b\"\":\n\t\t\t\tcontinue\n\n\n\t\t\tread_char = read_byte[0]\n\n\t\t\tif read_char == 0xff and not bytes_buf:\n\t\t\t\tcontinue\n\n\t\t\tif read_char != 
0x00:\n\t\t\t\tself.log.debug(\"Rx: %02x, %s, %s\", read_char, len(bytes_buf), count)\n\n\t\t\t\tbytes_buf.append(read_char)\n\t\t\t\tif len(bytes_buf) == cmax:\n\t\t\t\t\treturn bytes_buf\n\t\t\t\tif read_char == 0xff:\n\t\t\t\t\tcount = count + 1\n\t\t\t\t\tif count == 3:\n\t\t\t\t\t\tif self.debug is True:\n\t\t\t\t\t\t\tprint(\"Complete. Returning\")\n\t\t\t\t\t\treturn bytes_buf\n\t\t\t\telse:\n\t\t\t\t\tcount = 0\n\t\treturn bytes_buf\n\n\tdef set_baud(self, baud, save=False):\n\n\t\tself.log.info(\"Setting display baudrate to %s\", baud)\n\t\tif baud not in self.BAUD_VALUES:\n\t\t\traise ValueError(\"Baud rate not supported: %s\" % baud)\n\n\t\tself.set('baud' + ('s' if save else ''), baud)\n\n# Logging stub for micropython compabability\nclass LogStub(object):\n\tdef __init__(self, log_path):\n\t\tself.log_path = log_path\n\n\tdef debug(self, fmt_str, *args):\n\t\tprint(\"debug(\" + self.log_path + \"): \" + fmt_str % args)\n\tdef info(self, fmt_str, *args):\n\t\tprint(\"info(\" + self.log_path + \"): \" + fmt_str % args)\n\tdef warning(self, fmt_str, *args):\n\t\tprint(\"warning(\" + self.log_path + \"): \" + fmt_str % args)\n\tdef error(self, fmt_str, *args):\n\t\tprint(\"error(\" + self.log_path + \"): \" + fmt_str % args)\n\tdef critical(self, fmt_str, *args):\n\t\tprint(\"critical(\" + self.log_path + \"): \" + fmt_str % args)\n\nclass UpyNextion(Nextion):\n\n\tBAUD_VALUES = [\n\t\t\t115200,\n\t\t\t# 2400,\n\t\t\t# 4800,\n\t\t\t9600,\n\t\t\t# 19200,\n\t\t\t# 38400,\n\t\t\t# 57600,\n\t\t\t115200,\n\t\t]\n\tdef __init__(self, device_no, tx_pin=None, rx_pin=None, timeout=None, *args, **kwargs):\n\n\t\tprint(\"Creating logger\")\n\t\tself.log = LogStub(\"Main.Nex.Protocol\")\n\n\n\n\t\tif timeout is None:\n\t\t\tself.read_timeout = 0.1 # in milliseconds\n\t\telse:\n\t\t\tself.read_timeout = timeout\n\n\t\tassert self.read_timeout is not None\n\n\t\tself.init_args = {\n\t\t\t\t\"baudrate\" : 9600,\n\t\t\t\t\"bits\" : 8,\n\t\t\t\t\"parity\" : None,\n\t\t\t\t\"stop\" : 1,\n\n\t\t\t\t# UART takes timeout in milliseconds\n\t\t\t\t\"timeout\" : int(self.read_timeout * 1000),\n\t\t\t\t\"timeout_char\" : int(self.read_timeout * 1000),\n\n\t\t}\n\n\t\tif tx_pin:\n\t\t\tself.init_args[\"tx\"] = tx_pin\n\t\telse:\n\t\t\tself.init_args[\"tx\"] = 25\n\n\t\tif rx_pin:\n\t\t\tself.init_args[\"rx\"] = rx_pin\n\t\telse:\n\t\t\tself.init_args[\"rx\"] = 26\n\n\t\tprint(\"Creating uart\")\n\n\t\tself.uart = machine.UART(\n\t\t\t\t\tdevice_no,\n\t\t\t\t\t9600,\n\t\t\t\t\ttx = self.init_args[\"tx\"],\n\t\t\t\t\trx = self.init_args[\"rx\"],\n\t\t\t)\n\n\t\tprint(\"Configuring\")\n\n\t\tprint(\"uart init params:\", self.init_args)\n\n\t\tself.uart.init(**self.init_args)\n\n\t\tself.probe_set_baud()\n\n\t\tprint(\"Super call:\", args, kwargs)\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef _autobaud(self):\n\t\tfor baudrate in self.BAUD_VALUES:\n\t\t\tself.log.info(\"Autobaud: Probing with baudrate: %s\", baudrate)\n\t\t\tself.init_args['baudrate'] = baudrate\n\t\t\tself.uart.init(**self.init_args)\n\t\t\tfor __ in range(2):\n\t\t\t\ttry:\n\t\t\t\t\t# self.uart.flush()\n\t\t\t\t\t# Flush hack replacement.\n\t\t\t\t\ttime.sleep(0.5)\n\t\t\t\t\tself.set_cmd_response_mode(3)\n\t\t\t\t\tself.log.info(\"Autobaud: Connected to display with baudrate %s\", baudrate)\n\t\t\t\t\treturn\n\n\t\t\t\texcept Exception as err:\n\t\t\t\t\t# uPython traceback output\n\t\t\t\t\tsys.print_exception(err)\n\n\t\traise RuntimeError(\"Failed to establish communication with display!\")\n\n\tdef 
probe_set_baud(self):\n\t\tself._autobaud()\n\t\tself.set_baud(115200, save=True)\n\t\tself._autobaud()\n\n\n\tdef _write_internal(self, message):\n\t\tself.uart.write(message)\n\n\tdef _read_internal(self, cmax, timeout):\n\n\t\tif timeout is None:\n\t\t\ttimeout = self.read_timeout\n\t\tbytes_buf = []\n\n\t\tcount = 0\n\t\ttime_now = time.time()\n\t\twhile timeout == 0 or (time.time() - time_now) < timeout:\n\t\t\tread_byte = self.uart.read(1)\n\t\t\tif read_byte is None or read_byte == b\"\":\n\t\t\t\tcontinue\n\n\n\t\t\tread_char = read_byte[0]\n\n\t\t\tif read_char == 0xff and not bytes_buf:\n\t\t\t\tcontinue\n\n\t\t\tif read_char != 0x00:\n\t\t\t\t# self.log.debug(\"Rx: %02x, %s, %s\", read_char, len(bytes_buf), count)\n\n\t\t\t\tbytes_buf.append(read_char)\n\t\t\t\tif len(bytes_buf) == cmax:\n\t\t\t\t\t# self.log.debug(\"Reached cmax (%s) -> '%s'\", cmax, bytes_buf)\n\t\t\t\t\treturn bytes_buf\n\t\t\t\tif read_char == 0xff:\n\t\t\t\t\tcount = count + 1\n\t\t\t\t\tif count == 3:\n\t\t\t\t\t\treturn bytes_buf\n\t\t\t\telse:\n\t\t\t\t\tcount = 0\n\n\t\tprint(\"Read internal returning after reading: '%s'\" % bytes_buf)\n\t\treturn bytes_buf\n\n\tdef set_baud(self, baud, save=False):\n\n\t\tself.log.info(\"Setting display baudrate to %s\", baud)\n\t\tif baud not in self.BAUD_VALUES:\n\t\t\traise ValueError(\"Baud rate not supported: %s\" % baud)\n\n\t\tself.set('baud' + ('s' if save else ''), baud)\n\n\nif __name__ == \"__main__\":\n\t#ser=serial.Serial('/dev/ttyMCC',9600,timeout=0)\n\tport = serial.Serial('/dev/tty.SLAB_USBtoUART', 9600, timeout=0)\n\tport.flushOutput()\n\tnextion = Nextion(port)\n\tprint('Serial connected')\n\t#nextion.set_dim(50)\n\t#for p in range(0,3):\n\t# nextion.set_page(p)\n\t# nextion.set_text('t0',\"Fede<3\")\n\t# print(nextion.get_page())\n\t#nextion.set_dim(100)\n\n\tpageBoatSpeed = nextion.page_reference(0)\n\tpageHeading = nextion.page_reference(1)\n\tpageWindSpeed = nextion.page_reference(2)\n\n\ttxtBoatSpeedValue = pageBoatSpeed.hook_text(\"t1\")\n\ttxtBoatSpeedAttr = pageBoatSpeed.hook_text(\"t2\")\n\ttxtBoatSpeedUnit = pageBoatSpeed.hook_text(\"t3\")\n\n\ttxtHeadingValue = pageHeading.hook_text(\"t1\")\n\ttxtHeadingTrueMag = pageHeading.hook_text(\"t3\")\n\n\ttxtWindSpeedValue = pageWindSpeed.hook_text(\"t1\")\n\ttxtWindSpeedTrueApp = pageWindSpeed.hook_text(\"t2\")\n\ttxtWindSpeedUnit = pageWindSpeed.hook_text(\"t3\")\n\n\tpageBoatSpeed.show()\n\ttxtBoatSpeedValue.set(2.2)\n\tpageHeading.show()\n\ttxtHeadingValue.set(300)\n\tpageWindSpeed.show()\n\ttxtWindSpeedValue.set(3.75)\n\n\tnextion.clear(Nextion.RED)\n\tnextion.drawBox(0, 0, 100, 100, Nextion.BLUE)\n\tnextion.drawRectangle(0, 0, 100, 100, Nextion.YELLOW)\n\tnextion.drawLine(0, 0, 100, 100, Nextion.GREEN)\n\tnextion.drawCircle(100, 100, 50, Nextion.BROWN)\n\t#nextion.drawString(0,0,400,200,2,4096,3072,1,1,1,\"*\")\n\n\tpageBoatSpeed.show()\n\ttxtBoatSpeedValue.set(\"Fede<3\")\n\n","sub_path":"pynextion/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"183224143","text":"game_summary = []\n\nrounds_lost = 0\nrounds_won = 0\nrounds_drawn = 0\nrounds_played = 5\n\nfor item in range(1, 6):\n result = input(\"choose result: \")\n\n outcome = \"Round {}: {}\".format(item, result)\n\n if result == \"lost\":\n rounds_lost += 1\n elif result == \"won\":\n rounds_won += 1\n elif result == \"draw\":\n rounds_drawn += 1\n\n game_summary.append(outcome)\n\n# Calculate 
Game Stats\npercent_win = rounds_won / rounds_played * 100\npercent_lose = rounds_lost / rounds_played * 100\npercent_drawn = rounds_drawn / rounds_played * 100\n\nprint()\nprint(\"***** Game History *****\")\nfor game in game_summary:\n print(game)\n\nprint()\n\n# display game stats with % values to the nearest whole number\nprint(\"***** Game Statistics *****\")\nprint(\"Win: {}, ({:.0f}%)\\nLoss {}, ({:.0f}%)\\nDraw {}, ({:.0f}%)\"\n .format(rounds_won, percent_win, rounds_lost, percent_lose, rounds_drawn, percent_drawn))","sub_path":"07_game_summary_v1.py","file_name":"07_game_summary_v1.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"640068086","text":"from __future__ import print_function\n# from collections import defaultdict\nfrom datetime import datetime\nimport gc\nimport json\nfrom multiprocessing import Manager, Pipe, Process\nimport sqlite3\nfrom requests import ConnectionError\nfrom sys import argv\nimport signal\nfrom wotconsole import WOTXResponseError, player_tank_statistics, player_data\nfrom wotconsole import vehicle_info\n\ntry:\n range = xrange\nexcept NameError:\n pass\n\n\ndef query(worker_number, parent_pipe, api_key, result_queues, error_queue,\n delay=0.00000001, timeout=10, max_retries=5, debug=False):\n '''\n Pull data from WG's servers. This allows us to retry pages up until a\n certain point\n '''\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n buf = 0\n not_done = True\n while not_done:\n try:\n if not parent_pipe.poll(delay):\n # No work received yet. Wait.\n continue\n received = parent_pipe.recv()\n # If we get a number lower than one, we exit the process\n if not received:\n not_done = False\n break\n if not isinstance(received, int):\n raise TypeError(\n '{}: Bad entry from pipe'.format(worker_number))\n retries = max_retries\n realm = 'xbox' if received < 1000000000 else 'ps4'\n ans = None\n player_queued = False\n queried = False\n while retries:\n try:\n # We can get a query limit error from the server in this\n # loop. 
To prevent duplicate player retrieval, we'll check\n # if we've already received a response for it and move on\n if not queried:\n # Try Xbox first\n ans = player_data(\n received,\n api_key,\n fields=[\n '-statistics.company',\n '-statistics.frags',\n '-private'],\n timeout=timeout,\n api_realm=realm)\n # Xbox and PS4 account ID ranges do not overlap\n # if not ans[str(received)]:\n # realm = 'ps4'\n # ans = player_data(\n # received,\n # api_key,\n # fields=[\n # '-statistics.company',\n # '-statistics.all.frags',\n # '-statistics.frags',\n # '-private'],\n # timeout=timeout,\n # api_realm=realm)\n queried = True\n # An empty response = player was removed or ID not yet made\n if ans.data and ans[str(received)]:\n # Prevent duplicate entries into Queues\n if not player_queued:\n player_queued = True\n p = ans[str(received)]\n p.update(p['statistics']['all'])\n del p['statistics']['all']\n p.update(p['statistics'])\n del p['statistics']\n p['console'] = realm\n p['created_at_raw'] = p['created_at']\n p['last_battle_time_raw'] = p['last_battle_time']\n p['updated_at_raw'] = p['updated_at']\n p['created_at'] = datetime.strftime(\n datetime.utcfromtimestamp(p['created_at']),\n '%Y-%m-%d')\n p['last_battle_time'] = datetime.strftime(\n datetime.utcfromtimestamp(\n p['last_battle_time']),\n '%Y-%m-%d')\n p['updated_at'] = datetime.strftime(\n datetime.utcfromtimestamp(p['updated_at']),\n '%Y-%m-%d')\n for result_queue in result_queues:\n result_queue.put(('player', p))\n tanks = player_tank_statistics(\n received,\n api_key,\n fields=[\n '-company',\n '-frags',\n '-in_garage',\n '-in_garage_updated'],\n timeout=timeout,\n api_realm=realm)\n # Some player IDs return an empty dict\n if str(received) not in tanks.data:\n error_queue.put(\n (received, Exception(\n \"Method 'player_tank_statistics' did not yield any tanks\")))\n elif tanks[str(received)] is None:\n pass\n else:\n for tank in tanks[str(received)]:\n tank.update(tank['all'])\n tank['last_battle_time_raw'] = tank[\n 'last_battle_time']\n tank['last_battle_time'] = datetime.strftime(\n datetime.utcfromtimestamp(\n tank['last_battle_time']),\n '%Y-%m-%d')\n del tank['all']\n for result_queue in result_queues:\n result_queue.put(('tank', tank))\n buf += 1\n if buf >= 1000:\n gc.collect()\n buf = 0\n if debug:\n print(\n 'QT{:2}: Cleared memory'.format(worker_number))\n parent_pipe.send(received)\n break\n # Patch: until WOTXResponseError is updated, exceeding the API\n # request limit is not properly handled by the library\n except (TypeError, ConnectionError) as ce:\n # print('Error for page {}'.format(page_no))\n # print(ce)\n if 'Max retries exceeded with url' in str(ce):\n retries -= 1\n else:\n parent_pipe.send('{} (Error)'.format(received))\n error_queue.put((received, ce))\n break\n except WOTXResponseError as wg:\n if 'REQUEST_LIMIT_EXCEEDED' in wg.message:\n retries -= 1\n else:\n parent_pipe.send('{} (Error)'.format(received))\n error_queue.put((received, wg))\n break\n if not retries:\n parent_pipe.send('{} (Retry limit exceeded)'.format(received))\n error_queue.put((received, Exception('Retry limit exceeded')))\n # Just in case!\n del ans\n received = None\n except Exception as e:\n print('QT{:2}: Unknown error: {}'.format(worker_number, e))\n try:\n error_queue.put((received, e))\n parent_pipe.send('Unknown (Error)')\n except:\n pass\n print('QT{:2}: Exiting'.format(worker_number))\n\n\ndef update_stats_database(data_queue, conn, tanks, outfile, error_queue,\n delay=0.0000000001, debug=False):\n with sqlite3.connect(outfile) as db:\n 
signal.signal(signal.SIGINT, signal.SIG_IGN)\n buf = 0\n c = db.cursor()\n # setup database\n c.execute(\"\"\"Create table if not exists tanks(\n name TEXT,\n short_name TEXT,\n tank_id INTEGER PRIMARY KEY,\n tier INTEGER,\n is_premium INTEGER,\n type TEXT,\n nation TEXT,\n price_gold INTEGER)\"\"\")\n for _, t in tanks.iteritems():\n c.execute('insert or ignore into tanks values (?,?,?,?,?,?,?,?)',\n tuple(t[k] for k in (\n 'name',\n 'short_name',\n 'tank_id',\n 'tier',\n 'is_premium',\n 'type',\n 'nation',\n 'price_gold')))\n db.commit()\n c.execute(\"\"\"Create table if not exists players(\n account_id INTEGER PRIMARY KEY,\n battles INTEGER,\n capture_points INTEGER,\n console TEXT,\n created_at TEXT,\n created_at_raw INTEGER,\n damage_assisted_radio INTEGER,\n damage_assisted_track INTEGER,\n damage_dealt INTEGER,\n damage_received INTEGER,\n direct_hits_received INTEGER,\n dropped_capture_points INTEGER,\n explosion_hits INTEGER,\n explosion_hits_received INTEGER,\n frags INTEGER,\n global_rating INTEGER,\n hits INTEGER,\n last_battle_time TEXT,\n last_battle_time_raw INTEGER,\n losses INTEGER,\n max_damage INTEGER,\n max_damage_tank_id INTEGER,\n max_frags INTEGER,\n max_frags_tank_id INTEGER,\n max_xp INTEGER,\n max_xp_tank_id INTEGER,\n nickname TEXT,\n no_damage_direct_hits_received INTEGER,\n piercings INTEGER,\n piercings_received INTEGER,\n shots INTEGER,\n spotted INTEGER,\n survived_battles INTEGER,\n trees_cut INTEGER,\n updated_at TEXT,\n updated_at_raw INTEGER,\n wins INTEGER,\n xp INTEGER)\"\"\")\n # faster than sorting keys, even if it adds a lot of lines\n player_keys = (\n 'account_id',\n 'battles',\n 'capture_points',\n 'console',\n 'created_at',\n 'created_at_raw',\n 'damage_assisted_radio',\n 'damage_assisted_track',\n 'damage_dealt',\n 'damage_received',\n 'direct_hits_received',\n 'dropped_capture_points',\n 'explosion_hits',\n 'explosion_hits_received',\n 'frags',\n 'global_rating',\n 'hits',\n 'last_battle_time',\n 'last_battle_time_raw',\n 'losses',\n 'max_damage',\n 'max_damage_tank_id',\n 'max_frags',\n 'max_frags_tank_id',\n 'max_xp',\n 'max_xp_tank_id',\n 'nickname',\n 'no_damage_direct_hits_received',\n 'piercings',\n 'piercings_received',\n 'shots',\n 'spotted',\n 'survived_battles',\n 'trees_cut',\n 'updated_at',\n 'updated_at_raw',\n 'wins',\n 'xp'\n )\n c.execute(\"\"\"Create table if not exists stats(\n account_id INTEGER,\n battle_life_time INTEGER,\n battles INTEGER,\n capture_points INTEGER,\n damage_assisted_radio INTEGER,\n damage_assisted_track INTEGER,\n damage_dealt INTEGER,\n damage_received INTEGER,\n direct_hits_received INTEGER,\n dropped_capture_points INTEGER,\n explosion_hits INTEGER,\n explosion_hits_received INTEGER,\n frags INTEGER,\n hits INTEGER,\n last_battle_time TEXT,\n last_battle_time_raw INTEGER,\n losses INTEGER,\n mark_of_mastery INTEGER,\n max_frags INTEGER,\n max_xp INTEGER,\n no_damage_direct_hits_received INTEGER,\n piercings INTEGER,\n piercings_received INTEGER,\n shots INTEGER,\n spotted INTEGER,\n survived_battles INTEGER,\n tank_id INTEGER,\n trees_cut INTEGER,\n wins INTEGER,\n xp INTEGER)\"\"\")\n tank_keys = (\n 'account_id',\n 'battle_life_time',\n 'battles',\n 'capture_points',\n 'damage_assisted_radio',\n 'damage_assisted_track',\n 'damage_dealt',\n 'damage_received',\n 'direct_hits_received',\n 'dropped_capture_points',\n 'explosion_hits',\n 'explosion_hits_received',\n 'frags',\n 'hits',\n 'last_battle_time',\n 'last_battle_time_raw',\n 'losses',\n 'mark_of_mastery',\n 'max_frags',\n 'max_xp',\n 
'no_damage_direct_hits_received',\n 'piercings',\n 'piercings_received',\n 'shots',\n 'spotted',\n 'survived_battles',\n 'tank_id',\n 'trees_cut',\n 'wins',\n 'xp'\n )\n try:\n while not conn.poll(delay):\n try:\n while not data_queue.empty():\n classification, data = data_queue.get()\n if classification == 'tank':\n c.execute(\n 'insert into stats values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n tuple(data[t] for t in tank_keys))\n elif classification == 'player':\n try:\n c.execute(\n 'insert into players values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n tuple(data[p] for p in player_keys))\n except sqlite3.IntegrityError:\n pass\n else:\n print('SQL : Uh, got some bad data?')\n buf += 1\n # if debug:\n # print('SQL: Got data for', data['account_id'])\n if buf >= 2500:\n db.commit()\n gc.collect()\n buf = 0\n if debug:\n print('SQL : Cleared buffer') # and memory')\n except (ValueError, sqlite3.OperationalError) as oe:\n error_queue.put(\n ('(sql) {}'.format(data['account_id']), oe))\n if debug:\n print('SQL : Parent signal received. Clearing queue')\n while not data_queue.empty():\n try:\n classification, data = data_queue.get()\n if classification == 'tank':\n c.execute(\n 'insert into stats values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n tuple(data[t] for t in tank_keys))\n elif classification == 'player':\n try:\n c.execute(\n 'insert into players values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\n tuple(data[p] for p in player_keys))\n except sqlite3.IntegrityError:\n pass\n else:\n print('SQL : Uh, got some bad data?')\n buf += 1\n # if debug:\n # print('SQL: Got data for', data['account_id'])\n if buf >= 2500:\n db.commit()\n gc.collect()\n buf = 0\n if debug:\n print('SQL : Cleared buffer') # and memory')\n except (ValueError, sqlite3.OperationalError) as oe:\n error_queue.put(\n ('(sql) {}'.format(data['account_id']), oe))\n\n except (IOError, EOFError):\n print('SQL : Did the parent terminate?')\n except Exception as e:\n print('SQL :', e)\n error_queue.put(('sql', e))\n finally:\n db.commit()\n print('SQL : Exiting')\n\n\ndef update_csv(data_queue, conn, tanks, stats_filename,\n player_filename, tankinfo_filename, error_queue,\n delay=0.0000000001, debug=False):\n buf = 0\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n stats_csv = open(stats_filename, 'a')\n player_csv = open(player_filename, 'a')\n with open(tankinfo_filename, 'w') as tankinfo_csv:\n tankinfo_csv.write(\n 'name,short_name,tank_id,tier,is_premium,type,nation,price_gold\\n')\n for _, t in tanks.iteritems():\n tankinfo_csv.write(','.join(\n map(\n lambda x: str(x) if not isinstance(\n x, unicode) else x.encode('utf-8'),\n tuple(t[k] for k in (\n 'name',\n 'short_name',\n 'tank_id',\n 'tier',\n 'is_premium',\n 'type',\n 'nation',\n 'price_gold')\n )\n )) + '\\n')\n player_keys = (\n 'account_id',\n 'battles',\n 'capture_points',\n 'console',\n 'created_at',\n 'created_at_raw',\n 'damage_assisted_radio',\n 'damage_assisted_track',\n 'damage_dealt',\n 'damage_received',\n 'direct_hits_received',\n 'dropped_capture_points',\n 'explosion_hits',\n 'explosion_hits_received',\n 'frags',\n 'global_rating',\n 'hits',\n 'last_battle_time',\n 'last_battle_time_raw',\n 'losses',\n 'max_damage',\n 'max_damage_tank_id',\n 'max_frags',\n 'max_frags_tank_id',\n 'max_xp',\n 'max_xp_tank_id',\n 'nickname',\n 'no_damage_direct_hits_received',\n 'piercings',\n 'piercings_received',\n 'shots',\n 'spotted',\n 
'survived_battles',\n 'trees_cut',\n 'updated_at',\n 'updated_at_raw',\n 'wins',\n 'xp'\n )\n player_csv.write(','.join(player_keys) + '\\n')\n tank_keys = (\n 'account_id',\n 'battle_life_time',\n 'battles',\n 'capture_points',\n 'damage_assisted_radio',\n 'damage_assisted_track',\n 'damage_dealt',\n 'damage_received',\n 'direct_hits_received',\n 'dropped_capture_points',\n 'explosion_hits',\n 'explosion_hits_received',\n 'frags',\n 'hits',\n 'last_battle_time',\n 'last_battle_time_raw',\n 'losses',\n 'mark_of_mastery',\n 'max_frags',\n 'max_xp',\n 'no_damage_direct_hits_received',\n 'piercings',\n 'piercings_received',\n 'shots',\n 'spotted',\n 'survived_battles',\n 'tank_id',\n 'trees_cut',\n 'wins',\n 'xp'\n )\n stats_csv.write(','.join(tank_keys) + '\\n')\n try:\n while not conn.poll(delay):\n while not data_queue.empty():\n classification, data = data_queue.get()\n if classification == 'tank':\n stats_csv.write(','.join(\n tuple(unicode(data[t]) for t in tank_keys)) + '\\n')\n elif classification == 'player':\n player_csv.write(','.join(\n tuple(unicode(data[p]) for p in player_keys)) + '\\n')\n else:\n print('CSV : Uh, got some bad data?')\n # if debug:\n # print('CSV: Got data for', data['account_id'])\n buf += 1\n if buf >= 2500:\n gc.collect()\n buf = 0\n if debug:\n print('CSV : Cleared memory')\n if debug:\n print('CSV : Parent signal received. Clearing queue')\n while not data_queue.empty():\n classification, data = data_queue.get()\n if classification == 'tank':\n stats_csv.write(','.join(\n tuple(unicode(data[t]) for t in tank_keys)) + '\\n')\n elif classification == 'player':\n player_csv.write(','.join(\n tuple(unicode(data[p]) for p in player_keys)) + '\\n')\n else:\n print('CSV : Uh, got some bad data?')\n buf += 1\n if buf >= 2500:\n gc.collect()\n buf = 0\n if debug:\n print('CSV : Cleared memory')\n # if debug:\n # print('CSV: Got data for', data['account_id'])\n\n except (IOError, EOFError):\n print('CSV: Did the parent terminate?')\n except Exception as e:\n error_queue.put(('csv', e))\n finally:\n player_csv.close()\n stats_csv.close()\n print('CSV : Exiting')\n\n\ndef error_logger(exception_queue, outfile, conn,\n delay=0.0000000001, debug=False):\n try:\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n buf = 0\n with open(outfile, 'w') as logfile:\n not_done = True\n while not_done:\n if conn.poll(delay):\n not_done = False\n while not exception_queue.empty():\n pid, e = exception_queue.get()\n logfile.write(str(pid) + ': ' + str(e) + '\\n')\n logfile.flush()\n buf += 1\n if buf >= 2500:\n gc.collect()\n buf = 0\n if debug:\n print('Log : Cleared memory')\n except (IOError, EOFError):\n print('Log : Did the parent terminate?')\n except Exception as e:\n print('Log :', e)\n print('Log : Exiting')\n\n\ndef generate_players(sqldb, start, finish):\n '''\n Create the list of players to query for. 
If the SQLite database already has\n a list, we'll use it; otherwise we'll generate a range.\n\n :param sqldb: File name of SQLite database\n :param int start: Starting account ID number\n :param int finish: Ending account ID number\n '''\n with sqlite3.connect(sqldb) as db:\n try:\n return filter(lambda x: start <= x <= finish,\n map(lambda p: p[0],\n db.execute('select account_id from players').fetchall()))\n except (ValueError, sqlite3.OperationalError):\n return range(start, finish)\n\n# 1 Worker to export to CSV\n# 1 Worker to export errors to file\n# 1 Worker to handle database\n# N-number of workers to handle requests to API\n\nif __name__ == '__main__':\n with open(argv[1]) as f:\n config = json.load(f)\n\n manager = Manager()\n\n process_count = 4 if 'pool size' not in config else config['pool size']\n start_account = 1 if 'start account' not in config else config[\n 'start account']\n max_account = 13000000 if 'max account' not in config else config[\n 'max account']\n max_retries = 5 if 'max retries' not in config else config[\n 'max retries']\n timeout = 15 if 'timeout' not in config else config['timeout']\n delay = 0.0000000001 if 'delay' not in config else config['delay']\n join_wait = 1800 if 'join wait' not in config else config['join wait']\n debug = False if 'debug' not in config else config['debug']\n if 'database' not in config:\n config['database'] = None\n\n player_ids = generate_players(\n config['database'],\n start_account,\n max_account)\n\n handlers = []\n handler_conns = []\n queues = []\n\n vehicles = vehicle_info(\n config['application_id'],\n fields=[\n 'name',\n 'price_gold',\n 'short_name',\n 'is_premium',\n 'tier',\n 'type',\n 'tank_id',\n 'nation']).data\n\n if 'error log' in config:\n error_handler_conn, error_child_conn = Pipe()\n error_queue = manager.Queue()\n error_handler = Process(\n name='Error Handler',\n target=error_logger,\n args=(\n error_queue,\n config['error log'],\n error_child_conn,\n delay,\n debug))\n handlers.append(error_handler)\n handler_conns.append(error_handler_conn)\n\n if 'sql' in config['output']:\n sql_handler_conn, sql_child_conn = Pipe()\n sql_queue = manager.Queue()\n sql_handler = Process(\n name='Stats DB Handler',\n target=update_stats_database,\n args=(\n sql_queue,\n sql_child_conn,\n # config['input'],\n vehicles,\n config['output']['sql'],\n error_queue,\n delay,\n debug))\n handlers.append(sql_handler)\n handler_conns.append(sql_handler_conn)\n queues.append(sql_queue)\n\n if 'csv' in config['output']:\n csv_handler_conn, csv_child_conn = Pipe()\n csv_queue = manager.Queue()\n csv_handler = Process(\n name='Player DB Handler',\n target=update_csv,\n args=(\n csv_queue,\n csv_child_conn,\n # config['input'],\n vehicles,\n config['output']['csv'],\n config['output']['players'],\n config['output']['tanks'],\n error_queue,\n delay,\n debug))\n handlers.append(csv_handler)\n handler_conns.append(csv_handler_conn)\n queues.append(csv_queue)\n\n pipes = []\n processes = []\n waiting = []\n for ps in range(0, process_count):\n parent_conn, child_conn = Pipe()\n processes.append(\n Process(\n target=query,\n args=(\n ps + 1,\n child_conn,\n config['application_id'],\n queues,\n error_queue,\n delay,\n timeout,\n max_retries,\n debug)))\n pipes.append(parent_conn)\n waiting.append(True)\n\n try:\n # pool_handler_conn, pool_child_conn = Pipe()\n if debug:\n print('Starting data handlers')\n for handler in handlers:\n handler.start()\n for p in processes:\n p.start()\n not_done = True\n player_iter = iter(player_ids)\n if 
debug:\n print('Main: Adding work to pool')\n buf = 0\n while not_done:\n for n, process in enumerate(processes):\n if pipes[n].poll(delay):\n received = pipes[n].recv()\n if debug and received:\n print(\n 'Main: Worker {:2} got account {}'.format(\n n + 1, received))\n waiting[n] = True\n if waiting[n]:\n try:\n pipes[n].send(player_iter.next())\n waiting[n] = False\n except StopIteration:\n if debug:\n print('Main: No more player IDs to process')\n not_done = False\n break\n buf += 1\n if buf >= 5000000:\n gc.collect()\n buf = 0\n if debug:\n print('Main: Cleared memory')\n except (KeyboardInterrupt, SystemExit):\n print('Main: Attempting to prematurely terminate processes')\n finally:\n for n, p in enumerate(processes):\n pipes[n].send(0)\n p.join()\n # player_queue.close()\n for conn in handler_conns:\n conn.send(-1)\n # sql_handler_conn.send(-1)\n # csv_handler_conn.send(-1)\n if debug:\n print('Main: Sending signal to queue handler(s)')\n for handler in handlers:\n handler.join(join_wait)\n # sql_handler.join(join_wait)\n # csv_handler.join(join_wait)\n","sub_path":"src/getAllFinal.py","file_name":"getAllFinal.py","file_ext":"py","file_size_in_byte":29046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"120763468","text":"#Given a string, find the length of the longest substring without repeating characters.\n#Example:\n#Input: \"abcbacbb\"\n#Output: 3\n\n#Solution: Take the string, split into a list of chars\n#Place first two chars into a new list.\n#Compare each char to each other char in the new list.\n#If no repeats, set longestSubstring to length of the list, add another char and try again.\n#If there is a repeat, remove first char from the list and check again.\n\nclass Solution:\n def lengthOfLongestSubstring(s: str) -> int:\n longestSubstring = 1\n sequence = list(s)\n substring = list()\n substring.append(sequence[0])\n substring.append(sequence[1])\n highestIndex = 1\n while highestIndex < len(sequence)-1:\n noRepeats = True\n for x in range(0, len(substring)):\n for y in range(x+1, len(substring)):\n if substring[x] == substring[y]:\n substring.pop(0)\n noRepeats = False\n if noRepeats:\n longestSubstring = len(substring)\n highestIndex += 1\n substring.append(sequence[highestIndex])\n return longestSubstring\n\nstring = \"The quick brown fox jumped over the lazy dog\"\nlength = Solution.lengthOfLongestSubstring(string)\nprint(length)","sub_path":"src/longestSubstringNoRepeats.py","file_name":"longestSubstringNoRepeats.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"364765614","text":"import os\nimport pydicom\nfrom pydicom.data import get_testdata_files\nimport numpy as np\n\ndef read_files(path):\n\n file = get_testdata_files(path)[0]\n ds = pydicom.dcmread(file)\n return ds\n\nfile1 = read_files('ttfm.dcm')\n#print(file1)\n\nfile2 = read_files('bmode.dcm')\nprint(file2)\n\n\n# For Saving output in text format\n\nnp.savetxt(r'out.txt', file1, fmt='%s')\n\nwith open('out.txt','ab') as f:\n np.savetxt(f, file2, delimiter=\",\", fmt='%s')\n\n\n","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"338876241","text":"fname = input('Enter the file name: ')\n\ntry:\n fhand = open(fname)\nexcept:\n print ('File cannot be opened: {}'.format(fname))\n exit()\n\ncount = 0\nfor line in 
fhand:\n if line.startswith('From'):\n count += 1\n\nprint ('There were {} subject lines'.format(count))\n\n\n\n\n","sub_path":"fname.py","file_name":"fname.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"51829447","text":"import psycopg2\nfrom flask import Flask, g, request\nfrom psycopg2.extras import DictCursor\n\nfrom accounting.core.entities import Material, PetrolStation\nfrom accounting.core.utils import json_response\nfrom accounting.service.service import AccountingService\n\napp = Flask(__name__)\naccounting_service: AccountingService = None\n\n\n@app.before_first_request\ndef before_first_request():\n \"\"\"\n Initializes everything before the first request\n Works similar to post-construct phase in Java\n \"\"\"\n g.connection = psycopg2.connect(app.config['DATABASE_NAME'])\n g.cur = g.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n global accounting_service\n accounting_service = AccountingService(g.connection, g.cur)\n\n\n@app.route('/material', methods=['POST'])\ndef create_materials():\n # Extract incoming params from request\n params = request.get_json()\n\n materials = [Material(param) for param in params]\n accounting_service.create_materials(materials)\n\n return json_response()\n\n\n@app.route('/material/<id>', methods=['GET'])\ndef get_material(id):\n material = accounting_service.get_material(id)\n return json_response(str(material))\n\n\n@app.route('/material', methods=['GET'])\ndef get_materials():\n materials = accounting_service.get_materials()\n return json_response(str(materials))\n\n\n@app.route('/material/<id>', methods=['DELETE'])\ndef remove_material(id):\n accounting_service.remove_material(id)\n return json_response()\n\n\n@app.route('/petrolstation', methods=['POST'])\ndef create_petrol_station():\n # Extract incoming params from request\n params = request.get_json()\n\n petrol_stations = [PetrolStation(param) for param in params]\n accounting_service.create_petrol_stations(petrol_stations)\n\n return json_response()\n\n\n@app.route('/petrolstation/<id>', methods=['DELETE'])\ndef remove_petrol_station(id):\n accounting_service.remove_petrol_station(id)\n return json_response()\n\n\n@app.route('/petrolstation/<id>', methods=['GET'])\ndef get_petrol_station(id):\n petrol_station = accounting_service.get_petrol_station(id)\n return json_response(str(petrol_station))\n\n\n@app.route('/petrolstation', methods=['GET'])\ndef get_petrol_stations():\n petrol_stations = accounting_service.get_petrol_stations()\n return json_response(str(petrol_stations))\n\n\n@app.route('/petrolstation/<petrol_station_id>/material', methods=['GET'])\ndef get_materials_on_petrol_station(petrol_station_id):\n materials = accounting_service.get_materials_on_petrol_station(petrol_station_id)\n return json_response(str(materials))\n\n\n@app.route('/petrolstation/<petrol_station_id>/material/<material_id>', methods=['GET'])\ndef get_material_on_petrol_station(petrol_station_id, material_id):\n material = accounting_service.get_material_on_petrol_station(petrol_station_id, material_id)\n return json_response(str(material))\n\n\n@app.route('/petrolstation/<petrol_station_id>/material/<material_id>/add/<number>',\n methods=['PUT'])\ndef add_material_to_petrol_station(petrol_station_id, material_id, number):\n accounting_service.add_material_to_petrol_station(petrol_station_id, material_id, number)\n return json_response()\n\n\n@app.route('/petrolstation/<petrol_station_id>/material/<material_id>/take/<number>',\n methods=['PUT'])\ndef take_material_from_petrol_station(petrol_station_id, material_id, number):\n 
accounting_service.take_material_from_petrol_station(petrol_station_id, material_id, number)\n return json_response()\n","sub_path":"accounting/controller/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"195721319","text":"from django.urls import path\nfrom rest_framework.authtoken import views\nfrom rest_framework_simplejwt.views import TokenRefreshView\n\nfrom .views import create_user_and_get_token, send_code\n\nurlpatterns = [\n path('api-token-auth/',\n views.obtain_auth_token),\n path('token/',\n create_user_and_get_token,\n name='token_obtain_pair'),\n path('email/',\n send_code),\n path('token/refresh/',\n TokenRefreshView.as_view(),\n name='token_refresh'),\n]\n","sub_path":"yamdb_auth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"274886045","text":"# https://www.hackerrank.com/challenges/30-conditional-statements\n\n#!/bin/python3\n\nN = int(input().strip())\n\ns = \"Weird\"\n\nif N % 2 == 0 and (2 <= N <= 5 or N > 20):\n s = \"Not \" + s\n\nprint(s)\n","sub_path":"HackerRank/All Domains/Tutorials/30 Days of Code/Day 3: Intro to Conditional Statements.py","file_name":"Day 3: Intro to Conditional Statements.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"612144666","text":"import pandas as pd\nimport numpy as np\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n_STATUS = ['INIT', 'ITER', 'FINAL', 'STOP']\n\nclass Horizon(object):\n\n def __init__(self, tstart='2018-01-01 00:00:00', tend='2019-01-01 00:00:00', repeat_every='12 hours',\n horizon='2 days', time_step='15 minutes', tz=\"Europe/Paris\"):\n \"\"\"\n\n :param tstart:\n :param tend:\n :param repeat_every:\n :param duration:\n :param time_step:\n :param tz:\n \"\"\"\n\n\n self.tz_info = tz\n self.iter = None\n self.tstart = None\n self._current = None\n self._previous = None\n self.tend = None\n self._actual_horizon = None\n self._status = _STATUS[0]\n\n try:\n self.TSTART = pd.Timestamp(tstart).tz_localize(self.tz_info)\n self.TEND = pd.Timestamp(tend).tz_localize(self.tz_info)\n except ValueError as e:\n raise e\n\n self.horizon = pd.Timedelta(horizon)\n self.repeat_every = pd.Timedelta(repeat_every)\n self.time_step = pd.Timedelta(time_step)\n\n assert ((self.TEND - self.TSTART)%self.horizon)%self.time_step == pd.Timedelta('0s'), \\\n \"The last 'horizon' should be divisible by the chosen 'time_step'. \" \\\n \"Otherwise, TEND will not be present in the current horizon.\"\n assert self.horizon % self.time_step == pd.Timedelta('0s'), \\\n \"Parameter 'horizon' should be divisible by the chosen 'time_step'. \" \\\n \"Otherwise, tend will not be present in the current horizon.\"\n assert self.repeat_every % self.time_step == pd.Timedelta('0s'),\\\n \"Parameter 'repeat_every' should be divisible by the chosen 'time_step'. 
\" \\\n \"Otherwise, synchronization issues will raise for moving horizon.\"\n assert self.repeat_every.total_seconds() <= self.horizon.total_seconds(), \\\n \"duration of the current horizon must be greater than repeat_every.\"\n assert 2 * self.time_step.total_seconds() <= self.horizon.total_seconds(), \\\n \"duration of the current horizon must contain at least 2 timestamps\"\n\n self.reset()\n self.index = np.linspace(0, self.horizon.total_seconds(), num=len(self._current)) # time steps in seconds\n self.map = pd.Series(self._current, index=self.index)\n self.nfe = len(self.index) - 1 # number of finite element for discretisation\n\n @property\n def current(self):\n return self._current\n\n @current.setter\n def current(self, t):\n raise NotImplementedError('NotImplemented, user can not set the current horizon himself.')\n\n @property\n def previous(self):\n return self._previous\n\n @previous.setter\n def previous(self, value):\n raise NotImplementedError('NotImplemented, user can not set the previous horizon himself.')\n\n def next(self):\n\n if self._status == _STATUS[2]:\n self._status = _STATUS[3]\n logger.warning(f'Has no effect : the current horizon stops at horizon.TEND, we cannot go further.'\n f' Changing _status to {_STATUS[3]}')\n return\n\n if self.tend + self.repeat_every < self.TEND: # next horizon is within the time frame [TSTART, TEND]\n self.iter += 1\n self._previous = self._current\n self._current += self.repeat_every\n self.tend += self.repeat_every\n self.tstart += self.repeat_every\n\n self._status = _STATUS[1]\n logger.debug(f'Iteration over the horizon. Changing _status to {_STATUS[1]}')\n\n else : # next horizon should stop at TEND, in this case we should ensure that TEND is in self.current\n self.iter += 1\n self.tstart += self.repeat_every\n self.tend = self.TEND\n self._previous = self._current\n self._current = pd.date_range(start=self.tstart, freq=self.time_step, end=self.tend)\n\n assert self.TEND == self._current[-1], Warning('The last Timestamp of last horizon should be TEND. This is \\\n usually indicative of a modelling error. Try chosing another time_step or TEND.') # should not be necessary\n\n self._status = _STATUS[2]\n logger.info(f'Final horizon has been reached. Changing _status to {_STATUS[2]}')\n\n logger.info(f'Iteration over the horizon. '\n f'Tstart = {self.tstart}, '\n f'Tend={self.tend}. ')\n\n def reset(self):\n self.iter = 0\n self.tstart = pd.Timestamp(self.TSTART)\n self.tend = self.tstart + self.horizon\n self._current = pd.date_range(start=self.tstart, freq=self.time_step, end=self.tend)\n\n assert self.tend == self._current[-1], Warning('The last Timestamp of current horizon should be tend. This is \\\n usually indicative of a modelling error. ') # should not be necessary\n\n self._status = _STATUS[0]\n logger.debug(f'Initialization of the time horizon. 
TSTART = {self.TSTART}, TEND={self.TEND}.')\n\n logger.info(f'Initialization of the time horizon : '\n f'TSTART = {self.tstart}, '\n f'tend = {self.tend}, '\n f'time step = {self.time_step}, '\n f'repeat every = {self.repeat_every}')\n\n\ndef get_prediction_data(horizon, path ='.csv', usecols=None, tz_data='UTC', fillnan=False, filldict = {}, method='time'):\n\n \"\"\"\n :param Horizon horizon: Time horizon\n :param path: csv data file\n :param tz_data: time zone information ('UTC' or 'Europe/Paris')\n :return:\n\n \"\"\"\n # reading data file (all of it)\n if usecols is not None:\n d1 = pd.read_csv(path, index_col=0, usecols=usecols, parse_dates=True, dayfirst=True)\n else:\n d1 = pd.read_csv(path, index_col=0, parse_dates=True, dayfirst=True)\n\n # Nan values must be filled before interpolating, the user can use fillnan and filldict to do this\n if fillnan:\n try :\n for d in filldict:\n d1[d] = d1[d].fillna(filldict[d])\n except KeyError as e:\n raise e\n\n # convert date time index to the correct time zone\n if d1.index.tzinfo is None :\n d1.index = d1.index.tz_localize(tz_data).tz_convert(horizon.tz_info)\n else:\n d1.index = d1.index.tz_convert(horizon.tz_info)\n\n # be sure that the current horizon is in the data index set\n assert horizon.current[0] >= d1.index[0], \"\"\n assert horizon.current[-1] <= d1.index[-1], \"\"\n\n # Synchronizing / Interpolating data to the current horizon date time index\n\n dh1 = pd.DataFrame([np.NaN]*len(horizon.current), index=horizon.current, columns=['tmp'])\n\n import warnings\n with warnings.catch_warnings():\n # Pandas 0.24.1 emits useless warning when sorting tz-aware index\n warnings.simplefilter(\"ignore\")\n if method=='time':\n a = d1.join(dh1, how='outer').interpolate(method=method)\n if method=='pad':\n a = d1.join(dh1, how='outer').sort_index().fillna(method='pad')\n\n del a['tmp']\n\n # selecting only horizon period\n data_horizon = a.loc[horizon.current]\n\n return data_horizon","sub_path":"PyomoTutorial/microgrid/data/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"646894534","text":"##If sales are under $1,000, the user gets a 10% bonus.\n##If sales are $1,000 or over, the bonus is 15%.\n\nsalesTotal = float(input(\"Enter sales: $\"))\nif salesTotal < 1000:\n bonus = salesTotal * 0.1\nelse:\n bonus = salesTotal * 0.15\n\nprint(\"Bonus for sales is: $\", bonus, sep='')","sub_path":"Workshop2/salesBonus.py","file_name":"salesBonus.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"208396549","text":"from django.http import HttpResponse\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import render\n\nfrom tasks.models import Task\n\n\ndef task_list(request):\n \"\"\"\n Fetches all the tasks from the database and renders them\n :param request: HttpRequest\n :return: HttpResponse\n \"\"\"\n # fetch the model objects\n tasks = Task.objects.select_related(\"owner\", \"assigned\")\n\n # return the response\n context = {\n 'task_objects': tasks\n }\n\n return render(request, 'task/list.html', context)\n\ndef task_detail(request,task_pk):\n \"\"\"\n Fetches a task from the DB and renders it with a template\n :param request: HttpRequest\n :param task_pk: Primary key of the task to fetch\n :return: HttpResponse\n \"\"\"\n\n # fetch the task\n # there are 2 ways: way 1 => try and 
excepts, way 2: if-else checking the number of elements\n try:\n task = Task.objects.get(pk=task_pk)\n except Task.DoesNotExist:\n return HttpResponseNotFound(\"the task does not exist\")\n except Task.MultipleObjectsReturned:\n return HttpResponse(\"There are several tasks with that id\", status=300)\n\n # prepare the context\n context = {\n 'task': task\n }\n\n # render the template\n return render(request, 'task/detail.html', context)","sub_path":"src/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"64289113","text":"#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3\n\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\n\ndef replace_several(s, from_strs, to_strs):\n for from_str, to_str in zip(from_strs, to_strs):\n s = s.replace(from_str, to_str)\n return s\n\nscript_path = os.path.abspath(__file__)\nfolder = os.path.join(os.path.split(script_path)[0], 'winamp_output')\nfilenames = os.listdir(folder)\nmp3_filenames = []\nm3u_filename = None\nfor filename in filenames:\n if re.match(r'.+\\.mp3', filename):\n mp3_filenames.append(filename)\n elif re.match(r'.+\\.m3u8?', filename):\n if m3u_filename:\n print('Error: found more than one M3U file in winamp_output.')\n sys.exit(1)\n else:\n m3u_filename = filename\nif not m3u_filename:\n print('Error: couldn\\'t find an M3U file in winamp_output.')\n sys.exit(1)\nwith open(os.path.join(folder, m3u_filename), 'r') as f:\n contents = f.read()\n track_names = re.findall(r'#EXTINF:\\d+,(.+)', contents)\n for i in range(len(track_names)):\n unused = True\n for j in range(i):\n if track_names[i] == track_names[j]:\n unused = False\n break\n if not unused:\n count = 0\n while True:\n new_track_name = '{}-{}'.format(track_names[i], count)\n for j in range(i):\n if new_track_name == track_names[j]:\n break\n else:\n break\n count += 1\n track_names[i] = new_track_name\n extension = input('Enter music file extension: ') or 'spc'\n song_filenames = re.findall(r'.+?winamp_input\\\\(.+)\\.{}'.format(extension), contents)\nwhile True:\n user_input = input('Use filenames (e.g. \"{}\") or track names (e.g. \"{}\")? '.format(\n song_filenames[0], track_names[0]))\n if not user_input or user_input[0] in 'tT':\n song_names = track_names\n break\n elif user_input[0] in 'fF':\n song_names = song_filenames\n break\n elif user_input[0] in 'mM':\n print('Filename | Track name')\n pairs = list(zip(song_filenames, track_names))\n random.shuffle(pairs)\n for song_filename, track_name in pairs[:10]:\n print('{} | {}'.format(song_filename, track_name))\n else:\n print('Invalid response. 
Please enter \\'filenames\\' or \\'track names\\' to continue, or \\'more\\' to see more examples.')\nfor song_name in song_names:\n print(song_name)\nsong_name_regex = input('Enter song name regex: ') or '(.+)'\nalbum_name = input('Enter album name: ')\nfor mp3_filename in mp3_filenames:\n for index, (track_name, song_name) in enumerate(zip(track_names, song_names)):\n if mp3_filename == replace_several('{}.mp3'.format(track_name), ':/?', '_-.'):\n subprocess.run([\n 'mid3v2',\n '--album=' + album_name,\n '--song=' + re.match(song_name_regex, song_name).group(1),\n '--track=' + str(index + 1),\n os.path.join(folder, mp3_filename)],\n stdout=subprocess.DEVNULL)\n break\n else:\n print('Error: couldn\\'t find an entry for \"{}\" in the M3U file.'.format(mp3_filename))\n sys.exit(1)\n","sub_path":"spc.py","file_name":"spc.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"229948367","text":"#!/usr/bin/env python\n##########################################################################\n# $Id: turbine_consumer_script.py 4480 2013-12-20 23:20:21Z boverhof $\n# Joshua R. Boverhof, LBNL\n# See LICENSE.md for copyright notice!\n#\n# $Author: boverhof $\n# $Date: 2013-12-20 15:20:21 -0800 (Fri, 20 Dec 2013) $\n# $Rev: 4480 $\n#\n###########################################################################\nimport json\nimport optparse\nimport sys\nimport time\nimport dateutil.parser\nimport datetime\nfrom urllib.error import HTTPError\nfrom turbine.commands import add_options, post_page, put_page, \\\n get_page, get_page_by_url, get_paging, _open_config, load_pages_json, \\\n _print_page, _print_numbered_lines, add_json_option, _print_as_json\n\n\nSECTION = \"Consumer\"\n\n\ndef main(args=None, func=_print_numbered_lines):\n \"\"\"List all Consumer resources, by default print in human readable format.\n \"\"\"\n op = optparse.OptionParser(usage=\"USAGE: %prog [options] CONFIG_FILE\",\n description=main.__doc__)\n\n # add_options(op)\n op.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\",\n help=\"verbose output\")\n \"\"\"\n op.add_option(\"-p\", \"--page\", type=\"int\",\n action=\"store\", dest=\"page\", default=0,\n help=\"page number\")\n op.add_option(\"-r\", \"--rpp\", type=\"int\",\n action=\"store\", dest=\"rpp\", default=1000,\n help=\"results per page\")\n \"\"\"\n op.add_option(\"-s\", \"--status\",\n action=\"store\", dest=\"status\",\n help=\"query on status ['up'|'down'|'error']\")\n add_json_option(op)\n\n (options, args) = op.parse_args(args)\n if len(args) != 1:\n op.error('expecting 1 argument')\n\n configFile = _open_config(args[0])\n\n query = {}\n if options.status:\n query['status'] = options.status\n\n # NOTE RESOURCE NOT SUPPORTING PAGING\n #pages = get_paging(configFile, SECTION, options, **query)\n #data = load_pages_json(pages)\n page = get_page(configFile, SECTION, **query)\n data = json.loads(page)\n if options.json:\n func = _print_as_json\n if func:\n func(data)\n return data\n\n\ndef main_get_consumer_by_guid(args=None, func=_print_page):\n \"\"\"Retrieves consumer by GUID\n \"\"\"\n op = optparse.OptionParser(usage=\"USAGE: %prog [options] CONSUMER_GUID CONFIG_FILE\",\n description=main.__doc__)\n\n (options, args) = op.parse_args(args)\n if len(args) != 2:\n op.error('expecting 2 arguments')\n configFile = _open_config(args[1])\n query = dict(subresource='/%s' % args[0])\n page = get_page(configFile, SECTION, **query)\n data = 
json.loads(page)\n if func:\n func(data)\n return data\n\n\ndef main_log(args=None, func=_print_page):\n \"\"\"Retrieves logging messages from compute resource running the specified\n Consumer. Log messages are printed to screen in order. This functionality\n is not available in all deployments.\n \"\"\"\n op = optparse.OptionParser(usage=\"USAGE: %prog [options] CONFIG_FILE\",\n description=main_log.__doc__)\n\n (options, args) = op.parse_args(args)\n if len(args) != 2:\n op.error('expecting 2 arguments')\n configFile = _open_config(args[1])\n query = dict(subresource='/%s/log' % args[0])\n page = get_page(configFile, SECTION, **query)\n if func:\n func(page)\n return page\n\n\ndef main_get_config(args=None, func=_print_page):\n \"\"\"Return configuration settings for top-level Consumer resource, by default\n print as JSON. These settings are utilized by an orchestrator process\n (deployment specific). The AWS EC2 orchestator handles auto-scaling of instances.\n \"\"\"\n op = optparse.OptionParser(usage=\"USAGE: %prog [options] CONFIG_FILE\",\n description=main_get_config.__doc__)\n\n # add_options(op)\n op.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\",\n help=\"verbose output\")\n\n (options, args) = op.parse_args(args)\n if len(args) != 1:\n op.error('expecting 1 argument')\n\n configFile = _open_config(args[0])\n query = dict(subresource='/config')\n page = get_page(configFile, SECTION, **query)\n if func:\n func(page)\n return page\n\n\ndef main_update_config_floor(args=None, func=_print_page):\n \"\"\"Sets a floor for the number of Consumer processes that should remain running\n for a interval determined by the server. Currently this is only supported in\n the AWS EC2 deployment. By default prints entire resultant configuration in JSON.\n \"\"\"\n op = optparse.OptionParser(usage=\"USAGE: %prog [options] INT CONFIG_FILE\",\n description=main_update_config_floor.__doc__)\n\n # add_options(op)\n op.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\",\n help=\"verbose output\")\n\n (options, args) = op.parse_args(args)\n if len(args) != 2:\n op.error('expecting 2 argument')\n\n configFile = _open_config(args[1])\n query = dict(subresource='/config')\n\n page = put_page(configFile, SECTION, json.dumps(\n dict(floor=int(args[0]))), **query)\n if func:\n func(page)\n return page\n\n\ndef main_update_config_instanceType(args=None, func=_print_page):\n \"\"\"Sets the AWS EC2 instance type for new virtual machines. This feature is only\n relevant for the AWS EC2 deployment. 
By default prints entire resultant configuration in JSON.\n \"\"\"\n op = optparse.OptionParser(usage=\"USAGE: %prog [options] [t1.micro | m1.small | c1.medium] CONFIG_FILE\",\n description=main_update_config_instanceType.__doc__)\n\n # add_options(op)\n op.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", dest=\"verbose\",\n help=\"verbose output\")\n\n (options, args) = op.parse_args(args)\n if len(args) != 2:\n op.error('expecting 2 argument')\n\n configFile = _open_config(args[1])\n query = dict(subresource='/config')\n page = put_page(configFile, SECTION, json.dumps(\n dict(instance=args[0])), **query)\n if func:\n func(page)\n return page\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"turbine/commands/turbine_consumer_script.py","file_name":"turbine_consumer_script.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"452552908","text":"#!/usr/bin/env python3\n\n\"\"\"defines a single neuron performing binary classification\"\"\"\n\nimport numpy as np\n\n\nclass Neuron:\n \"\"\"class Neuron\"\"\"\n\n def __init__(self, nx):\n \"\"\"nx is the number of input features to the neuron\n Public instance attributes:\n W: The weights vector for the neuron.\n b: The bias for the neuron.\n A: The activated output of the neuron (prediction).\n \"\"\"\n\n if not isinstance(nx, int):\n raise TypeError('nx must be an integer')\n if nx < 1:\n raise ValueError('nx must be a positive integer')\n\n self.__W = np.random.randn(1, nx)\n self.__b = 0\n self.__A = 0\n\n @property\n def W(self):\n \"\"\"getter function to W\"\"\"\n return self.__W\n\n @property\n def b(self):\n \"\"\"getter function to b\"\"\"\n return self.__b\n\n @property\n def A(self):\n \"\"\"getter function to A\"\"\"\n return self.__A\n\n def forward_prop(self, X):\n \"\"\"Calculates the forward propagation of the neuron\n X is a numpy.ndarray with shape (nx, m) that contains the input data\n nx is the number of input features to the neuron\n m is the number of examples\n The neuron should use a sigmoid activation function\n Returns the private attribute __A\"\"\"\n\n W = self.__W\n b = self.__b\n z = np.dot(W, X) + b\n\n sigmoidea = 1 / (1 + np.exp(-1 * z))\n\n self.__A = sigmoidea\n\n return self.__A\n\n def cost(self, Y, A):\n \"\"\"Calculates the cost of the model using logistic regression\n Y is a numpy.ndarray with shape (1, m)\n that contains the correct labels for the input data.\n A is a numpy.ndarray with shape (1, m)\n containing the activated output of the neuron for each example.\n To avoid division by zero errors, use 1.0000001-A instead of 1-A\n Returns the cost\"\"\"\n\n y1 = 1 - Y\n y2 = 1.0000001 - A\n\n m = Y.shape[1]\n\n cost = -1 * (1 / m) * np.sum(Y * np.log(A) + y1 * np.log(y2))\n\n return cost\n\n def evaluate(self, X, Y):\n \"\"\"Evaluates the neuron’s predictions\n X is a numpy.ndarray with shape (nx, m) that contains the input data.\n nx is the number of input features to the neuron.\n m is the number of examples.\n Y is a numpy.ndarray with shape (1, m)\n that contains the correct labels for the input data.\n Returns the neuron’s prediction and the cost of the network\n The prediction should be a numpy.ndarray with shape (1, m)\n containing the predicted labels for each example\n The label values should be 1 if the output of the network is >= 0.5\n and 0 otherwise\"\"\"\n\n A = self.forward_prop(X)\n\n evaluate_predict = np.where(A < 0.5, 0, 1)\n cost = self.cost(Y, A)\n\n return 
(evaluate_predict, cost)\n","sub_path":"supervised_learning/0x00-binary_classification/4-neuron.py","file_name":"4-neuron.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"553021144","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 16 17:46:39 2017\n\n@author: e6990\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport GD\nimport SGD\nfrom matplotlib.colors import ListedColormap\n\ndef decision_regions(X,Y,classifier,step = 0.02):\n X = np.asarray(X);\n Y = np.asarray(Y);\n markers = ('s','x','o','^','v');\n colors = ('red','blue','lightgreen','gray','cyan');\n cmap = ListedColormap(colors[:len(np.unique(Y))]);\n #plot the decision surface\n x1_min,x1_max = X[:,0].min() - 1, X[:,0].max() + 1;\n x2_min,x2_max = X[:,1].min() - 1, X[:,1].max() + 1;\n xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,step),np.arange(x2_min,x2_max,step));\n Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T);\n Z = Z.reshape(xx1.shape);\n plt.contourf(xx1, xx2, Z, alpha = 0.5, cmap = cmap);\n plt.xlim(x1_min,x1_max);\n plt.ylim(x2_min,x2_max);\n \n #plot the class smaple\n for idx,sample in enumerate(np.unique(Y)):\n plt.scatter(X[Y == sample,0],X[Y == sample,1],alpha = 0.8, color = cmap(idx),marker = markers[idx],label = idx);\n \n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',header = None);\ny = df.iloc[0:100,4].values;\ny = np.where(y == 'Iris-setosa',1,-1);\nx = df.iloc[0:100,[0,2]].values;\npositive = np.where(y == 1);\nnegative = np.where(y == -1);\nplt.scatter(x[positive,0], x[positive,1], color = 'red', marker = 'o', label = 'setosa');\nplt.scatter(x[negative,0], x[negative,1], color = 'blue', marker = '*', label = 'versicolor');\nplt.xlabel('petal length');\nplt.ylabel('sepal length');\nplt.legend(loc='upper left');\nplt.show();\nnpp = SGD.GradientDescent(step = 0.01,n_iter = 50);\nnpp.fix(x,y);\ndecision_regions(x,y,classifier = npp);\n'''\nplt.scatter(range(1,len(npp.error)+1),npp.error,color = 'green', marker = 'o');\nplt.xlabel('number of sample');\nplt.ylabel('error');\nplt.show();\n'''\n","sub_path":"GradientDescent/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"468169821","text":"import pygame\n\npygame.init()\nscreen = pygame.display.set_mode((400, 300))\ndone = False\n\nrect1 = pygame.Rect(30, 30, 60, 60) #New\nrect2 = pygame.Rect(200, 200, 60, 60) #New\n\nis_1_blue = True\nis_2_blue = True\n\nwhile not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n \n if event.type == pygame.MOUSEBUTTONUP:\n mouse_position = pygame.mouse.get_pos()\n if rect1.collidepoint(mouse_position): #New\n is_1_blue = not is_1_blue #New\n if rect2.collidepoint(mouse_position): #New\n is_2_blue = not is_2_blue #New\n \n if is_1_blue: #New\n color = (0, 128, 255) #New\n else: #New\n color = (255, 100, 0) #New\n pygame.draw.rect(screen, color, rect1) #New\n \n if is_2_blue: #New\n color = (0, 128, 255) #New\n else: #New\n color = (255, 100, 0) #New\n pygame.draw.rect(screen, color, rect2) #New\n \n pygame.display.flip()\n","sub_path":"Lectures/Lecture 12 Files - Pygame/pygame11.py","file_name":"pygame11.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} 
+{"seq_id":"445359442","text":"import unittest\nimport os\n\n\nfrom simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nimport numpy as np\nimport json\nfrom MC.MC import *\n\n\nclass TestMD(unittest.TestCase):\n def test_md(self):\n with open(os.getcwd()+'/test/test_MC/input/test_MC.json') as file:\n Information = json.load(file)\n \n if os.path.isfile(os.getcwd()+'/test/test_MC/output/Trajectory_MC.pdb'):\n os.remove(os.getcwd()+'/test/test_MC/output/Trajectory_MC.pdb') \n if os.path.isfile(os.getcwd()+'/test/test_MC/output/MC_states.txt'):\n os.remove(os.getcwd()+'/test/test_MC/output/MC_states.txt') \n if os.path.isfile(os.getcwd()+'/test/test_MC/output/'+Information[\"Structure_name\"]+'_intermedi.pdb'):\n os.remove(os.getcwd()+'/test/test_MC/output/'+Information[\"Structure_name\"]+'_intermedi.pdb')\n if os.path.isfile(os.getcwd()+'/test/test_MC/output/'+Information[\"Structure_name\"]+'_intermedi2.pdb'):\n os.remove(os.getcwd()+'/test/test_MC/output/'+Information[\"Structure_name\"]+'_intermedi2.pdb')\n\n pdb_input = os.getcwd()+'/test/test_MC/input/Trajectory_MC.pdb'\n pdb_in = parsePDB(pdb_input)\n a1_in = pdb_in.getNames()\n atoms_in = []\n for a_in in a1_in:\n atoms_in.append(a_in) \n\n\n Metropolis(Information, 'P', 43)\n \n states_out = Information[\"Output_address\"]+'/MC_states.txt'\n pdb_out = Information[\"Output_address\"]+'/Trajectory_MC.pdb'\n \n # Number of states\n with open(states_out) as f:\n states = f.readlines()\n \n # Atoms pdb out\n pdb = parsePDB(pdb_out) \n a1 = pdb.getNames()\n atoms = []\n for a in a1:\n atoms.append(a)\n \n \n assert len(states) == Information[\"N_conf\"]\n np.testing.assert_array_equal(atoms_in, atoms)\n \nif __name__ == '__main__':\n unittest.main()\n \n","sub_path":"test/test_MC/test_MC.py","file_name":"test_MC.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"8280172","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\n\nfrom collections import OrderedDict\nfrom maml_rl.policies.policy import ConvGRUCell\nimport numpy as np\n\nclass LSTMPolicy(nn.Module):\n \"\"\"\n Baseline LSTM Architecture\n \"\"\"\n def __init__(self, input_size, output_size, lstm_size=256):\n super(LSTMPolicy, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.lstm_size = lstm_size\n\n lstm_input_size = self.input_size + self.output_size + 3\n self.lstm = nn.LSTMCell(lstm_input_size, hidden_size=self.lstm_size)\n self.pi = nn.Linear(self.lstm_size, self.output_size)\n self.v = nn.Linear(self.lstm_size, 1)\n\n def forward(self, x, hx, cx, embed):\n output = torch.cat((x, embed), dim=1)\n h, c = self.lstm(output, (hx, cx))\n return Categorical(logits=self.pi(h)), self.v(h), h, c\n\nclass GRUVaePolicy(nn.Module):\n \"\"\"\n Baseline GRU Architecture\n \"\"\"\n def __init__(self, output_size, encoder_model, lstm_size=256, D=1):\n super(GRUVaePolicy, self).__init__()\n self.output_size = output_size\n self.lstm_size = lstm_size\n self.D = D\n self.encoder = encoder_model\n\n lstm_input_size = 32 + self.output_size + 2\n self.cell_list = [nn.GRUCell(lstm_input_size, hidden_size=self.lstm_size)]\n for d in range(1, self.D):\n self.cell_list.append(nn.GRUCell(self.lstm_size, self.lstm_size))\n self.cell_list = nn.ModuleList(self.cell_list)\n self.pi = nn.Linear(self.lstm_size, self.output_size)\n self.v = 
nn.Linear(self.lstm_size, 1)\n\n def forward(self, x, hx, embed):\n output = x.permute(0, 3, 1, 2)\n mu, sigma = self.encoder.encode(output)\n output = self.encoder.sample_latent(mu, sigma)\n output = torch.cat((output, embed), dim=1)\n h_out = []\n for d in range(self.D):\n output = self.cell_list[d](output, hx[d])\n h_out.append(output)\n return Categorical(logits=self.pi(output)), self.v(output), torch.stack(h_out)\n\nclass GRUPolicy(nn.Module):\n \"\"\"\n Baseline GRU Architecture\n \"\"\"\n def __init__(self, input_size, output_size, lstm_size=256, D=1, N=1):\n super(GRUPolicy, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.lstm_size = lstm_size\n self.D = D\n self.N = N\n\n self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1)\n self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=1)\n self.conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=1)\n self.conv4 = nn.Conv2d(16, 16, kernel_size=3, stride=1)\n lstm_input_shape = (2,2)\n\n self.cell_list = [ConvGRUCell(input_size=lstm_input_shape,\n input_dim=16, hidden_dim=self.lstm_size, kernel_size=3)]\n for d in range(1, self.D):\n self.cell_list.append(ConvGRUCell(input_size=lstm_input_shape,\n input_dim=self.lstm_size, hidden_dim=self.lstm_size, kernel_size=3))\n self.cell_list = nn.ModuleList(self.cell_list)\n self.pi = nn.Linear(2 * 2 * lstm_size, self.output_size)\n self.v = nn.Linear(2 * 2 * lstm_size, 1)\n\n def forward(self, x, hx, embed):\n output = x.unsqueeze(1)\n output = F.relu(self.conv1(output))\n output = F.relu(self.conv2(output))\n output = F.relu(self.conv3(output))\n output = F.relu(self.conv4(output))\n #output = output.view(output.size(0), -1)\n #output = torch.cat((output, embed), dim=1)\n h_in = hx\n for n in range(self.N):\n inner_out = output; h_out = []\n for d in range(self.D):\n inner_out = self.cell_list[d](inner_out, h_in[d])\n h_out.append(inner_out)\n h_in = h_out\n output = inner_out.view(inner_out.size(0), -1)\n return Categorical(logits=self.pi(output)), self.v(output), torch.stack(h_out)\n\nclass FFPolicy(nn.Module):\n \"\"\"\n Baseline GRU Architecture\n \"\"\"\n def __init__(self, input_size, output_size, D=1):\n super(FFPolicy, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.D = D\n\n self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1)\n self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=1)\n self.conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=1)\n self.fc = nn.Linear(4 * 4 * 16, 32)\n\n self.pi = nn.Linear(32, self.output_size)\n self.v = nn.Linear(32, 1)\n\n def forward(self, x):\n output = x.unsqueeze(1)\n output = F.relu(self.conv1(output))\n output = F.relu(self.conv2(output))\n output = F.relu(self.conv3(output))\n output = output.view(output.size(0), -1)\n output = F.relu(self.fc(output))\n return Categorical(logits=self.pi(output)), self.v(output)\n\n","sub_path":"maml_rl/policies/lstm_policy.py","file_name":"lstm_policy.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"240021929","text":"import numpy as np\nfrom keras.models import Model\nfrom keras.optimizers import SGD, Adam, Nadam, RMSprop\n\nimport data\nimport datagen\nimport net\n\nimport random\nimport skimage.transform\nimport json\nimport pickle\nimport sklearn.metrics\n\nimport sys\nimport importlib\nimport datetime\nimport subprocess\n\nSNAP_PATH = '/mnt/data/snap/'\n\nconfig_name = sys.argv[1]\nconfig = importlib.import_module(config_name)\n\nfold 
= int(sys.argv[2])\n\nlocalizer_weights_file = sys.argv[3]\n\nlocalizer_output_dir = sys.argv[4]\n\nweights_file = sys.argv[5]\n\nrun_id = 'classifier' + '__' + config_name + '__' + datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')\nprint(run_id)\n\n\nvsize = np.asarray([32,32,32])\n\ndf_nodes = data.ndsb17_get_df_nodes() \ndf_nodes = df_nodes[(df_nodes[\"diameter_mm\"]>=9)]\n\npatient_ids = data.ndsb17_get_patient_ids_noncancer()\nfor k in range(fold):\n np.random.shuffle(patient_ids)\n\nX_cancer_nodules, cancer_diams = data.ndsb17_get_all_nodules(np.asarray([64,64,64]), df_nodes)\nprint(\"cancer nodules\", len(X_cancer_nodules))\n\n# X_localizer_nodules, _ = data.ndsb17_get_predicted_nodules(np.asarray([64,64,64]), patient_ids, SNAP_PATH+localizer_output_dir, min_activity=config.min_activity_train)\n# print(\"localizer nodules\", len(X_localizer_nodules))\n\ndf_benign = data.ndsb17_get_df_nodes(cancer_label=0)\nX_benign_nodules, benign_diams = data.ndsb17_get_all_nodules(np.asarray([64,64,64]), df_benign)\nprint(\"benign nodules\", len(X_benign_nodules))\n\nX_cancer_nodules = [x for x in X_cancer_nodules if x.shape == (64,64,64)]\n# X_localizer_nodules = [x for x in X_localizer_nodules if x.shape == (64,64,64)]\nX_benign_nodules = [x for x in X_benign_nodules if x.shape == (64,64,64)]\n\nX_cancer_nodules_train, X_cancer_nodules_test = data.kfold_split(X_cancer_nodules, fold)\n# X_localizer_nodules_train, X_localizer_nodules_test = data.kfold_split_fixed(X_localizer_nodules, fold, size=len(X_cancer_nodules_test))\nX_benign_nodules_train, X_benign_nodules_test = data.kfold_split_fixed(X_benign_nodules, fold, size=len(X_cancer_nodules_test))\n\nprint(len(X_cancer_nodules_train), len(X_cancer_nodules_test))\n# print(len(X_localizer_nodules_train), len(X_localizer_nodules_test))\nprint(len(X_benign_nodules_train), len(X_benign_nodules_test))\n\n\ndef batch_generator_ab(vsize, X_nodules_a, X_nodules_b, batch_size=64, do_downscale=True):\n while True:\n X = np.zeros((batch_size,) + tuple(vsize) + (1,), dtype=np.float32)\n y = np.zeros((batch_size), dtype=np.int)\n n = 0\n while n < batch_size:\n if np.random.choice([True, False]):\n idx = np.random.choice(len(X_nodules_a))\n volume = X_nodules_a[idx]\n volume = datagen.make_augmented(vsize, volume)\n y[n] = 0\n else:\n idx = np.random.choice(len(X_nodules_b))\n volume = X_nodules_b[idx]\n volume = datagen.make_augmented(vsize, volume)\n y[n] = 1\n X[n,:,:,:,0] = volume\n n += 1\n X = datagen.preprocess(X)\n if do_downscale:\n X = skimage.transform.downscale_local_mean(X, (1,2,2,2,1), clip=False)\n yield X, y\n\n\ngen = batch_generator_ab(np.asarray((32,32,32)), X_benign_nodules_train, X_cancer_nodules_train, do_downscale=config.do_downscale)\n\ntest_nodules = np.stack(X_benign_nodules_test + X_cancer_nodules_test)[:,16:16+32,16:16+32,16:16+32,None]\ntest_nodules = datagen.preprocess(test_nodules)\nif config.do_downscale:\n test_nodules = skimage.transform.downscale_local_mean(test_nodules, (1,2,2,2,1), clip=False)\ntest_y = np.concatenate( (np.zeros((len(X_benign_nodules_test),)), np.ones((len(X_cancer_nodules_test),))) )\n\nhistory = {'loss':[], 'acc':[], 'val_loss':[], 'val_acc':[]}\nhistory['version'] = subprocess.check_output('git describe --always --dirty', shell=True).decode('ascii').strip()\nhistory['argv'] = sys.argv\n\nmodel = net.model3d(config.net_input_vsize, sz=config.feature_sz, alpha=config.feature_alpha)\nprint(model.summary())\n\ndef get_optimizer(lr):\n if config.optimizer == 'rmsprop':\n optimizer = RMSprop(lr=lr)\n elif 
config.optimizer == 'adam':\n optimizer = Adam(lr=lr)\n elif config.optimizer == 'nadam':\n optimizer = Nadam(lr=lr)\n elif config.optimizer == 'sgd':\n optimizer = SGD(lr=lr, momentum=0.9, nesterov=True)\n return optimizer\n\nmodel.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=get_optimizer(config.lr))\nmodel.load_weights(SNAP_PATH + localizer_weights_file, by_name=True)\n\nfom_best = 1e+6\nfor e in range(config.num_epochs):\n h = model.fit_generator(\n gen,\n config.samples_per_epoch,\n nb_epoch=1,\n verbose=1,\n validation_data=(test_nodules, test_y))\n\n print(h.history)\n history['loss'].append(h.history['loss'][0])\n history['acc'].append(h.history['acc'][0])\n history['val_loss'].append(h.history['val_loss'][0])\n history['val_acc'].append(h.history['val_acc'][0])\n\n pred_y = model.predict(test_nodules)\n print(pred_y.shape)\n\n fpr, tpr, thresholds = sklearn.metrics.roc_curve(test_y, pred_y)\n auc = sklearn.metrics.auc(fpr, tpr)\n print(\"auc\", auc)\n\n model.save_weights(SNAP_PATH + run_id + '.{:04d}'.format(e) + '.h5')\n\n with open(SNAP_PATH + run_id + '.log.json', 'w') as fh:\n json.dump(history, fh)\n\n fom = h.history['val_loss'][0]\n # print(\"fom\", fom)\n if fom < fom_best:\n fom_best = fom\n print(\"*** saving best result\")\n model.save_weights(SNAP_PATH + weights_file)\n\n if e == config.lr_step_num_epochs:\n print(\"*** reloading from best result\")\n\n model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=get_optimizer(config.lr * config.lr_step_multiplier))\n model.load_weights(SNAP_PATH + weights_file)\n","sub_path":"train_classifier_paper.py","file_name":"train_classifier_paper.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"524836531","text":"import numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport librosa\nimport PIL\n\n\ndef transform(data, dest_path, n_fft=1024, segment_size=150000):\n \"\"\"Creates spectrogram images from a numpy array and saves them to destination path. Saves\n time to failure for each file/segment in a DataFrame. 
Divides data into non-overlapping segments of\n segment_size and discards leftovers in the last segment.\"\"\"\n n_segments = int(np.floor(data.shape[0] / segment_size))\n file_labels = pd.DataFrame([], columns=['name', 'label'])\n\n for i in tqdm(range(n_segments), leave=False):\n # Get data window\n x = data[0 + segment_size * i:segment_size + segment_size * i, 0]\n # Get time to failure at last point in the segment\n filename = 'spectrogram_%d.png' % i\n y = data[segment_size + segment_size * i, 1]\n file_labels = file_labels.append(pd.DataFrame([[filename, y]], columns=['name', 'label']),\n ignore_index=True)\n\n # Librosa spectrogram\n stft = librosa.core.stft(x, n_fft=n_fft, hop_length=None, win_length=None, window='hann',\n center=True, dtype=np.complex64, pad_mode='reflect')\n spec = np.abs(stft)\n\n # Normalize to image parameters\n spec = np.uint8(spec / np.max(spec) * 256)\n\n # Save as image\n img = PIL.Image.fromarray(spec)\n img.save(dest_path + '/' + filename, optimize=True)\n\n file_labels.to_csv(dest_path + '/file_labels.df', index=False)\n\n\nif __name__ == '__main__':\n data = np.load('input/lanl-npy/train_f16.npy', fix_imports=False)\n dest_path = 'input/spectrograms'\n transform(data, dest_path)\n","sub_path":"lanl/working/npy2spectrogram.py","file_name":"npy2spectrogram.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"235005002","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nfrom scipy import ndimage, misc\n\n\na = np.zeros((4, 20))\na[0, 2] = 100\na[1, 8] = 100\na[2, 12] = 100\na[3, 15] = 100\n\n# b = np.array([[100, 0, 0, 0, 0, 0, 0], [0, 90, 0, 0, 0, 0, 0], [0, 0, 80, 0, 0, 0, 0],\n# [0, 0, 0, 70, 0, 0, 0], [0, 0, 0, 0, 60, 0, 0], [0, 0, 0, 0, 0, 50, 0], [0, 0, 0, 0, 0, 0, 35]])\n#\nspacing = len(a[0]) - 2\n#\naPrime = np.zeros((spacing + 2, len(a[0])))\naPrime[0] = a[0]\naPrime[-1] = a[-1]\n\n\ndef weightFunc(x):\n return np.exp(-3*x)\n\nfor i in range(len(a[0])):\n for j in range(1, spacing+1):\n\n # For each LHS cell, RHS cell\n weights = []\n\n for l in range(len(a[0])):\n for r in range(len(a[0])):\n\n # The fraction along the line the bisector meets the line joining the two cells\n xr = spacing + 2\n f = (i * (l - r) - l * (l - r) - xr * j) / ((l - r) * (r - l) - xr**2)\n interpolatedValue = a[0, l] + f * (a[-1, r] - a[0, l]) # The interpolation\n gradient = (r - l) / (spacing + 1)\n d = np.abs(gradient * j - i + l) / np.sqrt(gradient**2 + 1)\n weight = weightFunc(d)\n weights.append(weight)\n aPrime[j, i] += weight * interpolatedValue\n\n aPrime[j, i] = aPrime[j, i] / (np.sum(np.array(weights)))\n\n# x = np.arange(len(a))\n# y = np.arange(len(a[0]))\n# # f = interpolate.interp2d(x, y, a.transpose())\n# # xPrime = np.linspace(np.min(x), np.max(x), 7)\n# # aPrime = f(xPrime, y)\n#\n# interpolatingFunc = interpolate.RegularGridInterpolator((x, y), a)\n#\n# xnew = np.linspace(np.min(x), np.max(x), 10*len(x))\n#\n# aPrime = np.zeros((len(xnew), len(y)))\n#\n# for i in range(len(xnew)):\n# for j in range(len(y)):\n# aPrime[i, j] = interpolatingFunc(np.array([xnew[i], y[j]]))\n\nplt.imshow(aPrime.transpose())\nplt.show()\n\n\n# plt.imshow(b)\n# plt.show()\n","sub_path":"SpectrumFieldDependence/Interp/Interp.py","file_name":"Interp.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} 
+{"seq_id":"181038822","text":"import os\nimport json\nfrom flask import Flask, jsonify, request, render_template\nfrom datetime import datetime\nfrom application.clientshit import ClientMethods\n\napp = Flask(__name__)\napp.debug = True\n\n# instantiate client methods\nclientmethod = ClientMethods()\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef main():\n client_files = os.listdir(\"clients\")\n all_clients = {}\n for client_file in client_files:\n with open(\"clients\\\\\"+client_file, \"r\") as cfile:\n cdata = json.load(cfile)\n all_clients[client_file] = cdata\n print(all_clients)\n return render_template(\"index.html\", all_clients=all_clients)\n\n@app.route(\"/api/v1/add-client\")\ndef add_client():\n # create a new client file on the server\n client_config = {\"clientid\": request.args.get(\"clientid\"),\n \"hostname\": request.args.get(\"hostname\"),\n \"username\": request.args.get(\"username\"),\n \"last_checkin\": str(datetime.now())}\n client_filepath = os.path.join(\"clients\", client_config[\"clientid\"])\n # if client file does not already exist, create it\n if not os.path.isfile(client_filepath):\n with open(client_filepath, \"w\") as clientfile:\n clientfile.write(json.dumps(client_config))\n return jsonify({'client': 'created'})\n else:\n return jsonify({'client': 'exists'})\n\n# check server status\n@app.route(\"/api/v1/status\")\ndef heartbeat():\n data = {\"status\": \"online\"}\n return jsonify(data)\n\n# beacon api for clients\n@app.route(\"/api/v1/beacon\")\ndef command():\n # create path to command file\n clientmethod.last_checkin(request.args.get(\"clientid\"))\n commands = os.path.join(\"commands\", request.args.get(\"clientid\"))\n if os.path.isfile(commands):\n return jsonify({\"commands\":\"here are your orders\"})\n elif not os.path.isfile(commands):\n return jsonify({\"commands\":\"no_commands\"})\n\n","sub_path":"application/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"410691246","text":"\"\"\"\nCode for querying DataONE and verifying query results. Specifically used for\n finding datasets based on the url and for listing package contents. 
Some of\n these methods are used elsewhere in the WholeTale plugin, specifically in the harvester.\n\"\"\"\n\nimport re\nimport json\nimport six.moves.urllib as urllib\nimport requests\nimport rdflib\n\nfrom girder import logger\nfrom girder.api.rest import RestException\n\n# http://blog.crossref.org/2015/08/doi-regular-expressions.html\n_DOI_REGEX = re.compile('(10.\\d{4,9}/[-._;()/:A-Z0-9]+)', re.IGNORECASE)\nD1_BASE = \"https://cn.dataone.org/cn/v2\"\n\n\ndef esc(value):\n \"\"\"Escape a string so it can be used in a Solr query string\"\"\"\n\n return urllib.parse.quote_plus(value)\n\n\ndef unesc(value):\n \"\"\"Unescapes a string so it can uesd in URLS\"\"\"\n return urllib.parse.unquote_plus(value)\n\n\ndef query(q, fields=[\"identifier\"], rows=1000, start=0):\n \"\"\"Query a DataONE Solr index.\"\"\"\n\n fl = \",\".join(fields)\n query_url = \"{}/query/solr/?q={}&fl={}&rows={}&start={}&wt=json\".format(\n D1_BASE, q, fl, rows, start)\n\n req = requests.get(query_url)\n content = json.loads(req.content.decode('utf8'))\n\n # Fail if the Solr query failed rather than fail later\n if content['responseHeader']['status'] != 0:\n raise RestException(\n \"Solr query was not successful.\\n{}\\n{}\".format(query_url, content))\n\n # Stop if the number of results is equal to the number of rows requested\n # Fix this in the future by supporting paginated queries.\n if content['response']['numFound'] == rows:\n raise RestException(\n \"Number of results returned equals number of rows requested. \"\n \"This could mean the query result is truncated. \"\n \"Implement paged queries.\")\n\n return content\n\n\ndef find_resource_pid(pid):\n \"\"\"\n Find the PID of the resource map for a given PID, which may be a resource map\n \"\"\"\n\n result = query(\n \"identifier:\\\"{}\\\"\".format(esc(pid)),\n fields=[\"identifier\", \"formatType\", \"formatId\", \"resourceMap\"])\n result_len = int(result['response']['numFound'])\n\n if result_len == 0:\n raise RestException('No object was found in the index for {}.'.format(pid))\n elif result_len > 1:\n raise RestException(\n 'More than one object was found in the index for the identifier '\n '{} which is an unexpected state.'.format(pid))\n\n # Find out if the PID is an OAI-ORE PID and return early if so\n try:\n if result['response']['docs'][0]['formatType'] == 'RESOURCE':\n return(result['response']['docs'][0]['identifier'])\n except KeyError:\n raise RestException('Unable to find a resource file in the data package')\n\n try:\n if len(result['response']['docs'][0]['resourceMap']) == 1:\n return result['response']['docs'][0]['resourceMap'][0]\n except KeyError:\n raise RestException('Unable to find a resource map for the data package')\n\n if len(result['response']['docs'][0]['resourceMap']) > 1:\n # Extract all of the candidate resource map PIDs (list of lists)\n resmaps = [doc['resourceMap'] for doc in result['response']['docs']]\n\n # Flatten the above result out and query\n # Flattening is required because the above 'resourceMap' field is a\n # Solr array type so the result is a list of lists\n nonobs = find_nonobsolete_resmaps(\n [item for items in resmaps for item in items]\n )\n\n # Only return of one non-obsolete Resource Map was found\n # If we find multiple, that implies the original PID we queried for\n # is a member of multiple packages and what to do isn't implemented\n if len(nonobs) == 1:\n return nonobs[0]\n\n # Error out if the document passed in has multiple resource maps. 
What I can\n # still do here is determine the most likely resource map given the set.\n # Usually we do this by rejecting any obsoleted resource maps and that\n # usually leaves us with one.\n raise RestException(\n \"Multiple resource maps were for the data package, which isn't supported.\")\n\n\ndef find_nonobsolete_resmaps(pids):\n \"\"\"\n Given one or more resource map pids, returns the ones that are not obsoleted\n by any other Object.\n This is done by querying the Solr index with the -obsoletedBy:* query param\n \"\"\"\n\n result = query(\n \"identifier:(\\\"{}\\\")+AND+-obsoletedBy:*\".format(\"\\\" OR \\\"\".join(pids),\n fields=\"identifier\")\n )\n result_len = int(result['response']['numFound'])\n\n if result_len == 0:\n raise RestException('No results were found for identifier(s): {}.'.format(\", \".join(pids)))\n\n return [doc['identifier'] for doc in result['response']['docs']]\n\n\ndef find_initial_pid(path):\n \"\"\"\n Given some arbitrary path, which may be a landing page, resolve URI or\n something else, find the PID the user intended (the package PID).\n\n This can parse the PID out of the HTTP and HTTPS versions of...\n - The MetacatUI landing page (#view)\n - The D1 v2 Object URI (/object)\n - The D1 v2 Resolve URI (/resolve)\n \"\"\"\n\n doi = _DOI_REGEX.search(path)\n if re.search(r'^http[s]?:\\/\\/search.dataone.org\\/#view\\/', path):\n return re.sub(\n r'^http[s]?:\\/\\/search.dataone.org\\/#view\\/', '', path)\n elif re.search(r'^http[s]?://cn.dataone.org/cn/d1/v[\\d]/\\w+/', path):\n return re.sub(\n r'^http[s]?://cn.dataone.org/cn/d1/v[\\d]/\\w+/', '', path)\n elif doi is not None:\n return 'doi:{}'.format(doi.group())\n else:\n return path\n\n\ndef get_aggregated_identifiers(pid):\n \"\"\"Process an OAI-ORE aggregation into a set of aggregated identifiers.\"\"\"\n\n g = rdflib.Graph()\n\n graph_url = \"{}/resolve/{}\".format(D1_BASE, esc(pid))\n g.parse(graph_url, format='xml')\n\n ore_aggregates = rdflib.term.URIRef(\n 'http://www.openarchives.org/ore/terms/aggregates')\n dcterms_identifier = rdflib.term.URIRef(\n 'http://purl.org/dc/terms/identifier')\n\n aggregated = g.objects(None, ore_aggregates)\n\n pids = set()\n\n # Get the PID of the aggregated Objects in the package\n for object in aggregated:\n identifiers = g.objects(object, dcterms_identifier)\n [pids.add(unesc(id)) for id in identifiers]\n\n return pids\n\n\ndef verify_results(pid, docs):\n aggregation = get_aggregated_identifiers(pid)\n pids = set([unesc(doc['identifier']) for doc in docs])\n\n if aggregation != pids:\n raise RestException(\n \"The contents of the Resource Map don't match what's in the Solr \"\n \"index. 
This is unexpected and unhandled.\")\n\n\ndef get_documenting_identifiers(pid):\n \"\"\"\n Find the set of identifiers in an OAI-ORE resource map documenting\n other members of that resource map.\n \"\"\"\n\n g = rdflib.Graph()\n\n graph_url = \"{}/resolve/{}\".format(D1_BASE, esc(pid))\n g.parse(graph_url, format='xml')\n\n cito_isDocumentedBy = rdflib.term.URIRef(\n 'http://purl.org/spar/cito/isDocumentedBy')\n dcterms_identifier = rdflib.term.URIRef(\n 'http://purl.org/dc/terms/identifier')\n\n documenting = g.objects(None, cito_isDocumentedBy)\n\n pids = set()\n\n # Get the PID of the documenting Objects in the package\n for object in documenting:\n identifiers = g.objects(object, dcterms_identifier)\n [pids.add(unesc(id)) for id in identifiers]\n\n return pids\n\n\ndef get_package_pid(path):\n \"\"\"Get the pid of a package from its path.\"\"\"\n\n initial_pid = find_initial_pid(path)\n logger.debug('Parsed initial PID of {}.'.format(initial_pid))\n return find_resource_pid(initial_pid)\n\n\ndef extract_metadata_docs(docs):\n metadata = [doc for doc in docs if doc['formatType'] == 'METADATA']\n if not metadata:\n raise RestException('No metadata file was found in the package.')\n return metadata\n\n\ndef extract_data_docs(docs):\n data = [doc for doc in docs if doc['formatType'] == 'DATA']\n# if not data:\n# raise RestException('No data found.')\n return data\n\n\ndef extract_resource_docs(docs):\n resource = [doc for doc in docs if doc['formatType'] == 'RESOURCE']\n return resource\n\n\ndef D1_lookup(path):\n \"\"\"Lookup and return information about a package on the\n DataONE network.\n \"\"\"\n\n package_pid = get_package_pid(path)\n logger.debug('Found package PID of {}.'.format(package_pid))\n\n docs = get_documents(package_pid)\n\n # Filter the Solr result by TYPE so we can construct the package\n metadata = [doc for doc in docs if doc['formatType'] == 'METADATA']\n if not metadata:\n raise RestException('No metadata found.')\n\n # Compute package size (sum of 'size' values)\n total_size = sum([int(doc.get('size', 0)) for doc in docs])\n\n dataMap = {\n 'dataId': package_pid,\n 'size': total_size,\n 'name': metadata[0].get('title', 'no title'),\n 'doi': metadata[0].get('identifier', 'no DOI').split('doi:')[-1],\n 'repository': 'DataONE',\n }\n return dataMap\n\n\ndef get_documents(package_pid):\n \"\"\"\n Retrieve a list of all the files in a data package. The metadata\n record providing information about the package is also in this list.\n \"\"\"\n\n result = query('resourceMap:\"{}\"'.format(esc(package_pid)),\n [\"identifier\", \"formatType\", \"title\", \"size\", \"formatId\",\n \"fileName\", \"documents\"])\n\n if 'response' not in result or 'docs' not in result['response']:\n raise RestException(\n \"Failed to get a result for the query\\n {}\".format(result))\n\n return result['response']['docs']\n\n\ndef check_multiple_maps(documenting):\n if len(documenting) > 1:\n raise RestException(\n \"Found two objects in the resource map documenting other objects. \"\n \"This is unexpected and unhandled.\")\n elif len(documenting) == 0:\n raise RestException('No object was found in the resource map.')\n\n\ndef check_multiple_metadata(metadata):\n if len(metadata) > 1:\n raise RestException(\"Multiple documenting metadata objects found. 
\"\n \"This is unexpected and unhandled.\")\n\n\ndef get_package_list(path, package=None, isChild=False):\n \"\"\"\"\"\"\n if package is None:\n package = {}\n\n package_pid = get_package_pid(path)\n logger.debug('Found package PID of {}.'.format(package_pid))\n\n docs = get_documents(package_pid)\n\n # Filter the Solr result by TYPE so we can construct the package\n metadata = extract_metadata_docs(docs)\n data = extract_data_docs(docs)\n children = extract_resource_docs(docs)\n\n # Determine the folder name. This is usually the title of the metadata file\n # in the package but when there are multiple metadata files in the package,\n # we need to figure out which one is the 'main' or 'documenting' one.\n primary_metadata = [doc for doc in metadata if 'documents' in doc]\n\n check_multiple_metadata(primary_metadata)\n\n data += [doc for doc in metadata if doc['identifier'] != primary_metadata[0]['identifier']]\n\n fileList = get_package_files(data, metadata, primary_metadata)\n\n # Add a new entry in the package structure\n # if isChild:\n # package[-1][primary_metadata[0]['title']] = {'fileList': []}\n # else:\n package[primary_metadata[0]['title']] = {'fileList': []}\n\n package[primary_metadata[0]['title']]['fileList'].append(fileList)\n if children is not None and len(children) > 0:\n for child in children:\n get_package_list(child['identifier'], package[primary_metadata[0]['title']], True)\n return package\n\n\ndef get_package_files(data, metadata, primary_metadata):\n fileList = {}\n for fileObj in data:\n fileName = fileObj.get('fileName', fileObj.get('identifier', ''))\n\n fileSize = int(fileObj.get('size', 0))\n\n fileList[fileName] = {\n 'size': fileSize\n }\n\n # Also add the metadata to the file list\n fileList[primary_metadata[0]['fileName']] = {\n 'size': primary_metadata[0].get('size', 0)\n }\n\n return fileList\n","sub_path":"server/dataone_register.py","file_name":"dataone_register.py","file_ext":"py","file_size_in_byte":12131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"357323347","text":"from ..object import get as db\nfrom ..object import Listing, Category\n\nfrom flask import Blueprint, render_template, url_for, redirect, request\n\nblueprint = Blueprint('category', __name__, template_folder='templates')\n\n\n@blueprint.route('/')\n@blueprint.route('/')\ndef root(category=None, title=\"listings\"):\n\tcategories = []\n\tif category is None:\n\t\tcategories = Category.get_all()\n\telse:\n\t\tcategories = [Category.get(int(category))]\n\t\ttitle = \"category: %s\" % categories[0].name\n\treturn render_template(\"category/root.html\", categories=categories, title=title)\n\n\ndef handle_add(name):\n\tif request.method == \"POST\":\n\t\t\tname = request.form['name']\n\tname = name.strip()\n\tif not name:\n\t\tstatus = \"invalid name\"\n\n\tdb().create_category(name)\n\n\treturn \"success! 
added category: \" + name\n\n\n@blueprint.route('/add/', methods=['POST', 'GET'])\n@blueprint.route('/add/<name>')\ndef add(name=None):\n\tstatus = \"\"\n\tif request.method == \"POST\" or not (name is None):\n\t\tstatus = handle_add(name)\n\treturn render_template(\"category/add.html\", status=status)\n\n\n@blueprint.route('/remove/')\n@blueprint.route('/remove/<id>')\ndef remove(id=None):\n\tif id is not None:\n\t\ttry:\n\t\t\tid = int(id)\n\t\texcept ValueError:\n\t\t\treturn render_template(\"error.html\", error=\"not a valid integer\")\n\t\tdb().remove_category(id)\n\treturn redirect(url_for(\"routes.root\"))\n","sub_path":"mokuroku/routes/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"346455259","text":"# -*- coding: utf-8 -*-\n\nimport cv2\nimport numpy as np\n\ndef drawRotatedRect(rect, image):\n    box = cv2.boxPoints(rect)\n    x0, y0 = box[0]\n    for i in range(3):\n        x, y = box[i]\n        x1, y1 = box[i + 1]\n        cv2.line(image, (x, y), (x1, y1), (0, 0, 255), 2)\n        if i == 2:\n            cv2.line(image, (x1, y1), (x0, y0), (0, 0, 255), 2)\n\nimg = cv2.imread(r'C:\\Users\\ASUS\\Desktop\\pycharm__pro\\opencv_py\\bankcard\\card\\1.jpeg')\n# Image preprocessing\ngrayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n_, binImg = cv2.threshold(grayImg, 100, 255, cv2.THRESH_BINARY)\n# Find contours\n_, contours, hierarchy = cv2.findContours(binImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n# Get the parameters of the minimum-area rotated rectangle\nrect = cv2.minAreaRect(contours[1])\nx, y = rect[0]\nw, h = rect[1]\nangle = rect[2]\nprint('center:'+str(int(x))+','+str(int(y))+' w,h:'+str(int(w))+','+str(int(h))+' angle:'+str(int(angle)))\n# Draw the rotated rectangle\ndrawRotatedRect(rect, img)\n\ncv2.imshow('Contours Image', img)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"卡号识别/pycharm__pro/opencv_py/bankcard/card/standard/test_trangles.py","file_name":"test_trangles.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"560004689","text":"from __future__ import unicode_literals\n\nfrom .....lib import Basename, Charset, TestOnTempDir, TestTextFeed\nfrom os.path import join\n\nclass T(TestOnTempDir):\n    def test0(self):\n        r'''\n        `_feed` and `_expected_expected` have been manually prepared reading\n        `_tree_template`. `expected` is programmatically prepared from\n        _tree_template.\n        '''\n        from ... 
import TestTree\n from .....util.nameshiftrehearsal2 import name_shift\n from .....lib import StringIO\n from itertools import chain\n fix = (r'p1.py', r'p0.py'),\n b = StringIO()\n _tree_template.feed(b, 'p1.py\\n', '', '0')\n _tree_template.feed(b, 'p1+/\\n', '', '1')\n feed = b.getvalue()\n expected = chain(_tree_template.expected(), fix)\n expected = tuple(expected)\n abs_dir = join(self.tmp, 'a')\n abs_dir = TestTree(abs_dir, feed)\n actual = tuple(name_shift(abs_dir))\n self.assertEqual(expected, actual)\n\nclass _TreeTemplate(TestTextFeed):\n def expected(self):\n digits = Charset.digits\n for y in self.format('1').splitlines():\n if y.endswith('/'):\n y = y[:-1]\n b = Basename(y)\n dirname, basename = b.dirname(), b()\n start = digits.find(basename) + 1\n end = digits.extend(basename, start)\n infix = '2' + basename[start:end]\n prefix, suffix = basename[:1], basename[end:]\n rename = join(dirname, prefix + infix + suffix)\n yield y, rename\n\n__tree_template = r'''\np{0}a/m{0}0/m{0}00/m{0}000.py\np{0}a/m{0}0/m{0}00/\np{0}a/m{0}0/m{0}00.py\np{0}a/m{0}0/\np{0}a/m{0}0.py\np{0}a/\n'''[1:]\n_tree_template = _TreeTemplate(__tree_template)\n# \n# r'''\n# must be edited when `_tree_template` is changed.\n# '''\n# _expected_expected = (\n# ('p1a/m10/m100/m1000.py', 'p1a/m10/m100/m2000.py'),\n# ('p1a/m10/m100', 'p1a/m10/m200'),\n# ('p1a/m10/m100.py', 'p1a/m10/m200.py'),\n# ('p1a/m10', 'p1a/m20'),\n# ('p1a/m10.py', 'p1a/m20.py'),\n# ('p1a', 'p2a'),\n# ('p1.py', 'p0.py'),\n# )\n# _feed0 = r'''\n# p1.py\n# p0a/m00/m000/m0000.py\n# p0a/m00/m000/\n# p0a/m00/m000.py\n# p0a/m00/\n# p0a/m00.py\n# p0a/\n# '''[1:]\n# _feed1 = r'''\n# p1+/\n# p1a/m10/m100/m1000.py\n# p1a/m10/m100/\n# p1a/m10/m100.py\n# p1a/m10/\n# p1a/m10.py\n# p1a/\n# '''[1:]\n# _feed = _feed0 + _feed1\n","sub_path":"apymake/util/test/t0nameshift/t00rehearsal/t003.py","file_name":"t003.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"328849801","text":"from setuptools import setup, find_packages\nimport os\n\nNAME = 'ml_utils'\nDESCRIPTION = 'A library for machine learning utilities'\nURL = 'https://lspgitlab01.alm.brand.dk/abanbn/ab_models'\nEMAIL = 'abanbn@almbrand.dk'\nAUTHOR = 'Anders Bogsnes'\nREQUIRES_PYTHON = '>=3.6.0'\nVERSION = None\n\nREQUIRED = [\n 'scikit-learn',\n 'scipy',\n 'pandas',\n 'numpy',\n 'matplotlib',\n 'gitpython'\n]\n\nTESTS_REQUIRED = [\n 'pytest',\n 'pytest-cov'\n]\n\nhere = os.path.abspath(os.path.dirname(__file__))\nabout = {}\n\nif not VERSION:\n with open(os.path.join(here, 'src', NAME, '__version__.py')) as f:\n exec(f.read(), about)\nelse:\n about['__version__'] = VERSION\n\n\nsetup(\n name=NAME,\n version=about['__version__'],\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n install_requires=REQUIRED,\n tests_require=TESTS_REQUIRED,\n license='MIT',\n package_data={'': ['*.mplstyle']}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"39051854","text":"import cv2, fcv, fastthresh, fastihls\nimport numpy as np\n\ni = cv2.imread('/mnt/vboxshare/FullIJCNN2013/00001.ppm', 1)\nfastihls.rgb2ihls(i)\n\nh,w,c = i.shape\n\nm = np.zeros((h,w), 'uint8')\nfastthresh.fast_thresh(i, m, 230, 11, 30)\n\nm2 = m.copy()\nkern = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, 
(3,3))\nm2 = cv2.morphologyEx(m2, cv2.MORPH_OPEN, kern)\n\nseg = fcv.segment(m2)\n\n","sub_path":"pyColorStat/seg_test.py","file_name":"seg_test.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"189403117","text":"# -*- coding:utf-8 -*-\n# @Time: 2020/7/8 18:20\n# @Author: duiya duiyady@163.com\n\n\n\"\"\"\nGiven the set [1,2,3,…,n], its elements form n! permutations in total.\nListed in ascending order and labelled one by one, for n = 3 all permutations are:\n\"123\"\n\"132\"\n\"213\"\n\"231\"\n\"312\"\n\"321\"\nGiven n and k, return the k-th permutation.\nNotes:\nn is in the range [1, 9].\nk is in the range [1, n!].\nExample 1:\nInput: n = 3, k = 3\nOutput: \"213\"\n\"\"\"\n\ndef getPermutation(n, k):\n    nums = [str(i) for i in range(1, n + 1)]\n    result = []\n    # compute n! into tmp\n    tmp1 = n\n    tmp = 1\n    while tmp1 > 0:\n        tmp = tmp * tmp1\n        tmp1 -= 1\n    k = k - 1  # switch to a 0-based rank\n    for i in range(n):\n        # tmp becomes the number of permutations sharing the next leading digit\n        tmp = tmp // (n - i)\n        index = k // tmp\n        k = k % tmp\n        result.append(nums.pop(index))\n    return \"\".join(result)\n\n\n\nif __name__ == '__main__':\n    print(getPermutation(4, 9))\n","sub_path":"src/main/num001_100/60_第k个排列.py","file_name":"60_第k个排列.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"466917712","text":"from lxml_anchor_tag_extractor import LXMLAnchorTagExtractor\nparse_anchor_tags_from_url = LXMLAnchorTagExtractor.parse_anchor_tags_from_url\n\n# things to implement:\n# robots.txt file reader and understander\n# what happens to super large html files?\n# how to avoid calendars\n# non ascii characters?\n\n\nclass LXMLTrapCrawler:\n    def __init__(self):\n        self.traversedURLs = set()\n        self.frontier = []\n\n    def traverse(self, url):\n        self.frontier.append(url)\n\n        while self.frontier:\n            current = self.frontier.pop(0)\n            print(\"Mining \" + current)\n            self.traversedURLs.add(current)\n            self.frontier.extend([x for x in parse_anchor_tags_from_url(current) if x not in self.traversedURLs and x not in self.frontier])\n\n        print(\"Finished mining frontier!\")\n\nif __name__ == \"__main__\":\n    t = LXMLTrapCrawler()\n    t.traverse(\"http://www.teekwak.com\")\n","sub_path":"lxml_standalone/standalone/lxml_trap_crawler.py","file_name":"lxml_trap_crawler.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"570476870","text":"from django.shortcuts import render, get_object_or_404\nfrom .forms import ContactForm, NotificationForm\nfrom django.core.mail import send_mail,send_mass_mail\nfrom django.contrib.auth.models import User\n\n\ndef contact_us(request):\n\tform = ContactForm()\n\tif request.method == \"POST\":\n\t\tform = ContactForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tfName=form.cleaned_data['firstName']\n\t\t\tlName=form.cleaned_data['lastName']\n\t\t\tfrom_email=form.cleaned_data['email']\n\t\t\tto_email=['info.dgtit@gmail.com','tusharkoley@gmail.com']\n\t\t\tsubject=form.cleaned_data['subject']\n\t\t\tmessage=form.cleaned_data['message']\n\n\t\t\tmessage=\" Sender Name : {} {} . \\n Sender Email Id {}. \\n The message is : {} \".
format(fName, lName,from_email, message)\n\n\n\t\t\tsend_mail(subject,message,'runakoley3@gmail.com',to_email,fail_silently=False,)\n\t\t\treturn render (request, \"contact_confirm.html\",{})\n \n\t\telse:\n\t\t\tform = ContactForm()\n \n\treturn render (request, \"contact_us.html\",{'form': form})\n\n\ndef send_notification_all( request):\n\tform = NotificationForm()\n\tif request.method == \"POST\":\n\t\tform = NotificationForm(request.POST)\n\t\tif form.is_valid():\t\t\t\t\n\t\t\tfrom_email='admin@dgtit.com'\n\t\t\tusers=User.objects.all()\n\t\t\tto_email=[user.email for user in users]\n\t\t\tsender_email='info.dgtit@gmail.com'\n\t\t\tsubject=form.cleaned_data['subject'] \n\n\t\t\tmsg_list=[]\n\n\t\t\tfor user in users:\n\t\t\t\tmessage=form.cleaned_data['message']\n\t\t\t\tmessage=\" Hi {}, \\n {}\".format(user.username.title(), message)\n\t\t\t\tto_email=[]\n\t\t\t\tto_email.append(user.email)\n\t\t\t\n\t\t\t\tmsg=(subject,message,sender_email,to_email)\n\t\t\t\tmsg_list.append(msg)\n\n\t\t\tmesg_tuple=tuple(msg_list)\n\t\t\tsend_mass_mail(mesg_tuple,fail_silently=True)\n\t\t\t\n\t\t\t\n\t\t\t\n\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\treturn render (request, \"notification_confirm.html\",{})\n \n\t\telse:\n\t\t\tform = NotificationForm()\n \n\treturn render (request, \"send_notification.html\",{'form': form})\n\n\n\n\n\ndef home(request):\n\treturn render(request,\"home.html\", {}) \n\ndef team( request):\n\treturn render (request, \"team.html\",{})\n\ndef index( request):\n return render (request, \"apps_index.html\",{})\n","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"365361325","text":"import os\nimport shutil\nimport ssl\nimport sys\nimport pytest\n\nsys.path.append(os.path.join(os.path.dirname(__file__), 'helpers'))\n\n# without this, craps out on (some?) 
https links...\nssl._create_default_https_context = ssl._create_unverified_context\n\ndef pytest_addoption(parser):\n    parser.addoption(\n        \"--runnetwork\", action=\"store_true\", default=False, help=\"run network tests\"\n    )\n\ndef pytest_collection_modifyitems(config, items):\n    if config.getoption(\"--runnetwork\"):\n        # --runnetwork given in cli: do not skip network tests\n        return\n    skip_network = pytest.mark.skip(reason=\"need --runnetwork option to run\")\n    for item in items:\n        if \"network\" in item.keywords:\n            item.add_marker(skip_network)\n\n###############################################################\n\norig_cwd = None\n\nTESTDATADIR = \"testdata\"\nDEFAULT_DATA = \"default_data\"\n\ndef _get_cwd():\n    global orig_cwd\n    if orig_cwd is None:\n        orig_cwd = os.getcwd()\n\n@pytest.fixture(scope=\"function\")\ndef testdatadir(request):\n    _get_cwd()\n    def testdatadir_teardown():\n        os.chdir(orig_cwd)\n        shutil.rmtree(TESTDATADIR, ignore_errors=True)\n    testdatadir_teardown()\n    os.mkdir(TESTDATADIR)\n    shutil.copytree(os.path.join(orig_cwd, DEFAULT_DATA),\n                    os.path.join(TESTDATADIR, DEFAULT_DATA))\n    os.chdir(TESTDATADIR)\n    request.addfinalizer(testdatadir_teardown)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"178296751","text":"from flask_login import LoginManager, login_user, logout_user, login_required, current_user\nimport atp_classes\nfrom functools import wraps\nfrom flask import make_response\n\nclass AppLogin:\n\n    def __init__(self, app):\n        self.lm = LoginManager()\n        self.lm.init_app(app)\n        self.lm.login_view = '/handleLogin'\n\n        self.required_login = login_required\n        self.current_user = current_user\n\n        @self.lm.user_loader\n        def load_user(id):\n            user = atp_classes.User.find_user_by_id(id)\n            if user:\n                return atp_classes.User(str(user._id), user.username)\n            else:\n                return None\n\n    def log_user_in(self, user):\n        login_user(user)\n        return True\n\n    def log_user_out(self):\n        logout_user()\n        return True\n\n    def required_admin(self, f):\n        @wraps(f)\n        def decorator(*args, **kwargs):\n            if self.current_user.is_admin():\n                response = f(*args, **kwargs)\n            else:\n                response = make_response(\"Invalid privileges\")\n\n            return response\n\n        return decorator\n","sub_path":"atp_classes/AppLogin.py","file_name":"AppLogin.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"612975065","text":"class Solution(object):\n\n    def DFS(self, l, left, right):\n        if left == right == self.n:\n            self.solutions.append(\"\".join(l))\n            return\n        if left < self.n:\n            l.append(\"(\")\n            self.DFS(l, left + 1, right)\n            l.pop()\n\n        if left > right and right < self.n:\n            l.append(\")\")\n            self.DFS(l, left, right + 1)\n            l.pop()\n\n    def generateParenthesis(self, n):\n        self.solutions = []\n        self.n = n\n        self.DFS([], 0, 0)\n\n        return self.solutions\n\n\ntestClass = Solution()\n\nprint(testClass.generateParenthesis(3))\n","sub_path":"22-generate-parantheses/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"82071204","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass ScheduleSpider(scrapy.Spider):\n    name = 'Schedule'\n    allowed_domains = ['9anime.to']\n    start_urls = ['https://9anime.to/schedule/']\n\n    def parse(self, response):\n\n        for 
sched in response.css('div.day-block'):\n item = {\n 'Date':sched.css('div.date::text').extract(),\n 'Name':sched.css('a.name::text').extract(),\n 'Info':sched.css('div.release::text').extract()\n }\n yield item\n \n prev_page = response.css('span.prev > a::attr(href)').extract_first()\n if (prev_page):\n prev_prev = response.urljoin(prev_page)\n yield scrapy.Request(url = prev_prev, callback = self.parse)\n \n","sub_path":"Assignment/spiders/Schedule Activity 1.py","file_name":"Schedule Activity 1.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"381942938","text":"\"\"\"\nUse HPPC battery module data to determine curve fit coefficients for each SOC\nsection. Curve fit coefficients are from one time constant (OTC) and two time\nconstant (TTC) functions. OTC represents one RC pair and TTC represents two RC\npairs.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nimport params\nfrom ecm import ModuleHppcData\nfrom ecm import ModuleEcm\nfrom ecm import config_ax\n\n# Battery module HPPC data and equivalent circuit model\n# ----------------------------------------------------------------------------\n\nfile = '../data/module1-electchar-65ah-23deg.csv'\ndata = ModuleHppcData(file)\necm = ModuleEcm(data, params)\n\n# indices representing start (id3) and end (id4) of curve in each SOC section\n_, _, id2, _, id4 = data.get_indices_discharge()\nid2 = id2[:-1]\n\n# Print curve fit coefficients\n# ----------------------------------------------------------------------------\n\nfunc_otc = ecm.func_otc\nfunc_ttc = ecm.func_ttc\n\ncoeffs_otc = ecm.curve_fit_coeff(func_otc, 3)\ncoeffs_ttc = ecm.curve_fit_coeff(func_ttc, 5)\n\nprint('\\nCurve fit coefficients from OTC')\nprint('a\\tb\\talpha')\nfor c in coeffs_otc:\n print(f'{c[0]:.4f}\\t{c[1]:.4f}\\t{c[2]:.4f}')\n\nprint('\\nCurve fit coefficients from TTC')\nprint('a\\tb\\tc\\talpha\\tbeta')\nfor c in coeffs_ttc:\n print(f'{c[0]:.4f}\\t{c[1]:.4f}\\t{c[2]:.4f}\\t{c[3]:.4f}\\t{c[4]:.4f}')\nprint('')\n\n# Plot curve fit\n# ----------------------------------------------------------------------------\n\nfor i in range(len(id2)):\n start = id2[i]\n end = id4[i]\n t_curve = data.time[start:end]\n v_curve = data.voltage[start:end]\n t_scale = t_curve - t_curve[0]\n\n vfit1 = ecm.func_otc(t_scale, *coeffs_otc[i])\n vfit2 = ecm.func_ttc(t_scale, *coeffs_ttc[i])\n\n fig, ax = plt.subplots(tight_layout=True)\n ax.plot(t_curve, v_curve, 'C3', marker='.', label='data')\n ax.plot(t_curve, vfit1, label='otc')\n ax.plot(t_curve, vfit2, label='ttc')\n config_ax(ax, xylabels=('Time [s]', 'Voltage [V]'), title=f'SOC section {i}', loc='best')\n\nfig, ax = plt.subplots(tight_layout=True)\nax.plot(data.time, data.voltage, 'C3', label='data')\nax.plot(data.time[id2], data.voltage[id2], '*', label='id2')\n# ax.plot(data.time[id3], data.voltage[id3], '*', label='id3')\nax.plot(data.time[id4], data.voltage[id4], '*', label='id4')\nconfig_ax(ax, xylabels=('Time [s]', 'Voltage [V]'), loc='best')\n\nplt.show()\n","sub_path":"examples/module/hppc_curvefit.py","file_name":"hppc_curvefit.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"516380697","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport 
matplotlib.pyplot as plt\nfrom collections import OrderedDict\nfrom joblib import dump, load \n\ndef genPlotModel(ticker):\n df = pd.read_csv('data/%s.csv'%ticker, sep = ',', header = 0)\n\n df.shape\n \"\"\"\n plt.figure(figsize=(16,8))\n plt.title('Close Price History')\n plt.plot(df['Close'])\n plt.xlabel('Date', fontsize=18)\n plt.ylabel('Close Price USD ($)', fontsize=18)\n #plt.show()\n plt.close()\n \"\"\"\n\n X = np.array(df['Close'])\n\n y = np.array(df['Prediction'])\n\n n=30 # number of days on which predictions are made\n\n x_train1 = np.array(df.drop(['Date', 'Prediction'],1))[:-n]\n x_test1 = np.array(df.drop(['Date', 'Prediction'],1))[-n:]\n x_train = X[:-n]\n x_test = X[-n:]\n y_train = y[:-n]\n y_test = y[-n:]\n\n lr = LinearRegression()\n lr.fit(x_train1, y_train)\n dump(lr, 'models/models_%s.joblib'%ticker)\n\n lr_confidence = lr.score(x_test1, y_test)\n print(\"lr confidence for %s: \"%ticker, lr_confidence)\n\n x_forecast = np.array(df.drop(['Date', 'Prediction'],1))[-1:]\n lr_prediction = lr.predict(x_forecast)\n print('Prediction for the 1 day out:', lr_prediction)\n\n lr_prediction = lr.predict(x_test1)\n train = x_train1\n valid = df.drop(['Date', 'Prediction'],1)[-n:]\n valid['Predictions'] = lr_prediction\n plt.figure(figsize=(16,8))\n plt.title('Model')\n plt.xlabel('Date', fontsize=18)\n plt.ylabel('Close Price USD ($)', fontsize=18)\n plt.plot(df['Close'])\n plt.plot(valid[['Close', 'Predictions']])\n plt.legend(['Train', 'Val', 'Predictions'])\n #plt.show()\n\n plt.savefig('figs/%s.png'%ticker)\n plt.close()\n\nif __name__ == '__main__':\n dow_jones_dict = OrderedDict()\n dow_jones_dict['aapl'] = 'Apple'\n dow_jones_dict['amgn'] = 'Amgen'\n dow_jones_dict['axp'] = 'American Express'\n dow_jones_dict['ba'] = 'Bank of America'\n dow_jones_dict['cat'] = 'Caterpillar Inc.'\n dow_jones_dict['crm'] = 'Salesforce'\n dow_jones_dict['csco'] = 'Cisco Systems'\n dow_jones_dict['cvx'] = 'Chevron Corporation'\n dow_jones_dict['dis'] = 'Disney'\n dow_jones_dict['^dji'] = 'Dow Jones Index'\n dow_jones_dict['dow'] = 'Dow Inc.'\n dow_jones_dict['gs'] = 'Goldman Sachs'\n dow_jones_dict['hd'] = 'The Home Depot'\n dow_jones_dict['hon'] = 'Honeywell'\n dow_jones_dict['ibm'] = 'IBM'\n dow_jones_dict['intc'] = 'intel'\n dow_jones_dict['jnj'] = 'Johnson & Johnson'\n dow_jones_dict['jpm'] = 'JPMorgan Chase'\n dow_jones_dict['ko'] = 'Coca-Cola'\n dow_jones_dict['mcd'] = \"McDonald's\"\n dow_jones_dict['mmm'] = '3M'\n dow_jones_dict['mrk'] = 'Merck & Co.'\n dow_jones_dict['msft'] = 'Microsoft'\n dow_jones_dict['nke'] = 'Nike'\n dow_jones_dict['pg'] = 'Procter & Gamble'\n dow_jones_dict['trv'] = 'The Travelers Companies'\n dow_jones_dict['unh'] = 'UnitedHealth Group'\n dow_jones_dict['v'] = 'Visa'\n dow_jones_dict['vz'] = 'Verizon'\n dow_jones_dict['wba'] = 'Walgreens'\n dow_jones_dict['wmt'] = 'Walmart'\n\n dow_jones_stocks = {\n 'aapl' : 'Apple', \n 'amgn' : 'Amgen', \n 'axp' : 'American Express', \n 'ba' : 'Bank of America', \n 'cat' : 'Caterpillar Inc', \n 'crm' : 'Salesforce', \n 'csco' : 'Cisco Systems',\n 'cvx' : 'Chevron Corporation', \n 'dis' : 'Disney', \n '^dji' : 'Dow Jones Index',\n 'dow' : 'Dow Inc.', \n 'gs' : 'Goldman Sachs', \n 'hd' : 'The Home Depot', \n 'hon' : 'Honeywell', \n 'ibm' : 'IBM', \n 'intc' : 'Intel',\n 'jnj' : 'Johnson & Johnson', \n 'jpm' : 'JPMorgan Chase', \n 'ko' : 'Coca-Cola', \n 'mcd' : \"McDonald's\", \n 'mmm' : '3M', \n 'mrk' : 'Merck & Co.', \n 'msft' : 'Microsoft', \n 'nke' : 'Nike',\n 'pg' : 'Procter & Gamble', \n 'trv' : 'The Travelers 
Companies', \n 'unh' : 'UnitedHealth Group', \n 'v' : 'Visa', \n 'vz' : 'Verizon', \n 'wba' : 'Walgreens', \n 'wmt' : 'Walmart'\n }\n\n for stock in list(dow_jones_dict.keys()):\n genPlotModel(stock)","sub_path":"modelanalysis.py","file_name":"modelanalysis.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"249154290","text":"import unittest, requests, sqlite3\n\nclass TestApi(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # Connect to sqlite db\n conn = sqlite3.connect('db/blog.db')\n cursor = conn.cursor()\n\n # Remove all posts from db\n cursor.execute('DELETE FROM posts')\n conn.commit()\n\n # Close connection\n conn.close()\n \n def test1PostNoData(self):\n # Call API /post via POST\n response = requests.post('http://127.0.0.1:5000/post')\n\n # Should have HTTP status of 400\n self.assertEqual(response.status_code, 400)\n\n def test2PostBadData(self):\n # Payload data to be sent. 'text' is incorrect\n payload = {\n 'title': 'A Test Post', \n 'text': 'This is the first test post',\n }\n\n # Call API /post via POST\n response = requests.post('http://127.0.0.1:5000/post', json=payload)\n\n # Should have HTTP status of 400\n self.assertEqual(response.status_code, 400)\n\n def test3GetEmptyData(self):\n # Call API /posts via GET\n response = requests.get('http://127.0.0.1:5000/posts')\n\n # Get response data\n data = response.json()\n\n # Status should be 200 OK\n self.assertEqual(response.status_code, 200)\n\n # No posts have been made correctly, should be an empty list\n self.assertEqual(len(data), 0)\n\n def test4PostData(self):\n # Create a correctly formed payload\n payload = {\n 'title': 'A Test Post', \n 'body': 'This is the first good test post',\n }\n\n # Call API /post via POST\n response = requests.post('http://127.0.0.1:5000/post', json=payload)\n\n # Status should be 200 OK\n self.assertEqual(response.status_code, 200)\n\n def test5FirstGet(self):\n # Call API /posts via GET\n response = requests.get('http://127.0.0.1:5000/posts')\n\n # Get response json data\n data = response.json()\n\n # Status should be 200 OK\n self.assertEqual(response.status_code, 200)\n\n # List should contain 1 item\n self.assertEqual(len(data), 1)\n\n # The title of the first and only item should match what was POSTed\n self.assertEqual(data[0][\"title\"], \"A Test Post\") \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Testing/TestApi.py","file_name":"TestApi.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"325525613","text":"import socket\nimport sys\nimport sender\nimport encrypt\n\n\ndef sendMsg(msgs):\n res = b''\n for msg in msgs:\n res = b\"\".join([res, msg])\n\n return res\n\ndef prepdata(data):\n prev = 0\n res = []\n num = 0\n s = b''\n for i in data:\n temp = i.encode(\"ascii\")\n s = b''.join([s, temp])\n if num == 1:\n res.append(s)\n num = 0\n s = b''\n else:\n num = num+1\n\n return res\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n if len(sys.argv) != 6:\n raise ValueError('Incorrect num of args')\n sender.sIP = socket.gethostbyname(socket.gethostname())\n # if isinstance(sender.sIP, str):\n # print(\"worked\")\n sender.dIP = socket.gethostbyname(sys.argv[1])\n sender.de_port = int(sys.argv[2])\n sender.so_port = sender.de_port\n\n sender.sIP = sender.processIP(sender.sIP)\n tempIP = sender.dIP\n sender.dIP = 
sender.processIP(sender.dIP)\n\n sender.info = sys.argv[3] + \"@\" + sys.argv[4] + \"@\" + sys.argv[5] + \"@\"\n sender.info = prepdata(sender.info)\n sender.udpL = sender.calcUdpLen() # number of bytes\n print(\"LENGTH: \" + str(sender.udpL))\n\n keys = encrypt.readKeys(\"keyall1\", 'rb')\n sender.info = encrypt.encrypt(sender.info, keys)\n sender.addpadding(sender.info)\n sender.check = sender.checksum()\n print(\"THE CHECK: \" + str(sender.check))\n datag = sender.setDatagram()\n datag = sendMsg(datag)\n s.sendto(datag, (tempIP, sender.de_port)) # send the data\n # s.settimeout(2) # wait for response\n\n response, addr = s.recvfrom(2056)\n response = response.decode('utf-8')\n print(response)\n s.close()\n","sub_path":"ECE456/Lab6/udp/rcmd.py","file_name":"rcmd.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"530778512","text":"#!/usr/bin/env python\n\nimport os, sys, getopt, signal\nimport gobject, gtk, pango\nimport random, time, math\n\nTABSTOP = 4 # One tab stop worth of spaces\n\n# ------------------------------------------------------------------------\n\nclass MainWin():\n\n def __init__(self):\n \n self.window = window = gtk.Window(gtk.WINDOW_TOPLEVEL)\n self.ex = 0; self.ey = 0; \n self.maxx = 0; self.maxy = 0; self.minx = 0; self.miny = 0\n self.coords = []; self.coords2 = []; self.coords3 = []; \n \n window.set_title(\"PyNeural\")\n window.set_position(gtk.WIN_POS_CENTER)\n \n #ic = gtk.Image(); ic.set_from_stock(gtk.STOCK_DIALOG_INFO, gtk.ICON_SIZE_BUTTON)\n #window.set_icon(ic.get_pixbuf())\n \n www = gtk.gdk.screen_width(); hhh = gtk.gdk.screen_height();\n window.set_default_size(www/2, hhh/2)\n \n window.set_flags(gtk.CAN_FOCUS | gtk.SENSITIVE)\n \n window.set_events(gtk.gdk.ALL_EVENTS_MASK)\n \n window.connect(\"destroy\", self.OnExit)\n window.connect(\"key-press-event\", self.key_press_event) \n window.connect(\"button-press-event\", self.button_press_event) \n\n self.setfont()\n \n try:\n window.set_icon_from_file(\"icon.png\")\n except:\n pass\n \n hbox2 = gtk.HBox()\n area = gtk.DrawingArea(); hbox2.pack_start(area)\n area.set_events(gtk.gdk.ALL_EVENTS_MASK)\n area.connect(\"expose-event\", self.area_expose_cb)\n area.connect(\"motion-notify-event\", self.area_motion)\n\n vbox = gtk.VBox(); vbox.pack_start(hbox2, True)\n hbox = gtk.HBox(); lab1 = gtk.Label(\"\"); hbox.pack_start(lab1)\n #butt1 = gtk.Button(\" _New \")\n #butt1.connect(\"clicked\", self.show_new, window)\n #hbox.pack_start(butt1, False)\n \n butt2 = gtk.Button(\" E_xit \")\n butt2.connect(\"clicked\", self.OnExit, window)\n hbox.pack_start(butt2, False)\n \n lab2 = gtk.Label(\"\"); hbox.pack_start(lab2)\n vbox.pack_start(hbox, False)\n \n window.add(vbox); window.show_all()\n\n\n def setfont(self, fam = None, size = None):\n self.pangolayout = self.window.create_pango_layout(\"a\")\n \n if fam or size:\n fd = pango.FontDescription()\n if fam:\n fd.set_family(fam)\n if size:\n fd.set_size(size * pango.SCALE); \n self.pangolayout.set_font_description(fd)\n\n # Get Pango steps\n self.cxx, self.cyy = self.pangolayout.get_pixel_size()\n \n # Get Pango tabs\n self.tabarr = pango.TabArray(80, False)\n for aa in range(self.tabarr.get_size()):\n self.tabarr.set_tab(aa, pango.TAB_LEFT, aa * TABSTOP * self.cxx * pango.SCALE)\n \n self.pangolayout.set_tabs(self.tabarr)\n ts = self.pangolayout.get_tabs()\n \n if ts != None: \n al, self.tabstop = ts.get_tab(1)\n self.tabstop /= self.cxx * pango.SCALE\n\n def 
area_motion(self, area, event): \n #print \"motion\", area, event\n self.ex = event.x; self.ey = event.y\n rect = area.get_allocation()\n xx, yy = self.measure_text(\"a\" * 40)\n rect.x = rect.width - xx - 1; rect.width = xx\n rect.y = rect.height - yy - 1; rect.height = yy\n # This is a test to see the invalidate rectangle\n #gc = area.get_style().fg_gc[gtk.STATE_NORMAL]\n #area.window.draw_rectangle(gc, False, rect.x, rect.y, rect.width - 1, rect.height -1)\n area.window.invalidate_rect(rect, True)\n\n # Adjust coordinates. Scale / Shift / Convert, and Draw\n def draw_line(self, area, gcr, xx0, yy0, xx1, yy1):\n ww, hh = area.window.get_size()\n # Scale\n try:\n raty = float((hh/2 - 20)) / self.maxy\n ratx = float((ww - 20)) / self.maxx\n # Adjust\n xx00 = ratx * xx0 ; xx01 = ratx * xx1\n yy00 = -raty * yy0; yy01 = -raty * yy1\n # Convert\n area.window.draw_line(gcr, int(xx00) + 10, int(yy00) + hh/2, int(xx01) + 10, int(yy01) + hh/2)\n except:\n pass\n \n # Draw a text with curent font\n def draw_text(self, area, gc, x, y, text, foreground = None, background = None):\n self.pangolayout.set_text(text)\n xx, yy = self.pangolayout.get_pixel_size()\n area.draw_layout(gc, x, y, self.pangolayout, foreground, background)\n return xx, yy\n \n def measure_text(self, text):\n self.pangolayout.set_text(text)\n xx, yy = self.pangolayout.get_pixel_size()\n return xx, yy\n\t\n def area_expose_cb(self, area, event):\n #print \"expose\", area, event, len(self.coords)\n colormap = gtk.widget_get_default_colormap() \n style = area.get_style()\n self.gc = style.fg_gc[gtk.STATE_NORMAL]\n gcx = gtk.gdk.GC(area.window); gcx.copy(self.gc)\n gcr = gtk.gdk.GC(area.window); gcr.copy(self.gc)\n gcr.set_foreground(colormap.alloc_color(\"#ff0000\"))\n gcg = gtk.gdk.GC(area.window); gcg.copy(self.gc)\n gcg.set_foreground(colormap.alloc_color(\"#00ff00\"))\n gcb = gtk.gdk.GC(area.window); gcb.copy(self.gc)\n gcb.set_foreground(colormap.alloc_color(\"#0000ff\"))\n \n self.maxx = 0; self.maxy = 0; self.minx = 0; self.miny = 0\n \n if len(self.coords) > 1:\n # Estabilish boundary numbers\n for aa, bb, cc in self.coords:\n if self.maxy < bb: self.maxy = bb\n if self.miny > bb: self.miny = bb\n if self.maxx < aa: self.maxx = aa\n if self.minx > aa: self.minx = aa\n \n #print \"Drawing Coords:\", len(self.coords)\n olda = self.coords[0][0]; oldb = self.coords[0][1]\n cnt = 0\n for aa, bb, cc in self.coords:\n self.draw_line(area, gcr, olda, oldb, aa, bb)\n olda = aa; oldb = bb\n cnt += 1\n \n if len(self.coords2) > 1:\n for aa, bb, cc in self.coords2:\n if self.maxy < bb: self.maxy = bb\n if self.miny > bb: self.miny = bb\n if self.maxx < aa: self.maxx = aa\n if self.minx > aa: self.minx = aa\n \n olda = self.coords2[0][0]; oldb = self.coords2[0][1]\n cnt = 0\n for aa, bb, cc in self.coords2:\n self.draw_line(area, gcg, olda, oldb, aa, bb)\n olda = aa; oldb = bb\n cnt += 1\n\n if len(self.coords3) > 1:\n for aa, bb, cc in self.coords3:\n if self.maxy < bb: self.maxy = bb\n if self.miny > bb: self.miny = bb\n if self.maxx < aa: self.maxx = aa\n if self.minx > aa: self.minx = aa\n \n olda = self.coords3[0][0]; oldb = self.coords3[0][1]\n cnt = 0\n for aa, bb, cc in self.coords3:\n self.draw_line(area, gcb, olda, oldb, aa, bb)\n olda = aa; oldb = bb\n cnt += 1\n \n # Draw axes \n gcr.set_foreground(colormap.alloc_color(\"#000000\"))\n ww, hh = area.window.get_size()\n area.window.draw_line(gcr, 10, hh/2, ww-10, hh/2)\n area.window.draw_line(gcr, 10, 10, 10, hh-10)\n ah = 1; aw = 1\n \n for aa in range(10, ww - 10, 10):\n 
area.window.draw_line(gcr, aa, hh/2 - ah, aa, hh/2 + ah)\n \n for aa in range(hh/2, 10, -10):\n area.window.draw_line(gcr, 10-aw, aa, 10 + ah, aa)\n \n for aa in range(hh/2, hh - 10, 10):\n area.window.draw_line(gcr, 10-aw, aa, 10 + ah, aa)\n \n vhh = hh - 40 # Leave space on top / buttom\n \n # Draw text\n if self.maxy and self.maxx:\n raty = float((hh/2 - 20)) / self.maxy\n ratx = float((ww - 20)) / self.maxx\n \n self.draw_text(area.window, gcr, 20, 10, str(self.minx) + \" ... \" + str(self.maxx) \\\n + \" \" + \"%0.3f\" % self.miny + \" ... \" + \"%0.3f\" % self.maxy)\n \n str2 = \"xx = %f yy = %f \" % (self.ex / ratx, ((hh/2) - self.ey) / raty)\n ww2, hh2 = self.measure_text(str2)\n self.draw_text(area.window, gcr, ww - ww2 - 20, hh - hh2, str2)\n \n def invalidate(self):\n rect = self.window.get_allocation()\n #print \"Invalidate\", rect\n self.window.window.invalidate_rect(rect, True)\n\n def OnExit(self, arg, srg2 = None):\n self.exit_all()\n \n def exit_all(self):\n gtk.main_quit()\n \n def key_press_event(self, win, event):\n #print \"key_press_event\", win, event\n pass\n \n def button_press_event(self, win, event):\n #print \"key_press_event\", win, event\n pass\n \n# Start of program:\n\nif __name__ == '__main__':\n\n mainwin = MainWin() \n gtk.main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"neural/neural-007/mainwin.py","file_name":"mainwin.py","file_ext":"py","file_size_in_byte":9100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"631460910","text":"import sys\n\nnum = int(input())\n\na = list(map(int, input().split()))\na.sort()\nMIN = sys.maxsize\nleft = 0\nright = num - 1\naL = 0\naR = 0\n\nwhile left < right:\n Sum = a[left] + a[right]\n\n if abs(Sum) < MIN:\n aL = left\n aR = right\n MIN = abs(Sum)\n if Sum > 0:\n right -= 1\n elif Sum < 0:\n left += 1\n else:\n break\n\nprint(a[aL], a[aR])","sub_path":"BOJ/BOJ Python/PY2467_용액.py","file_name":"PY2467_용액.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"559837485","text":"import os\nfrom PIL import Image\nfrom glob import glob\nimport tensorflow as tf\nimport numpy as np\n\ndef get_loader(root, batch_size, scale_size, data_format, split=None, is_grayscale=False, seed=None):\n dataset_name = os.path.basename(root) # data/celeba\n if dataset_name in ['CelebA'] and split:\n root = os.path.join(root, 'splits', split) # default is train, therefore data/celeba/splits/train\n\n for ext in [\"jpg\", \"png\"]:\n rgbpaths = sorted(glob(\"{}/rgb/*.{}\".format(root, ext))) # png\n normalpaths = sorted(glob(\"{}/normal/*.{}\".format(root, ext))) # png\n maskpaths = sorted(glob(\"{}/mask/*.{}\".format(root, ext))) # png\n\n if ext == \"jpg\":\n tf_decode = tf.image.decode_jpeg\n elif ext == \"png\":\n tf_decode = tf.image.decode_png\n\n if len(rgbpaths) != 0:\n break\n\n with Image.open(rgbpaths[0]) as img:\n w, h = img.size\n shape = [h, w, 3]\n\n filename_queue = tf.train.string_input_producer(list(rgbpaths), shuffle=False, seed=seed)\n reader = tf.WholeFileReader()\n filename, data = reader.read(filename_queue)\n image = tf_decode(data, channels=3)\n\n\n Nfilename_queue = tf.train.string_input_producer(list(normalpaths), shuffle=False, seed=seed)\n Nfilename, Ndata = reader.read(Nfilename_queue)\n normal = tf_decode(Ndata, channels=3)\n\n Mfilename_queue = tf.train.string_input_producer(list(maskpaths), shuffle=False, seed=seed)\n Mfilename, Mdata = 
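The PY2467 record above is the standard two-pointer scan of a sorted array for the pair whose sum is closest to zero. The same idea as a standalone function, with a hardcoded example in place of stdin:

# Two-pointer scan over a sorted list for the pair with sum closest
# to 0 -- the technique used by the PY2467 record, repackaged.
def closest_pair_to_zero(values):
    a = sorted(values)
    left, right = 0, len(a) - 1
    best = (a[left], a[right])
    while left < right:
        s = a[left] + a[right]
        if abs(s) < abs(best[0] + best[1]):
            best = (a[left], a[right])
        if s > 0:
            right -= 1   # sum too large: bring the right end down
        elif s < 0:
            left += 1    # sum too small: bring the left end up
        else:
            return (a[left], a[right])  # exact zero cannot be beaten
    return best

print(closest_pair_to_zero([-2, 4, -99, 98, -1, 5]))  # -> (-99, 98)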
reader.read(Mfilename_queue)\n    mask = tf_decode(Mdata, channels=3)\n\n    if is_grayscale:\n        pass\n        # image = tf.image.rgb_to_grayscale(image)\n\n    image.set_shape(shape)\n    normal.set_shape(shape)\n    mask.set_shape(shape)\n\n    min_after_dequeue = 5000\n    capacity = min_after_dequeue + 3 * batch_size  # 5000+3*16?\n\n    rgbqueue, normalqueue, maskqueue = tf.train.batch(\n        [image, normal, mask], batch_size=batch_size,\n        num_threads=1, capacity=capacity, name='synthetic_inputs')\n\n\n    if data_format == 'NCHW':\n        rgbqueue = tf.transpose(rgbqueue, [0, 3, 1, 2])\n        normalqueue = tf.transpose(normalqueue, [0, 3, 1, 2])\n        maskqueue = tf.transpose(maskqueue, [0, 3, 1, 2])\n\n    elif data_format == 'NHWC':\n        pass\n    else:\n        raise Exception(\"[!] Unknown data_format: {}\".format(data_format))\n\n    return tf.to_float(rgbqueue), tf.to_float(normalqueue), tf.to_float(maskqueue)\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"566972285","text":"# Baekjoon 14502\r\nimport copy\r\nfrom collections import deque\r\n\r\ndef BFS():\r\n    global result # declare result as a global variable\r\n    temp = copy.deepcopy(lab) # work on a temporary copy of the lab\r\n\r\n    for i in range(N):\r\n        for j in range(M):\r\n            if temp[i][j] == 2: # if this cell of the temporary lab is infected\r\n                queue.append([i, j]) # push its position onto the queue\r\n\r\n    while queue:\r\n        x, y = queue.popleft() # pop the next x, y position from the queue\r\n\r\n        # move up/down/left/right\r\n        for i in range(4):\r\n            nx = x + dx[i] \r\n            ny = y + dy[i]\r\n            # the new position must stay inside the grid\r\n            if nx >= 0 and nx < N and ny >= 0 and ny < M:\r\n                if temp[nx][ny] == 0: # if the new cell is empty \r\n                    temp[nx][ny] = 2 # it becomes infected by the virus\r\n                    queue.append([nx, ny]) # push its position onto the queue\r\n\r\n    cnt_safe = 0 # counter for the safe cells\r\n    for i in temp: # count the zeros left in the temporary lab\r\n        cnt_safe += i.count(0)\r\n    result = max(result, cnt_safe) # keep the larger of the current result and this count\r\n    \r\n# function that places the walls\r\ndef wall(x):\r\n    if x == 3: # once all three walls are placed \r\n        BFS() # run the BFS\r\n        return\r\n    else:\r\n        for i in range(N):\r\n            for j in range(M):\r\n                if lab[i][j] == 0: # if this lab cell is empty\r\n                    lab[i][j] = 1 # place one wall\r\n                    wall(x+1) # place the next wall\r\n                    lab[i][j] = 0 # take the wall back down (backtracking)\r\n    \r\n    \r\n\r\nN, M = map(int, input().split()) # read the NxM size of the lab\r\nlab = [] # the lab grid\r\nresult = 0\r\n\r\n# up/down/left/right\r\ndx = [0, 0, -1, 1]\r\ndy = [-1, 1, 0 , 0]\r\n\r\nqueue = deque() # create the queue\r\n\r\nfor _ in range(N): # the lab has N rows of M columns\r\n    lab.append(list(map(int, input().split())))\r\n\r\nwall(0)\r\n\r\nprint(result)\r\n","sub_path":"BFS/연구소(시간 초과).py","file_name":"연구소(시간 초과).py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"209270279","text":"# Lukas - 16.05.18 - TEST SCRIPT, NOT IN USE\n# Successful attempt at accessing the FPGA registers of the PID based on http://forum.redpitaya.com/viewtopic.php?f=14&t=1784&p=6978&hilit=mmap+python#p6966 \n# Code generates the mmap, sets setpoint, integrator and reset parameters, waits 10s and sets them to 0 again\n\nimport mmap\nimport os\nimport time\nimport numpy as np\n\n# Create object setting the datatype of every parameter to 32-bit unsigned int. 
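The Baekjoon 14502 record above (its filename even says "시간 초과", time limit exceeded) restarts a full grid scan at every recursion depth. The usual fix enumerates the three wall positions directly with itertools.combinations over the empty cells; a self-contained sketch on a made-up grid:

# Wall placement via itertools.combinations instead of re-scanning the
# grid at each recursion level (the common fix for Baekjoon 14502).
# Grid values: 0 empty, 1 wall, 2 virus. The 4x6 grid is an arbitrary
# hardcoded example so the sketch runs without stdin.
from itertools import combinations
from collections import deque
import copy

grid = [[2, 0, 0, 0, 1, 1],
        [0, 0, 1, 0, 1, 2],
        [0, 1, 1, 0, 1, 0],
        [0, 1, 0, 0, 0, 0]]
N, M = len(grid), len(grid[0])

def safe_cells_after(walls):
    lab = copy.deepcopy(grid)
    for (i, j) in walls:
        lab[i][j] = 1
    q = deque((i, j) for i in range(N) for j in range(M) if lab[i][j] == 2)
    while q:  # BFS spread of the virus
        x, y = q.popleft()
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < N and 0 <= ny < M and lab[nx][ny] == 0:
                lab[nx][ny] = 2
                q.append((nx, ny))
    return sum(row.count(0) for row in lab)

empty = [(i, j) for i in range(N) for j in range(M) if grid[i][j] == 0]
print(max(safe_cells_after(w) for w in combinations(empty, 3)))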
The ordering of the parameters is based on the RedPitaya Memory Map\nregset_hk = np.dtype([\n ('config' , 'uint32'),\n ('reserved_1' , 'uint32'),\n ('reserved_2' , 'uint32'),\n\t('reserved_3' , 'uint32'),\n ('Sp11' , 'uint32'),\n ('Kp11' , 'uint32'),\n ('Ki11' , 'uint32'),\n ('Kd11' , 'uint32'),\n\t('Sp12' , 'uint32'),\n ('Kp12' , 'uint32'),\n ('Ki12' , 'uint32'),\n ('Kd12' , 'uint32'),\n\t('Sp21' , 'uint32'),\n ('Kp21' , 'uint32'),\n ('Ki21' , 'uint32'),\n ('Kd21' , 'uint32'),\n\t('Sp22' , 'uint32'),\n ('Kp22' , 'uint32'),\n ('Ki22' , 'uint32'),\n ('Kd22' , 'uint32')\n])\n\n# Load the required image (fpgav0.94_sh6.bit) to the FPGA\nos.system('cat /opt/redpitaya/fpga/fpgav0.94_sh6.bit > /dev/xdevcfg')\n\n# Create the mmap and use it as buffer for a numpy array which can be modified. The offset parameter determines which part of the register is accessed (PID in this case), see the Memory Map for details.\nfd = os.open('/dev/mem', os.O_RDWR)\nm = mmap.mmap(fileno=fd, length=mmap.PAGESIZE, offset=0x40300000)\nhk_array = np.recarray(1, regset_hk, buf=m)\nhk = hk_array[0]\n\nprint(\"teschd1\")\n\n#Enable the PID: Set both Setpoint and Ki to 1000 and the reset parameter (first 4 bits of configuration parameter) to 0. Numbers are given in Hex format\nhk.config = 0x000 \nhk.Sp12 = 0x3e8\nhk.Ki12 = 0x3e8\n#Wait 10s\ntime.sleep(10)\n#Disable the PID\nhk.Sp12 = 0x000\nhk.Ki12 = 0x000\n\n#Close the mmap\nm.close()\n\nprint(\"teschd2\")","sub_path":"xmlrpc/sh_server/mmap_test3.py","file_name":"mmap_test3.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"125656686","text":"#!/usr/bin/env python3\nimport argparse\nimport os\n\nfrom gevent.monkey import patch_all\nfrom gevent.pywsgi import WSGIServer\ntry:\n from psycogreen.gevent import patch_psycopg\nexcept ImportError:\n def patch_psycopg(*_, **__): pass\n\n__all__ = 'main',\n\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n)\nparser.add_argument('-H', '--host', default='0.0.0.0')\nparser.add_argument('-p', '--port',\n type=int,\n default=int(os.environ.get('PORT', 2222)),\n help='port number to listen')\nparser.add_argument('-d', '--debug', action='store_true', default=False)\nparser.add_argument('-c', '--config',\n type=str,\n default=str(os.environ.get('WORD_WAY_ENV', 'prod')))\n\n\ndef main():\n args = parser.parse_args()\n\n if not args.debug:\n patch_all()\n patch_psycopg()\n\n from word_way.app import create_app\n\n wsgi_app = create_app(args.config)\n if args.debug:\n wsgi_app.run(host=args.host, port=args.port, debug=True)\n else:\n http_server = WSGIServer((args.host, args.port), wsgi_app)\n try:\n http_server.serve_forever()\n except KeyboardInterrupt:\n raise SystemExit\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"426569293","text":"def prime(n):\n if n <= 0:\n return \"Not defined\"\n elif n == 1:\n return \"Not prime\"\n for i in range(2, n):\n if n%i == 0:\n return \"not prime\"\n return \"prime\"\ndef list_prime(lst):\n\tprime_list =[ ]\n\tfor i in lst:\n\t\tx = prime(i)\n\t\tif x == \"prime\":\n\t\t\tprime_list.append(i)\n\treturn 
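mmap_test3.py above overlays a numpy structured dtype on a page of /dev/mem. The same recarray-over-a-buffer technique can be exercised without any hardware by pointing it at an ordinary bytearray; the field names in this sketch are made up.

# The register-map trick from mmap_test3.py on a plain bytearray, so it
# runs anywhere: each field is one little-endian 32-bit unsigned int,
# and attribute writes land directly in the underlying buffer.
import numpy as np

regset = np.dtype([('config', '<u4'), ('setpoint', '<u4'), ('gain', '<u4')])

buf = bytearray(regset.itemsize)       # stands in for the mmap'd page
regs = np.recarray(1, regset, buf=buf)[0]

regs.setpoint = 0x3E8                  # writes go through to the buffer...
regs.gain = 0x10
print(bytes(buf).hex())                # ...visible here as raw bytes
print(hex(regs.setpoint), hex(regs.gain))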
prime_list\n\nprint(list_prime([1,2,3,4,5,6,7,8,9]))\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"452236681","text":"import requests\nimport config\n\ntoken = config.disk_token\nURL = 'https://cloud-api.yandex.net/v1/disk/resources/upload'\n\n\ndef to_disk(file_path):\n    params = {\n        'path': file_path,\n        'overwrite': True\n    }\n    headers = {\n        'Authorization': token\n    }\n    response = requests.get(URL, params=params, headers=headers)\n    # print(response.headers)\n    json_ = response.json()\n    # print(json_)\n    print(json_)\n    files = {'file':open(file_path, 'r', encoding='utf-8')}\n    resp = requests.put(json_['href'], files=files)\n    if resp.status_code == 201:\n        print('File uploaded successfully')\n    else:\n        print('An error occurred while uploading the file')\n\n\nif __name__ == '__main__':\n    to_disk('translated.txt')\n","sub_path":"translated_text_to_yandex_disk.py","file_name":"translated_text_to_yandex_disk.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"225139926","text":"# -*- encoding: utf-8 -*-\n\n# http://zhanzhang.baidu.com/college/courseinfo?id=267&page=2#h2_article_title17\n\nfrom jinja2 import Template\nfrom project_chufang import logger\n\ntmpl = '''\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\n    {%- for page in pages %}\n    <url>\n        <loc>{{ page.loc }}</loc>\n        {%- if page.lastmod %}\n        <lastmod>{{ page.lastmod }}</lastmod>\n        {%- endif %}\n        {%- if page.changefreq %}\n        <changefreq>{{ page.changefreq }}</changefreq>\n        {%- endif %}\n        {%- if page.priority %}\n        <priority>{{ page.priority }}</priority>\n        {%- endif %}\n    </url>\n    {%- endfor %}\n</urlset>\n'''.strip()\n\ndef contains(o, k):\n    try:\n        if hasattr(o, k) or k in o:\n            return True\n    except:\n        return False\n    return False\n\n\ndef render(pages):\n    \"\"\"\n    each page must contain loc\n    \"\"\"\n    for page in pages:\n        if not contains(page, 'loc'):\n            raise ValueError('expect loc')\n        if not all([contains(page, 'lastmod'), contains(page, 'changefreq'), contains(page, 'priority')]):\n            logger.warning('expect lastmod/changefreq/priority')\n    template = Template(tmpl)\n    return template.render(pages=pages)\n","sub_path":"lib_chufang/sitemap/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"408897760","text":"#code for midterm project\n\nfrom brian2 import *\nimport numpy as np\n\nstart_scope()\n\n#PARAMETERS\n\n#Iext = constant input current that sets the excitability for the neuron\n#Isyn = synaptic current - these depend on synapses and neuron - define later\n\n# Reversal potentials\nEl = 10.6*mV #leak reversal potential\nENa = 115*mV #sodium reversal potential\nEK = -12*mV #potassium reversal potential \nEA = 60*mV\nEG = -20*mV \n\n# Conductances\ngl = 2.7*np.pi*msiemens #leak current conductance \ngNa = 1080*np.pi*msiemens #sodium current max conductance \ngK = 324*np.pi*msiemens #potassium current max conductance\ngA = 10 * msiemens\ngG = 20 * msiemens # variable \n\n# Constant current, determines neuron excitability\nIext_S = 28e-11 *amp\nIext_I = 28e-11 *amp\nIext_R = 28e-11 *amp # variable\nIext = [Iext_S, Iext_I, Iext_R]\n\n# Membrane capacitance\nCm = pi*9e-6 *farad\n\nalphaA = 1.1 #* nM-1ms-1 #FIGURE OUT THESE UNITS\nbetaA = 0.19 #* ms-1 #UNITS\nalphaG = 5.0 #* mM-1ms-1 #UNITS \nbetaG = 0.30 #* ms-1 #UNITS\n\n# Typical equations\neqs = '''\ndv/dt = (gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * 
n**4 * (EK-v) + Iext0 + gA*ra*(EA - v) + gG*rg*(EG - v))/Cm : volt\ndm/dt = alpham * (1-m) - betam * m : 1\ndn/dt = alphan * (1-n) - betan * n : 1\ndh/dt = alphah * (1-h) - betah * h : 1\nalpham = (0.1/mV) * (10*mV-v)/exprel((-v+25*mV)/(10*mV))/ms : Hz\nbetam = 4 * exp(-v/(18*mV))/ms : Hz\nalphah = 0.07 * exp(-v/(20*mV))/ms : Hz\nbetah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz\nalphan = (0.01/mV) * (10*mV-v)/exprel((-v+10*mV)/(10*mV))/ms : Hz\nbetan = 0.125*exp(-v/(80*mV))/ms : Hz\nIext0 : amp\nra : 1\nrg : 1\n'''\n\nneurons = NeuronGroup(3, model = eqs, method = 'exponential_euler', refractory=\"m > 0.4\", threshold=\"m > 0.5\") \nneurons.Iext0 = Iext\n\n# synaptic driving forces above are written as (E - v) to match the other currents\nA = Synapses(neurons,neurons, \n             model = ''' \n             dr/dt = alphaA*T*(1-r)-betaA*r : 1\n             T = 1 /(1+exprel(-(v_pre-62*mV)/5*mV)) : 1''', \n             on_pre = 'ra_post = r'\n             )\n             \nA.connect(i = [0, 1], j = [1, 2])\n\nG = Synapses(neurons,neurons, \n             model = ''' \n             dr/dt = alphaG*T*(1-r)-betaG*r : 1\n             T = 1 /(1+exprel(-(v_pre-62*mV)/5*mV)) : 1''', \n             on_pre = 'rg_post = r'\n             )\n\nG.connect(i = [2], j = [1]) \n\nM = StateMonitor(neurons, 'v', record=True)\n#S = SpikeMonitor(neurons, record=True)\n\nrun( 1 * second, report='text' )\n","sub_path":"single_neuron/aymee_code.py","file_name":"aymee_code.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"103044023","text":"import unittest\n# from threading import threading\nfrom ddt import ddt, data, unpack\n\nt = ((1, 1), (2, 4), (3, 9), (4, 16), (5, 25))\n\n\n@ddt\nclass TestSquare(unittest.TestCase):\n\n    @data(*t)\n    @unpack\n    def test(self, origin, result):\n        self.assertEqual(origin ** 2, result)\n","sub_path":"case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"609268470","text":"from __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n# import os\n# os.environ['DISABLE_COLAB_TF_IMPORT_HOOK'] = '1'\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\n\n# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nimport numpy as np\nimport re\nimport os\nimport io\nimport time\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\n\nimport psutil\n\ntf.enable_eager_execution()\n\n# p = psutil.Process(os.getpid())\n# p.nice(19)\n\n# path_zip = tf.keras.utils.get_file('cornell_movie_dialogs_corpus.zip',\n#                                    'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip',\n#                                    extract=True)\npath_zip = tf.keras.utils.get_file('spa.txt',\n                                   'https://download2262.mediafire.com/og22qeeb7qgg/ladcl5u218fqs5n/spa.txt', )\n#                                   extract=True)\n\npath = os.path.dirname(path_zip) + '/spa.txt'\n#path ='./spa.txt'\nprint(path)\n\n\ndef preproccess(sentence):\n    sentence = re.sub(r\"([?!.,¿])\", r\" \\1 \", sentence)\n    sentence = re.sub(r\"([' ']+)\", r\" \", sentence)\n    sentence = re.sub(r\"([^a-zA-Z?.!,¿]+)\", r\" \", sentence)\n\n    sentence = sentence.rstrip().strip()\n    return '<start> ' + sentence.lower() + ' <end>'\n\n\ndef creat_dataset(path, num_examples):\n    file = io.open(path, encoding='UTF-8', errors='ignore')\n\n    lines = file.read()\n    lines = lines.split('\\n')\n\n    pops = []\n    real_index = 0\n    for i, x in enumerate(lines):\n        if len(x.split(' ')) > 20:\n            pops.append(i)\n    for c in pops:\n        # lines.pop(c - real_index)\n\n        if c % 2 == 0:\n\n            lines.pop(c - real_index)\n            lines.pop(c - real_index)\n            real_index += 
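The ddt record above feeds one test method from a tuple of (input, expected) pairs. If ddt is unavailable, plain unittest achieves the same with subTest, as in this sketch:

# Data-driven squares test using only the standard library (subTest),
# mirroring the ddt @data/@unpack pattern from the record above.
import unittest

CASES = ((1, 1), (2, 4), (3, 9), (4, 16), (5, 25))

class TestSquare(unittest.TestCase):
    def test_squares(self):
        for origin, expected in CASES:
            with self.subTest(origin=origin):
                self.assertEqual(origin ** 2, expected)

if __name__ == '__main__':
    unittest.main()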
1\n\n real_index += 1\n else:\n\n lines.pop(c - real_index - 1)\n real_index += 1\n lines.pop(c - real_index)\n real_index += 1\n\n '''for i in range(len(lines)):\n if lines[i].rfind('+') != -1:\n lines[i] = lines[i][lines[i].rfind('+') + 1:]\n else:\n lines[i] = lines[i][lines[i].rfind(':') + 1:]'''\n lines = [preproccess(i) for i in lines]\n print(len(lines))\n request_list = lines[:num_examples:2]\n\n response_list = lines[1:num_examples:2]\n\n '''for i, x in enumerate(request_list):\n if len(x.split(' ')) > 10:\n request_list.pop(i)\n response_list.pop(i)\n\n for i, x in enumerate(response_list):\n if len(x.split(' ')) > 10:\n request_list.pop(i)\n response_list.pop(i)'''\n\n return request_list, response_list\n\n\ndef tokenize(lang):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\n lang_tokenizer.fit_on_texts(lang)\n\n tensor = lang_tokenizer.texts_to_sequences(lang)\n tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post')\n\n return tensor, lang_tokenizer\n\n\ndef load_dataset(path, num_examples):\n input, target = creat_dataset(path, num_examples)\n\n input_tensor, input_tokenizer = tokenize(input)\n target_tensor, target_tokenizer = tokenize(target)\n\n return input_tensor, target_tensor, input_tokenizer, target_tokenizer\n\n\ndef max_tensor(tensor):\n return max([len(c) for c in tensor])\n\n\ninput_tensor, target_tensor, input_tokenizer, target_tokenizer = load_dataset(path, 40000)\n\nmax_input_length = max_tensor(input_tensor)\nmax_target_length = max_tensor(target_tensor)\n\nprint(\"MAX\")\nprint(max_input_length)\nprint(max_target_length)\n\n\ndef convert(tensor, lang):\n for i in tensor:\n if i != 0:\n print(\"{} -----> {}\".format(i, lang.index_word[i]))\n\n\nconvert(input_tensor[0], input_tokenizer)\n\ntrain_input_tensor, test_input_tensor, train_target_tensor, test_target_tensor = train_test_split(input_tensor,\n target_tensor,\n test_size=0.2)\n\nBUFFER_SIZE = len(train_input_tensor)\nBATCH_SIZE = 64\nsteps_per_epoch = BUFFER_SIZE // BATCH_SIZE\n# steps_per_epoch = steps_per_epoch // 8\nprint(\"here\")\nprint(steps_per_epoch)\nembedding_dim = 512\nunits = 1024\n\nvocab_input_size = len(input_tokenizer.word_index) + 1\nvocab_target_size = len(target_tokenizer.word_index) + 1\n\nprint(vocab_input_size)\nprint(vocab_target_size)\n\ndataset = tf.data.Dataset.from_tensor_slices((train_input_tensor, train_target_tensor)).shuffle(BUFFER_SIZE).batch(\n BATCH_SIZE, drop_remainder=True)\n\nexample_input_tensor, example_target_tensor = next(iter(dataset))\n\n\nclass Encoder(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, enc_units, batch_size):\n super(Encoder, self).__init__()\n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n self.enc_units = enc_units\n self.batch_size = batch_size\n\n self.gru = tf.keras.layers.GRU(self.enc_units,\n return_sequences=True,\n return_state=True,\n recurrent_initializer='glorot_uniform',\n dropout=0.2,\n recurrent_dropout=0.2)\n\n def call(self, tensors, hidden):\n v = self.embedding(tensors)\n output, hidden = self.gru(v, initial_state=hidden)\n return output, hidden\n\n def initialize_hidden_state(self):\n return tf.zeros((self.batch_size, self.enc_units))\n\n\nencoder = Encoder(vocab_input_size, embedding_dim, units, BATCH_SIZE)\n\nenc_hidden = encoder.initialize_hidden_state()\n\nenc_output, enc_hidden = encoder(example_input_tensor, enc_hidden)\n\nprint('Encoder output shape: (batch size, sequence length, units) {}'.format(enc_output.shape))\nprint('Encoder 
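tokenize() in the chatbot record leans on Keras' Tokenizer plus pad_sequences. This small sketch shows what those two calls produce on two toy sentences; the ids shown in comments are indicative only, since the exact index depends on word frequency.

# What tokenize() in T2Chatbot.py produces: fit_on_texts builds the
# word index, texts_to_sequences maps words to ids, and pad_sequences
# right-pads the shorter sequence with zeros.
import tensorflow as tf

sentences = ['<start> how are you <end>', '<start> fine <end>']
tok = tf.keras.preprocessing.text.Tokenizer(filters='')
tok.fit_on_texts(sentences)
seqs = tok.texts_to_sequences(sentences)
padded = tf.keras.preprocessing.sequence.pad_sequences(seqs, padding='post')
print(tok.word_index)  # e.g. {'<start>': 1, '<end>': 2, 'how': 3, ...}
print(padded)          # second row ends in zeros (padding)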
Hidden state shape: (batch size, units) {}'.format(enc_hidden.shape))\n\n\nclass BahdanauAttention(tf.keras.Model):\n    def __init__(self, units):\n        super(BahdanauAttention, self).__init__()\n        self.W1 = tf.keras.layers.Dense(units)\n        self.W2 = tf.keras.layers.Dense(units)\n        self.V = tf.keras.layers.Dense(1)\n\n    def call(self, tensor, hidden):\n        hidden = tf.expand_dims(hidden, axis=1)\n\n        score = self.V(tf.nn.tanh(self.W1(tensor) + self.W2(hidden)))\n\n        attention_weights = tf.nn.softmax(score, axis=1)\n\n        context_vector = attention_weights * tensor\n\n        context_vector = tf.reduce_sum(context_vector, axis=1)\n\n        return context_vector, attention_weights\n\n\nclass Decoder(tf.keras.Model):\n    def __init__(self, vocab_size, embedding_dim, dec_units, batch_size):\n        super(Decoder, self).__init__()\n        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n        self.dec_units = dec_units\n        self.batch_size = batch_size\n        self.gru = tf.keras.layers.GRU(self.dec_units,\n                                       return_sequences=True,\n                                       return_state=True,\n                                       recurrent_initializer='glorot_uniform',\n                                       dropout=0.2,\n                                       recurrent_dropout=0.2)\n\n        self.fc = tf.keras.layers.Dense(vocab_size)\n        self.attention = BahdanauAttention(units)\n\n    def call(self, dec_input, dec_hidden, enc_output):\n        context_vector, attention_weights = self.attention(enc_output, dec_hidden)\n        v = self.embedding(dec_input)\n        # we expand the dimension for the context vector because we reduced that axis in the BahdanauAttention\n        v = tf.concat([tf.expand_dims(context_vector, 1), v], axis=-1)\n\n        dec_output, dec_hidden = self.gru(v, initial_state=dec_hidden)\n        dec_output = tf.reshape(dec_output, (-1, dec_output.shape[2]))\n\n        v = self.fc(dec_output)\n\n        return v, dec_hidden, attention_weights\n\n\ndecoder = Decoder(vocab_target_size, embedding_dim, units, BATCH_SIZE)\ndec_hidden = enc_hidden\nsample_decoder_output, _, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)), dec_hidden, enc_output)\n\noptimizer = tf.train.AdamOptimizer()\n\n\ndef loss_fun(real, pred):\n    mask = 1 - np.equal(real, 0)\n    loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask\n    return tf.reduce_mean(loss_)\n\n\ncheckpoints_dir = './training_checkpoints3'\ncheckpoint_prefix = os.path.join(checkpoints_dir, 'ckpt')\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer,\n                                 encoder=encoder,\n                                 decoder=decoder)\n\n\n# print(tf.expand_dims([target_lang.word_index['<start>']] * BATCH_SIZE, 1))\n# print('------')\n# for x, y in dataset.take(1):\n#     for t in range(y.shape[1]):\n#         print(tf.expand_dims(y[:, t], 1))\n\ndef train_step(inp, target, enc_hidden):\n    loss = 0\n    with tf.GradientTape() as tape:\n        enc_output, enc_state = encoder(inp, enc_hidden)\n        dec_hidden = enc_hidden\n        dec_input = tf.expand_dims([target_tokenizer.word_index['<start>']] * BATCH_SIZE, 1)\n\n        # we start from 1 because we already checked <start>\n        for t in range(1, target.shape[1]):\n            predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)\n\n            loss += loss_fun(target[:, t], predictions)\n\n            dec_input = tf.expand_dims(target[:, t], 1)\n\n    batch_loss = loss / int(target.shape[1])\n\n    variables = encoder.trainable_variables + decoder.trainable_variables\n\n    gradients = tape.gradient(loss, variables)\n    optimizer.apply_gradients(zip(gradients, variables))\n\n    return batch_loss\n\n\n'''EPOCHS = 40\n\nfor epoch in range(EPOCHS):\n    start = time.time()\n    enc_hidden = encoder.initialize_hidden_state()\n    total_loss = 0\n    print(steps_per_epoch)\n    for (batch, (input, target)) in enumerate(dataset.take(steps_per_epoch)):\n        batch_loss = 
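loss_fun in the record masks out padded positions by multiplying the per-token loss with a 0/1 mask before averaging. The same arithmetic replayed in plain numpy; note that, like the record, the mean is still taken over all positions, including the masked ones.

# The padding mask used by loss_fun, in numpy: positions where the
# target id is 0 (padding) contribute nothing to the summed loss.
import numpy as np

real = np.array([4, 7, 2, 0, 0])              # token ids, 0 = padding
per_token_loss = np.array([1.2, 0.7, 0.3, 5.0, 5.0])
mask = 1 - np.equal(real, 0)                  # -> [1, 1, 1, 0, 0]
print(mask)
print((per_token_loss * mask).mean())         # padded losses are zeroed,
                                              # but the divisor is still 5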
train_step(input, target, enc_hidden)\n        total_loss += batch_loss\n        print(\"I ======> {}\".format(batch))\n        if batch % 100 == 0:\n            print(\"Epoch {} Batch {} Loss {:.4f}\".format(epoch, batch, batch_loss))\n\n    # if epoch % 2 == 0:\n    checkpoint.save(file_prefix=checkpoint_prefix)\n\n    print(\"Epoch {} Loss {:.4f}\".format(epoch, total_loss / steps_per_epoch))\n\n    print(\"The time taken for 1 epoch is {} sec \\n\".format(time.time() - start))'''\n\n\ndef evaluate(sentence):\n    attention_plot = np.zeros((max_target_length, max_input_length))\n    print(attention_plot.shape)\n    sentence = preproccess(sentence)\n\n    inputs = [input_tokenizer.word_index[c] for c in sentence.split(' ')]\n\n    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n                                                           maxlen=max_input_length,\n                                                           padding='post')\n\n    inputs = tf.convert_to_tensor(inputs)\n\n    result = ''\n\n    hidden = [tf.zeros((1, units))]\n\n    enc_output, enc_hidden = encoder(inputs, hidden)\n\n    dec_input = tf.expand_dims([target_tokenizer.word_index['<start>']], 0)\n    # print('-----')\n    # print(inputs)\n    # print([inputs])\n\n    dec_hidden = enc_hidden\n\n    for t in range(max_target_length):\n        predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_output)\n        # print(predictions.shape)\n        # print(attention_weights.shape)\n        attention_weights = tf.reshape(attention_weights, (-1,))\n        # print(attention_weights.shape)\n        attention_plot[t] = attention_weights.numpy()\n\n        predicted_id = tf.argmax(predictions[0]).numpy()\n        result += target_tokenizer.index_word[predicted_id] + ' '\n        dec_input = tf.expand_dims([predicted_id], 0)\n        if target_tokenizer.index_word[predicted_id] == '<end>':\n            return result, sentence, attention_plot\n\n    return result, sentence, attention_plot\n\n\ndef plot_attention(attention, sentence, predicted_sentence):\n    fig = plt.figure(figsize=(10, 10))\n\n    ax = fig.add_subplot(1, 1, 1)\n    ax.matshow(attention, cmap='viridis')\n    fontdict = {'fontsize': 14}\n    ax.set_xticklabels([''] + sentence.split(' '), fontdict=fontdict, rotation=90)\n\n    ax.set_yticklabels([''] + predicted_sentence.split(' '), fontdict=fontdict)\n    plt.show()\n\n\ndef response(sentence):\n    result, sentence, attention_plot = evaluate(sentence)\n    print(sentence.split(' '))\n    print(result.split(' '))\n    print(attention_plot.shape)\n\n    attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]\n\n    plot_attention(attention_plot, sentence, result)\n\n    print(\"Input: {}\".format(sentence))\n    print(\"Predicted: {}\".format(result))\n\ncheckpoints_dir = './14.4 training'\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoints_dir))\n#checkpoint.restore(checkpoints_dir+'/ckpt-19')\n# print(tf.train.latest_checkpoint(checkpoints_dir))\n# print(input_tokenizer.word_index['they'])\n# response('Where i can find the Coordintaesٌ')\n\n# print(steps_per_epoch)\n# print(len(train_input_tensor))\n\n#response('hello')\nfor i in range(50):\n    sent = input('Sentence:')\n    if sent == '123':\n        break\n\n    response(sent)\n\n","sub_path":"T2Chatbot.py","file_name":"T2Chatbot.py","file_ext":"py","file_size_in_byte":13174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"417500794","text":"\"\"\"\nNow we consider that the star, not along the line of sight, is a sphere of uniform brightness (black body at T) with a radius R.\n\nOur coordinates are theta, the angle formed by the ray from the star and the direction of the centre as seen from a position on the line of sight, and phi, the polar angle around this axis.\n\nParameters that we can give for this code are:\n    
L : distance to the gamma-source (au)\n    zb : position along the line of sight near the star (au)\n    b : impact parameter (au)\n    R : radius of the star (au)\n    T : temperature of the star (K)\n\"\"\"\n\n# Libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom math import *\nfrom Physical_constants import *\nfrom Conversion_factors import *\nfrom Functions import *\n\n# For the vector eps and E\nnumber_bin_E = 40\n\n# Parameters for the code\nL = 20 * AU2cm  # the distance to the gamma-source (cm)\nzb = 10 * AU2cm  # position along the line of sight near the star (cm)\nb = 5 * AU2cm  # impact parameter (cm)\nR = 100 * Rsun2cm  # radius of the star (cm)\nT = np.array([3000, 6000, 10000])  # temperature of the star (K)\nz = np.linspace(0, L, 100)  # position along the line of sight (cm)\nphi = np.linspace(0, 2*np.pi, 10)  # polar angle\n\n# Energy of the gamma-photon\nEmin = 1e-2*TeV2erg  # Emin = 1e-2 TeV (erg)\nEmax = 1e5*TeV2erg  # Emax = 1e5 TeV (erg)\nE = np.logspace(log10(Emin), log10(Emax), number_bin_E)  # erg\nE_tev = E/TeV2erg  # TeV\n\nf = plt.figure()\nax = f.add_subplot(111)\n\n# Calculation of the transmittance for each temperature\nfor i in range (len(T)):\n    tau = calculate_tau(E, z, phi, b, R, T[i], zb)\n    plt.plot(E_tev, np.exp(-tau), label = \"T = %.2f K\" %T[i])\n\nb_au = b/AU2cm  # au\nL_au = L/AU2cm  # au\nR_Rsun = R/Rsun2cm  # Rsun\n\nplt.xscale('log')\nplt.xlabel(r'$E_\\gamma$' '(TeV)')\nplt.ylabel(r'$\\exp(-\\tau_{\\gamma \\gamma})$')\nplt.title(u'Transmittance of VHE 'r'$\\gamma$' '-rays in interaction \\n with a star with a radius '+str(round(R_Rsun,2))+' 'r'$R_\\bigodot$')\nplt.text(0.5, 0.5,'L = %.2f au, b = %.2f au' %(L_au, b_au))\nplt.legend(loc='best')\nplt.show()\n","sub_path":"Codes/Source_finie_T.py","file_name":"Source_finie_T.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"238278302","text":"import sys, pygame\nimport random\nimport math\nimport time\nfrom player import Player\nfrom inimigo import Inimigo\n\npygame.init()\n\npreto = (0,0,0)\n\nspeed = 10\n\nwidth, height = 1280, 720\nfont = pygame.font.Font(None,32)\nfont2 = pygame.font.Font(None,128)\nvitoria = font2.render(\"Você ganhou!\",True,(255,255,255))\nderrota = font2.render(\"Você morreu!\",True,(255,255,255))\npygame.display.set_caption(\"Labirinto Microbiano\")\ncameraX = 0\ncameraY = 0\nscreen = pygame.display.set_mode((width,height))\nscreensurf = pygame.display.get_surface()\nlabirinto = pygame.image.load(\"Labirinto.png\")\n# Music\npygame.mixer.music.load(\"music.mp3\")\n# DNA image\ndna = pygame.image.load(\"../Imagens/dna.png\")\ndnarect = dna.get_rect()\n# Portal image\nportal = pygame.image.load(\"../Imagens/portal.png\")\nportalrect = portal.get_rect()\n\n# list of DNA positions\nxy = list()\nxy.append((185,300))\nxy.append((185,800))\nxy.append((350,415))\nxy.append((410,180))\nxy.append((1290,190))\nxy.append((1535,665))\nxy.append((1320,1090))\nxy.append((1760,1090))\nxy.append((1090,1550))\nxy.append((405,1780))\nxy.append((1780,860))\nxy.append((1050,868))\nxy.append((1351,1558))\nxy.append((190,1310))\nxy.append((620,870))\n\n\nlab_width = labirinto.get_size()[0]\nlab_height = labirinto.get_size()[1]\nscreensurf = pygame.display.get_surface()\narquivos = [None]*16\nfor count in range(0,16):\n    arq = \"Ricks/\"+str(count+1)+\".png\"\n    arquivos[count] = pygame.image.load(arq)\n    #arquivos[count] = 
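The frame-loading loop in the maze record builds "Ricks/1.png" through "Ricks/16.png" by string concatenation, with an f-string variant commented out. A compact equivalent that only constructs the paths, so it runs without the image files; pygame.image.load would then be applied to each path once they exist:

# Building the 16 animation-frame paths from the record above in one
# list comprehension instead of a manual index loop.
frames = ["Ricks/{}.png".format(i) for i in range(1, 17)]
print(frames[0], frames[-1])  # Ricks/1.png Ricks/16.png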
pygame.image.load(f\"Ricks/{count+1}.png\")\ninimigo_arquivos=[None]*2\ninimigo_arquivos[0] = pygame.image.load(\"../Imagens/Inimigo1/1.png\")\ninimigo_arquivos[1] = pygame.image.load(\"../Imagens/Inimigo1/2.png\")\n\ninimigo1 = Inimigo(screen,inimigo_arquivos,width,height,300,400,500,10)\ninimigo_arquivos2=[None]*2\ninimigo_arquivos2[0] = pygame.image.load(\"../Imagens/Inimigo2/1.png\")\ninimigo_arquivos2[1] = pygame.image.load(\"../Imagens/Inimigo2/2.png\")\n\ninimigo2 = Inimigo(screen,inimigo_arquivos2,width,height,940,640,500,12)\n\ninimigo_arquivos3=[None]*2\ninimigo_arquivos3[0] = pygame.image.load(\"../Imagens/Inimigo3/1.png\")\ninimigo_arquivos3[1] = pygame.image.load(\"../Imagens/Inimigo3/2.png\")\ninimigo3 = Inimigo(screen,inimigo_arquivos3,width,height,845,1090,450,15)\n\ninimigo4 = Inimigo(screen,inimigo_arquivos,width,height,638,1760,900,5)\n\nrick = Player(screen,arquivos,width,height,speed,pygame.K_LEFT,pygame.K_RIGHT,pygame.K_UP,pygame.K_DOWN)\nplaying = True\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n\n teclas = pygame.key.get_pressed()\n\n screen.fill(preto)\n if((rick.personagemRect.x>(width/2)) and (cameraX < lab_width-width)):\n cameraX+=rick.speed\n rick.personagemRect.x = width/2\n elif((rick.personagemRect.x<(width/2)) and (cameraX > 0)):\n cameraX-=rick.speed\n rick.personagemRect.x = width/2\n\n if((rick.personagemRect.y>(height/2)) and (cameraY < lab_height-height)):\n cameraY+=rick.speed\n rick.personagemRect.y = height/2\n elif((rick.personagemRect.y<(height/2)) and (cameraY > 0)):\n cameraY-=rick.speed\n rick.personagemRect.y = height/2\n\n\n screen.blit(labirinto,(0 - cameraX,0 - cameraY))\n for cords in xy:\n dnarect.x = cords[0] - cameraX\n dnarect.y = cords[1] - cameraY\n screen.blit(dna,dnarect)\n if(dnarect.colliderect(rick.personagemRect)):\n xy.remove(cords)\n rick.vida+=100\n if(rick.vida>100):\n rick.vida = 100\n if(len(xy)==0):\n pygame.mixer.music.play()\n\n portalrect.x = 1700 - cameraX\n portalrect.y = 1950 - cameraY\n screen.blit(portal,portalrect)\n if(portalrect.colliderect(rick.personagemRect)):\n if(len(xy)==0):\n screen.blit(vitoria,(width//3,height//2))\n playing = False\n else:\n cameraX = 0\n cameraY = 0\n rick.personagemRect.y=0 - cameraY\n rick.personagemRect.x=160 - cameraX\n text=font.render(\"DNA's: \"+str(abs(len(xy)-15))+\"/15\",True,(255,255,255))\n\n screen.blit(text,(width-200,10))\n text1 = font.render(\"Vida: \"+str(rick.vida)+\"/100\",True,(255,0,0))\n screen.blit(text1,(10,10))\n #print(f\"({pygame.mouse.get_pos()[0]+cameraX},{pygame.mouse.get_pos()[1]+cameraY})\")\n if(playing):\n rick.mover(teclas)\n rick.desenhar()\n inimigo1.move(\"h\")\n inimigo1.desenhar(cameraX,cameraY)\n if(inimigo1.collid(rick.personagemRect) or inimigo2.collid(rick.personagemRect) or inimigo3.collid(rick.personagemRect) or inimigo4.collid(rick.personagemRect)):\n rick.vida-=1\n inimigo2.move(\"h\")\n inimigo2.desenhar(cameraX,cameraY)\n inimigo3.move(\"v\")\n inimigo3.desenhar(cameraX,cameraY)\n inimigo4.move(\"h\")\n inimigo4.desenhar(cameraX,cameraY)\n if(rick.vida<=0):\n screen.blit(derrota,(width//3,height//2))\n playing = False\n pygame.display.flip()\n","sub_path":"teste/teste1.py","file_name":"teste1.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"459425123","text":"'''\nPROBLEM: Given formatted string, parse and expand\nDIFFICULTY: Hard (would've never discovered on my own)\n\nNOTE: This 
problem is a waste of time - too many edge cases\n'''\n\n\n# From leetcode (wouldnt have figured it out otherwise)\n# time: O(n)\n# space: O(n)\ndef v1(s):\n stack = []\n currNum = ''\n currStr = ''\n\n for char in s:\n if char.isdigit():\n currNum += char\n\n elif char.isalpha():\n currStr += char\n\n elif char == '[':\n stack.append(currStr)\n stack.append(currNum)\n currNum = ''\n currStr = ''\n\n elif char == ']':\n num = int(stack.pop())\n prevString = stack.pop()\n currStr = prevString + num*currStr\n\n return currStr\n\n\n# WIP (trying to make a more understandable version of leetcode)\ndef v2(s):\n stack = []\n result = ''\n\n currNum = ''\n currStr = ''\n currChunk = ''\n #currSubChunk = ''\n prevLevel = -1\n\n for char in s:\n if char.isnumeric(): \n currNum += char\n\n elif char.isalpha():\n currStr += char\n\n elif char == '[':\n if len(currStr):\n if len(stack):\n stack.append(currStr)\n else:\n result += currStr\n currStr = ''\n\n stack.append(currNum)\n currNum = ''\n currStr = ''\n\n elif char == ']':\n #print(stack, currStr)\n num = int(stack.pop())\n\n if len(stack) == prevLevel:\n currChunk += currStr\n else:\n currChunk = currStr + currChunk\n\n currChunk = num * currChunk\n prevLevel = len(stack)\n\n if len(stack):\n currStr = stack.pop()\n else:\n result += currChunk\n currChunk = ''\n currStr = ''\n currNum = ''\n \n\n return result + currStr\n \n\n# WIP (non-leetcode, recursive, missing edge cases)\ndef v3(s):\n output, s = helper(s)\n return output\n\ndef helper(s):\n output = ''\n while True:\n if len(s) == 0:\n break\n\n if s[0].isalpha():\n end = getEndOfBody(s)\n output += s[:end+1]\n s = s[end+1:]\n if len(s) == 0:\n break\n\n if s[0].isnumeric():\n index = s.find('[')\n if index == -1:\n return (output, s)\n\n num = int(s[:index])\n s = s[index+1:]\n end = getEndOfBody(s)\n body = s[:end+1]\n s = s[end+1:]\n\n if s[0] != ']':\n text, s = helper(s)\n body += text\n\n output += num * body\n\n if s[0] == ']':\n s = s[1:]\n if len(s) > 0 and s[0] == ']':\n break\n \n return (output, s)\n\ndef getEndOfBody(s):\n for i, char in enumerate(s):\n if char == ']':\n return i - 1\n elif char.isnumeric():\n return i - 1\n return len(s) - 1\n\n\n\n# Execute\ndef main():\n import testSuite\n\n # EDGE CASES\n # nested (regular)\n # nested numbers\n # multiple nested on same level\n # first level\n # letters on their own (no brackets)\n\n tests = [\n ['', ''],\n ['a', 'a'],\n\n ['3[a]', 'aaa'],\n ['2[abc]', 'abcabc'],\n ['a2[b]c', 'abbc'],\n ['2[a]2[b]', 'aabb'],\n ['a2[b]c2[d]e', 'abbcdde'],\n\n ['2[3[a]]', 'aaaaaa'],\n ['2[3[4[a]]]', 'aaaaaaaaaaaaaaaaaaaaaaaa'],\n ['2[2[a]2[b]]', 'aabbaabb'],\n ['a2[3[b]c]d', 'abbbcbbbcd'],\n\n\n ['2[a3[b]]', 'abbbabbb'],\n ['2[a3[b4[c]]]', 'abccccbccccbccccabccccbccccbcccc'],\n #[['a2[b2[c]d]e'], 'abccdbccde'],\n\n ['2[X2[ab]2[cd]]', 'XababcdcdXababcdcd'],\n ['a2[2[b]c2[de2[2[f]]]]g2[2[h2[i]]]j', 'abbcdeffffdeffffbbcdeffffdeffffghiihiihiihiij'],\n\n ['zz2[ab2[f]]xx3[de]fg', 'zzabffabffxxdededefg'],\n ['2[ab2[f]1[g]]3[de]', 'abffgabffgdedede'],\n ['3[z]2[2[y]pq4[2[jk]e1[f]]]ef', 'zzzyypqjkjkefjkjkefjkjkefjkjkefyypqjkjkefjkjkefjkjkefjkjkefef'],\n ]\n\n t = testSuite.init(tests)\n t.test(v1)\n #t.test(v2)\n #t.test(v3)\n\nmain()\n","sub_path":"algos/leetcode/394_decodeString.py","file_name":"394_decodeString.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"420767942","text":"\"\"\"\r\n\"\"\"\r\nfrom splinter import Browser\r\nfrom bs4 import 
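v3 in the decode-string record attempts a recursive parser and is noted as missing edge cases. One compact way to structure the recursion is to thread a single index through the calls, as in this hedged alternative to the working stack-based v1:

# Recursive decode of strings like '3[a2[c]]' -> 'accaccacc'. A single
# index walks the string; each call consumes characters until it hits
# a ']' (end of the enclosing bracket) or the end of the input.
def decode(s):
    def parse(i):
        out = []
        while i < len(s) and s[i] != ']':
            if s[i].isdigit():
                j = i
                while s[j].isdigit():
                    j += 1
                num = int(s[i:j])        # repeat count
                body, i = parse(j + 1)   # skip '[' and recurse
                out.append(num * body)
                i += 1                   # skip the closing ']'
            else:
                out.append(s[i])
                i += 1
        return ''.join(out), i
    return parse(0)[0]

print(decode('2[abc]3[cd]ef'))  # abcabccdcdcdef
print(decode('3[a2[c]]'))       # accaccacc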
BeautifulSoup\r\nimport re\r\nimport pandas as pd\r\nimport pymongo\r\n\r\ndef getNews():\r\n startBrowser()\r\n url = 'https://mars.nasa.gov/news'\r\n browser.visit(url)\r\n\r\n html = browser.html\r\n soup = BeautifulSoup(html, 'html.parser')\r\n\r\n result = soup.find(attrs={\"class\": \"list_text\"})\r\n\r\n with browser:\r\n try:\r\n # Identify and return title of news\r\n news_title = result.a.text\r\n # Identify and return news paragraph\r\n news_p = result.find(attrs={\"class\": \"article_teaser_body\"}).text\r\n\r\n # Run only if news title, and paragraph are available\r\n if (news_title and news_p):\r\n\r\n # Dictionary to be inserted as a MongoDB document\r\n post = {\r\n 'title': news_title,\r\n 'news_p': news_p,\r\n }\r\n except Exception as e:\r\n print(e) \r\n return post\r\n\r\ndef getFeaturedImage():\r\n startBrowser()\r\n img_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\r\n img_url_base= img_url[:24]\r\n featured_image_url =[]\r\n\r\n browser.visit(img_url)\r\n\r\n html = browser.html\r\n soup = BeautifulSoup(html, 'html.parser')\r\n browser.click_link_by_partial_text('FULL IMAGE')\r\n browser.is_element_present_by_text('fancybox-image', wait_time=4)\r\n with browser:\r\n html = browser.html\r\n soup = BeautifulSoup(html, 'html.parser')\r\n featured_image_url = soup.find('img', attrs={\"class\": \"fancybox-image\"})['src']\r\n featured_image_url = img_url_base + featured_image_url\r\n return featured_image_url\r\n\r\ndef getWeather():\r\n startBrowser()\r\n weather_url = 'https://twitter.com/marswxreport?lang=en'\r\n browser.visit(weather_url)\r\n with browser:\r\n html = browser.html\r\n soup = BeautifulSoup(html, 'html.parser')\r\n mars_weather = soup.find(text=re.compile('Sol'))\r\n return mars_weather\r\n\r\ndef getFacts():\r\n startBrowser()\r\n mars_facts_url = 'https://space-facts.com/mars/'\r\n results = pd.read_html(mars_facts_url, attrs={'id': 'tablepress-mars'}, flavor=['bs4'])\r\n keys=[]\r\n values =[]\r\n for key, value in results[0]._values:\r\n keys.append(key.strip(':'))\r\n values.append(value)\r\n mars_facts = dict(zip(keys, values))\r\n return mars_facts\r\n\r\ndef getHemisphereImages():\r\n startBrowser()\r\n img_hemis_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\r\n img_hemis_url_base = img_hemis_url[:29]\r\n hemisphere_image_urls = []\r\n browser.visit(img_hemis_url)\r\n\r\n html = browser.html\r\n soup = BeautifulSoup(html, 'html.parser')\r\n\r\n links_found = soup.find_all(attrs={\"class\": 'description'})\r\n\r\n with browser:\r\n for link in links_found:\r\n try:\r\n title = link.a.text\r\n browser.click_link_by_partial_text(title)\r\n curr_html = browser.html\r\n soup = BeautifulSoup(curr_html, 'html.parser')\r\n result = [x['src'] for x in soup.findAll('img', attrs={\"class\": \"wide-image\"})]\r\n img_url = img_hemis_url_base + result[0]\r\n img_dict = {'title': title, 'img_url': img_url}\r\n hemisphere_image_urls.append(img_dict)\r\n except Exception as e:\r\n print(e)\r\n finally:\r\n browser.back()\r\n return hemisphere_image_urls\r\n\r\ndef startBrowser():\r\n global browser\r\n executable_path = {'executable_path': 'chromedriver.exe'}\r\n browser = Browser('chrome', **executable_path, headless=True)\r\n\r\ndef scrape():\r\n news = getNews()\r\n feature_image = getFeaturedImage()\r\n weather = getWeather()\r\n facts = getFacts()\r\n hemisphere_images = getHemisphereImages()\r\n\r\n scraped = {'news': news,\r\n 'feature_image': feature_image,\r\n 'weather': weather,\r\n 
'facts': facts,\r\n               'hemisphere_images': hemisphere_images\r\n               }\r\n    return scraped\r\n\r\n\r\nif __name__ == '__main__':\r\n    conn = 'mongodb://localhost:27017'\r\n    client = pymongo.MongoClient(conn)\r\n\r\n    # Define database and collection\r\n    db = client.mars_mission_db\r\n    collection = db.items\r\n\r\n    scrape()\r\n\r\n","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"475377997","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass Perceptron(object):\n    \"\"\"Perceptron classifier\n    Parameters\n    ------------------\n    eta : float\n        Learning rate (greater than 0.0 and at most 1.0)\n\n    n_iter : int\n        Number of passes over the training data\n\n    random_state : int\n        Random seed used to initialize the weights\n\n\n    Attributes\n    -----------------\n    w_ : 1d-array\n        Weights after fitting\n    errors_ : list\n        Number of misclassifications (updates) in each epoch\n\n    \"\"\"\n\n    def __init__(self , eta=0.01 , n_iter=50, random_state=1):\n        self.eta = eta\n        self.n_iter = n_iter\n        self.random_state = random_state\n\n    def fit(self, x ,y):\n        \"\"\"\n\n\n        :param x: array-like structure, shape = [n_samples, n_features]\n            Training data, where n_samples is the number of samples and n_features is the number of features\n        :param y: array-like structure, shape = [n_samples]\n            Target values\n\n\n        :return:\n        self : object\n\n        \"\"\"\n        rgen = np.random.RandomState(self.random_state)\n        self.w_ = rgen.normal(loc=0.0 , scale=0.01 ,size=1 + x.shape[1])\n        self.errors_ = []\n\n# this first version of the code did not work...\n        # for _ in range(self.n_iter):\n        #     error = 0\n        #     for xi, target in zip(x,y):\n        #         # update the weights for each sample\n        #         # update of the weights (w1..wm)\n        #         # ΔW=η(y(i))....\n        #         update = self.eta * (target - self.predict(xi))\n        #         self.w_[1:] += update * xi\n        #         # update the bias weight\n        #         self.w_[0] += update\n        #         errors += int(update != 0.0)\n        #     self.errors_.append(errors)\n        # return self\n        #\n        # def net_imput(self, x):\n        #     # compute the net input\n        #     return np.dot(x, self.w_[1:] + self.w_[0])\n        #\n        #\n        # def predict(self, x):\n        #     # return the class label after one step\n        #     return np.where(self.net_input(x) >= 0.0, 1, -1)\n\n\n        for _ in range(self.n_iter):\n            errors = 0\n            for xi, target in zip(x, y):\n                update = self.eta * (target - self.predict(xi))\n                self.w_[1:] += update * xi\n                self.w_[0] += update\n                errors += int(update != 0.0)\n            self.errors_.append(errors)\n        return self\n\n    def net_input(self, x):\n        \"\"\"Calculate net input\"\"\"\n        return np.dot(x, self.w_[1:]) + self.w_[0]\n\n    def predict(self, x):\n        \"\"\"Return class label after unit step\"\"\"\n        return np.where(self.net_input(x) >= 0.0, 1, -1)\n\n\n\n\n\n\n\n\n# load the data\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',header=None)\n\ndf.tail()\n\nprint(df.tail())\n\n# take rows 1-100 of the data\ny = df.iloc[0:100,4].values\n# convert Iris-setosa to -1 and the other species to 1\ny = np.where(y =='Iris-setosa',-1,1)\n# extract columns 1 and 3 of rows 1-100\nx = df.iloc[0:100,[0,2]].values\n# plot the setosa samples (red circles)\nplt.scatter(x[:50,0],x[:50,1],color='red',marker='o',label='setosa')\n\nplt.scatter(x[50:100,0],x[50:100,1],color='blue',marker='x',label='versicolor')\n\n# axis labels\nplt.xlabel('sepal length [cm]')\nplt.ylabel('petal length [cm]')\n\n# legend (placed in the upper left)\nplt.legend(loc='upper left')\n\n# show the scatter plot\nplt.show()\n\n# create (instantiate) the perceptron object\nppn = Perceptron(eta=0.1,n_iter=10)\n# fit the model to the training data\nppn.fit(x,y)\n# plot the number of misclassifications per epoch as a line chart\nplt.plot(range(1,len(ppn.errors_) + 1),ppn.errors_,marker='o')\n\nplt.xlabel('Epochs')\nplt.ylabel('Number of 
updates')\n\n# show the figure\nplt.show()","sub_path":"venv/Scripts/lris_test.py","file_name":"lris_test.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"194603239","text":"import requests\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Font, Alignment\nfrom openpyxl.styles import PatternFill, Color, Border, Side\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom facebookads.adobjects.adaccount import AdAccount\nfrom facebookads.api import FacebookAdsApi\n\n\ndef get_values(value):\n    my_app_id = '702139329921054'\n    my_app_secret = 'b8259f6001e8199ef99fea11a4dc488c'\n    FacebookAdsApi.init(my_app_id, my_app_secret,\n                        'EAAJZBl41XwB4BAFDImFCmZBFpZCpTXDqHI8u9sSoQFICwzd8obpxtOfAiG3UK0bviE1rZBkuCxhnCwZAOGKbZBL7zzSj2U67oTFlAxthZCrxcU7369ZCVp6ddLXkEmOFZBW5RVShLCW7qeTAbXMibHyBHXWjdUntQ5xX6bFO5yHnrAVWAPp4KwnTZC')\n    values_list = []\n\n    _currency = \"\"\n    try:\n        yesterday_spend = \\\n            AdAccount('act_' + str(value)).get_insights(params={'date_preset': 'yesterday'},\n                                                        fields=['spend','account_currency'])\n        _yesterday_spend = yesterday_spend[0]['spend']\n        _currency = yesterday_spend[0]['account_currency']\n        if _currency == 'KRW':\n            _currency = '원'\n        elif _currency == 'USD':\n            _currency = '$'\n        else:\n            _currency = yesterday_spend[0]['account_currency']\n    except:\n        _yesterday_spend = 0\n\n\n    try:\n        today_spend = AdAccount('act_' + str(value)).get_insights(params={'date_preset': 'today'},\n                                                                  fields=['spend','account_currency'])\n        _today_spend = today_spend[0]['spend']\n        _currency = today_spend[0]['account_currency']\n\n        if _currency == 'KRW':\n            _currency = '원'\n        elif _currency == 'USD':\n            _currency = '$'\n        else:\n            _currency = today_spend[0]['account_currency']\n\n    except:\n        _today_spend = 0\n\n    if _currency == '원':\n        _yesterday_spend = format(int(_yesterday_spend), ',')\n        _today_spend = format(int(_today_spend), ',')\n\n\n\n    values_list.append(\n        {'id': str(value), 'yesterday': str(_yesterday_spend)+_currency, 'today': str(_today_spend)+_currency})\n\n    return values_list\n\n\ndef get_insights(act_id):\n    my_app_id = '702139329921054'\n    my_app_secret = 'b8259f6001e8199ef99fea11a4dc488c'\n    FacebookAdsApi.init(my_app_id, my_app_secret,\n                        'EAAJZBl41XwB4BAFDImFCmZBFpZCpTXDqHI8u9sSoQFICwzd8obpxtOfAiG3UK0bviE1rZBkuCxhnCwZAOGKbZBL7zzSj2U67oTFlAxthZCrxcU7369ZCVp6ddLXkEmOFZBW5RVShLCW7qeTAbXMibHyBHXWjdUntQ5xX6bFO5yHnrAVWAPp4KwnTZC')\n    try:\n        pixel_id = AdAccount(act_id).get_ads_pixels()[0]['id']\n    except:\n        pixel_id = None\n\n    try:\n        analytics_id = AdAccount(act_id).get_applications()[0]['id']\n    except:\n        analytics_id = None\n\n    account_name = AdAccount(act_id).remote_read(fields=[AdAccount.Field.name])['name']\n\n    data = {'pixel_id': pixel_id, 'analytics_id': analytics_id, 'account_name': account_name}\n\n    return data\n\n\ndef create_adaccount(name, currency, timezone_id, invoice):\n    url = 'https://graph.facebook.com/v3.0/447036208802117/adaccount'\n    data = {'name': name, 'currency': currency, 'timezone_id': timezone_id, 'end_advertiser': 'NONE',\n            'media_agency': 'NONE', 'partner': '702139329921054', 'invoice': invoice,\n            'access_token': 'EAAJZBl41XwB4BAFDImFCmZBFpZCpTXDqHI8u9sSoQFICwzd8obpxtOfAiG3UK0bviE1rZBkuCxhnCwZAOGKbZBL7zzSj2U67oTFlAxthZCrxcU7369ZCVp6ddLXkEmOFZBW5RVShLCW7qeTAbXMibHyBHXWjdUntQ5xX6bFO5yHnrAVWAPp4KwnTZC'}\n    res = requests.post(url, data=data)\n\n    return res.text\n\n\ndef add_people(act_id):\n    _list = []  # list for the IDs that failed to be added\n    manager_office = ['162452684601037', 
'733076517083567', '518987761831226', '161102081405527', '448699228877093',\n '171383986709089', '100319830616134']\n # david, jack, bart, kevin, drew, wivlab, wivapi\n advertiser_jeju = ['558907227843449', '749614468762514', '389228444913345', '222319385209786']\n # jj(장찬규,찬규장),shine, mc\n\n url = \"https://graph.facebook.com/v3.0/{}/assigned_users\".format(act_id)\n for i in manager_office:\n data = {'user': i, 'role': 'ADMIN',\n 'access_token': 'EAAJZBl41XwB4BAFDImFCmZBFpZCpTXDqHI8u9sSoQFICwzd8obpxtOfAiG3UK0bviE1rZBkuCxhnCwZAOGKbZBL7zzSj2U67oTFlAxthZCrxcU7369ZCVp6ddLXkEmOFZBW5RVShLCW7qeTAbXMibHyBHXWjdUntQ5xX6bFO5yHnrAVWAPp4KwnTZC'}\n res = requests.post(url, data=data)\n if res.status_code == 400:\n _list.append(i)\n\n for j in advertiser_jeju:\n data = {'user': j, 'role': 'GENERAL_USER',\n 'access_token': 'EAAJZBl41XwB4BAFDImFCmZBFpZCpTXDqHI8u9sSoQFICwzd8obpxtOfAiG3UK0bviE1rZBkuCxhnCwZAOGKbZBL7zzSj2U67oTFlAxthZCrxcU7369ZCVp6ddLXkEmOFZBW5RVShLCW7qeTAbXMibHyBHXWjdUntQ5xX6bFO5yHnrAVWAPp4KwnTZC'}\n res2 = requests.post(url, data=data)\n if res2.status_code == 400:\n _list.append(j)\n\n return _list\n\n\ndef create_excel(*args):\n wb = Workbook()\n ws = wb.active\n ws.title = 'Ad Account Creation'\n ws.row_dimensions[2].height = 40\n\n ws.merge_cells('A1:U1')\n ws['A1'] = '광고계정 생성 요청 양식 (Ad account creation request form)'\n ws1 = ws['A1']\n ws1.font = Font(name='Arial', size=10, bold=True)\n ws1.alignment = Alignment(horizontal='left')\n ws1.fill = PatternFill(patternType='solid', fgColor=Color('81BEF7'))\n\n ws['A2'] = 'Gaming Account/ Non-Gaming Account'\n ws['B2'] = 'Date'\n ws['C2'] = 'Your Contact Email'\n ws['D2'] = 'Applicant (agency)'\n ws['E2'] = 'Media Agency'\n ws['F2'] = 'Ad Account ID '\n ws['G2'] = \"Advertiser's Name - English\"\n ws['H2'] = \"Advertiser's Name -한글\"\n ws['I2'] = 'Parent Company Name 모기업명'\n ws['J2'] = \"Advertiser's External Website (e.g www.amazon.com)\"\n ws['K2'] = 'Advertiser’s FB Page URL'\n ws['L2'] = \"Advertiser's Address (English)\"\n ws['M2'] = 'Postal Code'\n ws['N2'] = \"End Advertiser's Vertical & Sub-Vertical\"\n ws['O2'] = 'Payment Method'\n ws['P2'] = 'Timezone '\n ws['Q2'] = 'Currency'\n ws['R2'] = 'Budget(Growth)'\n ws['S2'] = 'Budget(Net)'\n ws['T2'] = 'Start Date'\n ws['U2'] = 'End Date'\n\n box = Border(left=Side(border_style=\"thin\",\n color='FF000000'),\n right=Side(border_style=\"thin\",\n color='FF000000'),\n top=Side(border_style=\"thin\",\n color='FF000000'),\n bottom=Side(border_style=\"thin\",\n color='FF000000'),\n diagonal=Side(border_style=\"thin\",\n color='FF000000'),\n diagonal_direction=0,\n outline=Side(border_style=\"thin\",\n color='FF000000'),\n vertical=Side(border_style=\"thin\",\n color='FF000000'),\n horizontal=Side(border_style=\"thin\",\n color='FF000000')\n )\n\n columns = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U']\n for i in columns:\n column = i + '2'\n column = ws[column]\n column.border = box\n column.font = Font(name='Arial', size=10, bold=True)\n column.alignment = Alignment(horizontal='center', vertical='center', wrapText=True)\n column.fill = PatternFill(patternType='solid', fgColor=Color('CEE3F6'))\n ws.column_dimensions[i].width = 20\n\n _list = []\n for i in args:\n _list.append(i)\n\n if _list[0] == 'true':\n _list[0] = 'Gaming'\n else:\n _list[0] = 'Non-Gaming'\n\n date = (datetime.today().strftime(\"%m%d%Y\"))\n current = datetime.now()\n months = current + relativedelta(months=3)\n\n _list[11] = format(int(_list[11]), ',')\n\n 
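create_excel in the to_facebook.py record styles every header cell by hand with Font/Alignment/PatternFill. The same openpyxl pattern in miniature, with arbitrary colors and column titles:

# The openpyxl header-styling pattern from create_excel, reduced to one
# header row plus a single data row.
from openpyxl import Workbook
from openpyxl.styles import Alignment, Color, Font, PatternFill

wb = Workbook()
ws = wb.active
for col, title in zip('ABC', ['Date', 'Account', 'Budget']):
    cell = ws[col + '1']
    cell.value = title
    cell.font = Font(name='Arial', size=10, bold=True)
    cell.alignment = Alignment(horizontal='center', vertical='center')
    cell.fill = PatternFill(patternType='solid', fgColor=Color('CEE3F6'))
    ws.column_dimensions[col].width = 20
ws.append(['06-01-2018', 'act_123', 'KRW 1,000,000'])
wb.save('styled_header_demo.xlsx')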
ws['A3'] = _list[0]\n    ws['B3'] = date[0:2] + '-' + date[2:4] + '-' + date[6:9]  # month - day - year\n    ws['C3'] = _list[1]\n    ws['D3'] = 'wivlabs'\n    ws['E3'] = 'wivlabs'\n    ws['F3'] = _list[2][4:]\n    ws['G3'] = _list[3]\n    ws['H3'] = _list[4]\n    ws['I3'] = _list[5]\n    ws['J3'] = _list[6]\n    ws['K3'] = _list[7]\n    ws['L3'] = _list[8]\n    ws['M3'] = _list[9]\n    ws['N3'] = _list[10]\n    ws['O3'] = 'Invoicing'\n    ws['P3'] = 'Asia/Seoul'\n    ws['Q3'] = 'KRW'\n    ws['R3'] = 'KRW' + ' ' + str(_list[11])\n    ws['S3'] = 'KRW' + ' ' + str(_list[11])\n    ws['T3'] = date[0:2] + '-' + date[2:4]\n    ws['U3'] = str(months)[5:10]\n\n    for i in columns:\n        column = i + '3'\n        column = ws[column]\n        column.border = box\n        column.font = Font(name='Arial', size=10)\n        column.alignment = Alignment(horizontal='center', vertical='center', wrapText=True)\n        column.fill = PatternFill(patternType='solid', fgColor=Color('FFFF00'))\n        ws.column_dimensions[i].width = 20\n\n    # excel = 'KR_AdAccountCreationExcel_{}.xlsx'.format(datetime.today().strftime('%m%d'))\n    # wb.save(excel)\n\n    return wb\n","sub_path":"create/request/to_facebook.py","file_name":"to_facebook.py","file_ext":"py","file_size_in_byte":9017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"56611214","text":"from tkinter import *\nfrom tkinter import colorchooser\n\n\nroot=Tk()\nroot.title(\"Learn to code\")\nroot.iconbitmap('f:iron.ico')\nroot.geometry(\"400x400\")\n\ndef color():\n    my_color=colorchooser.askcolor()[1]\n    l=Label(root,text=my_color).pack(pady=10)\n    l2=Label(root,text=\"you picked a color\",font=(\"Helvetica\",24),bg=my_color).pack()\nb=Button(root,text=\"pick a color\",command=color)\nb.pack(pady=10)\n\n\n\n\n\nroot.mainloop()","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"31662875","text":"import time\nimport getTemp\nimport serial\nimport random\nimport math\nimport sys\nfrom config import *\n\nser = serial.Serial(port = \"/dev/ttyUSB0\", baudrate=57600)\nser.parity = serial.PARITY_NONE\nser.bytesize = serial.EIGHTBITS\nser.stopbits = serial.STOPBITS_ONE\nser.dtr = 0\n#ser.open()\n\n# RN2483-style module commands are terminated with CRLF\nser.write((\"mac set devaddr \" + devaddr + \"\\\\r\\\\n\").encode())\nser.write((\"mac set deveui \" + deveui + \"\\\\r\\\\n\").encode())\nser.write((\"mac set appeui \" + appeui + \"\\\\r\\\\n\").encode())\nser.write((\"mac set appkey \" + appkey + \"\\\\r\\\\n\").encode())\nser.write((\"mac set nwkskey \" + nwkskey + \"\\\\r\\\\n\").encode())\n\nser.write(\"mac save\\\\r\\\\n\".encode())\nser.write(\"mac join abp\\\\r\\\\n\".encode())\n\ndef send(data):\n    ser.write((\"mac tx cnf 220 \" + data + \"\\\\r\\\\n\").encode())\n\ndef main():\n    period = 10\n    while True:\n        msg = str(getTemp.getTemp())\n        print(ser.read_all().decode(\"utf-8\"))\n        print(\"Sending message: \" + msg + \" to broker\")\n        send(msg)\n        print(\"Waiting for \" + str(period) + \"s before sending again\")\n        time.sleep(period)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"toGateway.py","file_name":"toGateway.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"358735945","text":"import cv2 as cv\nimport numpy as np \nimport csv\nimport os\nimport tkinter as tk\n\npath = ''\n\nroot = tk.Tk()\n\ncanvas1 = tk.Canvas(root, width=400, height=300)\ncanvas1.pack()\n\nentry1 = tk.Entry(root)\ncanvas1.create_window(200, 140, window=entry1)\nlabel1 = tk.Label(root, text='please enter the path to the folder \"Answers\"')\ncanvas1.create_window(200, 230, 
window=label1)\n\n\ndef getpath():\n    global path\n    path = entry1.get()\n\n\nbutton1 = tk.Button(text='Enter Path', command=getpath)\ncanvas1.create_window(200, 180, window=button1)\n\nroot.mainloop()\n\nvar1 = os.listdir(path)\nnames = []\nsheets = []\nresults = []\nrows = []\nfor entry in var1:\n    if entry.endswith(('.jpg', '.png', '.jpeg')):  # keep only image files; an or-chain inside parentheses would only ever test '.jpg'\n        epath = path + '\\\\' + entry\n        tmp_im = cv.imread(epath, 0)\n        sheets.append(tmp_im)\n        names.append(entry[0:-4])\n\nmask = cv.imread('mask.jpg', 0)  # load the mask of model answer generated by draw.py\norg = cv.imread('Test.jpg', 0)  # load the test without answers\norg = cv.resize(org, (460, 654))\n\nfor sheet in sheets:\n\n    image = cv.resize(sheet, (460, 654))\n\n    answers = cv.bitwise_xor(org, image)\n\n    ans_bw = cv.inRange(answers, 90, 255)\n    ans_bw_cp = ans_bw.copy()\n    msk = cv.resize(ans_bw, (462, 656))\n    cv.floodFill(ans_bw, msk, (0, 0), 255)\n\n    ans_inv = cv.bitwise_not(ans_bw)\n    # ans_img = cv.bitwise_xor(ans_inv, ans_bw_cp)\n    ans_img_not_filtered = cv.bitwise_xor(ans_inv, ans_bw_cp)\n\n    # ans_img_8bit=image.astype(np.uint8)#8bit convertion (uint8,uint16)\n\n    # ans_img =small_reg(ans_img_8bit,50)\n    ans_img = cv.medianBlur(ans_img_not_filtered, 9)\n\n    cv.imwrite('ans_mask.jpg', ans_img)\n\n    _, threshold = cv.threshold(mask, 110, 255, cv.THRESH_BINARY)\n\n    # Detecting contours in image.\n    contours, _ = cv.findContours(threshold, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\n    # generating photo for every question\n    center_contour_list = []\n    for c in contours:\n        # compute the center of the contour\n        M = cv.moments(c)\n        x = int(M[\"m10\"] / M[\"m00\"])\n        y = int(M[\"m01\"] / M[\"m00\"])\n        center_contour_point = (x, y)\n        center_contour_list.append(center_contour_point)  # save center of contours\n\n    # for loop to save the cropped photos in list\n    num = 0\n    height = mask.shape[0]\n    width = mask.shape[1]\n\n    croped_list = []\n    for num in range(len(contours)):\n\n        if num == 0:\n            saved_croped = mask[int(((center_contour_list[num][1])+(center_contour_list[num+1][1]))/2): height, 0:width]\n        elif num == (len(contours)-1):\n            saved_croped = mask[0: int(((center_contour_list[num-1][1])+(center_contour_list[num][1]))/2), 0:width]\n        else:\n            saved_croped = mask[int(((center_contour_list[num][1])+(center_contour_list[num+1][1]))/2):int(((center_contour_list[num-1][1])+(center_contour_list[num][1]))/2), 0:width]\n\n        croped_list.append(saved_croped)  # save the cropped photos in list\n\n    # generating photo for every answer\n    croped_list_ans = []\n    for num in range(len(contours)):\n\n        if num == 0:\n            saved_croped_ans = ans_img[int(((center_contour_list[num][1])+(center_contour_list[num+1][1]))/2): height, 0:width]\n        elif num == (len(contours)-1):\n            saved_croped_ans = ans_img[0: int(((center_contour_list[num-1][1])+(center_contour_list[num][1]))/2), 0:width]\n        else:\n            saved_croped_ans = ans_img[int(((center_contour_list[num][1])+(center_contour_list[num+1][1]))/2):int(((center_contour_list[num-1][1])+(center_contour_list[num][1]))/2), 0:width]\n\n        croped_list_ans.append(saved_croped_ans)\n        # save the cropped photos in list\n\n    # generating xor photo for every question\n    xored_croped_list = []\n    for num in range(len(contours)):\n\n        xored_img = cv.bitwise_xor(croped_list_ans[num], croped_list[num], mask=None)\n        xored_croped_list.append(xored_img)\n\n    correction_list = []\n    for num in range(len(contours)):\n        contoures_cropped_ans, _ = cv.findContours(croped_list_ans[num], cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\n        pixel_num_before = cv.countNonZero(croped_list[num])\n
        pixel_num_after = cv.countNonZero(xored_croped_list[num])\n        if len(contoures_cropped_ans) > 1:\n            correction_list.append(0)\n        else:\n            if pixel_num_before > pixel_num_after:\n                correction_list.append(1)\n            elif pixel_num_before <= pixel_num_after:\n                correction_list.append(0)\n\n    results.append(correction_list.count(1).__str__()+'\\\\'+len(correction_list).__str__())\n\n\nfor name in names:\n    tmp_lst = []\n    tmp_lst.append(name)\n    tmp_lst.append(results[names.index(name)])\n    rows.append(tmp_lst)\n\nf = open('Results.csv', 'w', newline='')  # newline='' avoids blank rows from csv.writer on Windows\n\nwith f:\n    writer = csv.writer(f)\n\n    for row in rows:\n        writer.writerow(row)\n","sub_path":".idea/dictionaries/answer_and_crop_atef.py","file_name":"answer_and_crop_atef.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"204447140","text":"import httplib2\nfrom apiclient.discovery import build\nfrom google.appengine.api import memcache\nfrom mapreduce import base_handler\nfrom oauth2client.appengine import AppAssertionCredentials\nimport mapreduce.third_party.pipeline as pipeline\nimport mapreduce.third_party.pipeline.common as pipeline_common\nimport logging\n\n# define some common query\n\nlogger = logging.getLogger('pipeline')\n\ncredentials = AppAssertionCredentials(\n    scope='https://www.googleapis.com/auth/bigquery'\n)\n\nhttp = credentials.authorize(httplib2.Http(memcache))\nservice = build('bigquery', 'v2', http=http)\n\n# https://developers.google.com/bigquery/docs/reference/v2/jobs/insert\n# can only have one child\n\ndef load_module(cls_path):\n    module_path, class_name = \".\".join(cls_path.split('.')[:-1]), cls_path.split('.')[-1]\n    mod = __import__(module_path, fromlist=[class_name])\n    return getattr(mod, class_name)\n\n\nclass Check(base_handler.PipelineBase):\n    def run(self, projectId, jobId, delays=10):\n        jobs = service.jobs()\n        status = jobs.get(\n            projectId=projectId,\n            jobId=jobId\n        ).execute()\n\n        if status['status']['state'] == 'PENDING' or status['status']['state'] == 'RUNNING':\n            delay = yield pipeline_common.Delay(seconds=delays)\n            with pipeline.After(delay):\n                yield Check(projectId, jobId, delays)\n        else:\n            if status['status']['state'] == \"DONE\":\n                if 'errorResult' in status['status']:\n                    logger.error(\"bq failed %s \" % status)\n                else:\n                    logger.info(\"bq success %s\" % status)\n\n        return status\n\n\nclass Api(base_handler.PipelineBase):\n    def run(self, projectId, resourceType, method, body, *args, **kwargs):\n        resource = getattr(service, resourceType)()\n        method = getattr(resource, method)\n\n        kwargs['projectId'] = projectId\n        kwargs['body'] = body\n\n        result = method(\n            *args,\n            **kwargs\n        ).execute()\n\n        return result\n\n\nclass JobSync(base_handler.PipelineBase):\n    def run(self, projectId, method, body, *args, **kwargs):\n        jobs = service.jobs()\n        method = getattr(jobs, method)  # look the method up on the jobs resource\n\n        kwargs['projectId'] = projectId\n        kwargs['body'] = body\n\n        result = method(\n            *args,\n            **kwargs\n        ).execute()\n\n        checked = yield Check(projectId, result['jobReference']['jobId'])\n        with pipeline.After(checked):\n            return result['jobReference']['jobId']\n\n\nclass Query(base_handler.PipelineBase):\n    def run(self, projectId, query):\n        jobId = yield JobSync(projectId, \"query\", {\n            \"query\": query,\n        })\n        return QueryResults(projectId, jobId)\n\n\nclass QueryResults(base_handler.PipelineBase):\n    def run(self, projectId, jobId, timeoutMs=0):\n        jobs = service.jobs()\n        queryReply = jobs.getQueryResults(\n            projectId=projectId,\n            jobId=jobId,\n            timeoutMs=0\n        ).execute()\n\n        rows = []\n        if('rows' in queryReply):\n            currentRow = len(queryReply['rows'])\n\n        while('rows'
in queryReply and currentRow < queryReply['totalRows']):\n queryReply = jobs.getQueryResults(\n projectId=projectId,\n jobId=jobId,\n startIndex=currentRow,\n timeoutMs=timeoutMs\n ).execute()\n\n if('rows' in queryReply):\n currentRow += len(queryReply['rows'])\n rows.extend(queryReply['rows'])\n\n return rows\n\n\nclass Query2Func(base_handler.PipelineBase):\n def run(self, projectId, query, funcPath, funcParams, timeoutMs=0):\n rows = yield Query(projectId, query, timeoutMs)\n yield Results2Func(projectId, rows, funcPath, funcParams)\n\n\nclass Results2Func(base_handler.PipelineBase):\n def run(self, projectId, rows, funcPath, funcParams):\n rows = query_results(projectId, jobId)\n\n func = load_module(funcPath)\n args = funcParams.get('args', [])\n kwargs = funcParams.get('kwargs', {})\n r = func(rows, *args, **kwargs)\n\n return r\n\n\nclass Load(base_handler.PipelineBase):\n def run(self, projectId, datasetId, tableId, sourceUris, fields, sourceFormat=\"CSV\", mode=\"w+\", skipLeadingRows=0, **params):\n createDisposition = \"CREATE_IF_NEEDED\" if '+' in mode else \"CREATE_NEVER\"\n\n if 'w' in mode:\n writeDisposition = 'WRITE_TRUNCATE'\n elif 'a' in mode:\n writeDisposition = 'WRITE_APPEND'\n else:\n writeDisposition = \"WRITE_EMPTY\"\n\n config = {\n \"sourceUris\": sourceUris,\n \"schema\": {\n \"fields\": fields\n },\n \"destinationTable\": {\n \"projectId\": projectId,\n \"datasetId\": datasetId,\n \"tableId\": tableId\n },\n 'createDisposition': createDisposition,\n 'writeDisposition': writeDisposition,\n 'sourceFormat': sourceFormat,\n \"skipLeadingRows\": skipLeadingRows\n }\n\n config.update(params)\n\n return JobSync(\n projectId,\n method=\"insert\",\n body={\n \"projectId\": projectId,\n \"configuration\": {\n \"load\": config\n }\n })\n\n\nclass Extract(base_handler.PipelineBase):\n def run(self, projectId, datasetId, tableId, destinationUris, destinationFormat=\"CSV\", printHeader=True, **params):\n config = {\n \"sourceTable\": {\n \"projectId\": projectId,\n \"datasetId\": datasetId,\n \"tableId\": tableId\n },\n \"destinationUris\": destinationUris,\n \"destinationFormat\": destinationFormat,\n \"printHeader\": printHeader\n }\n\n config.update(params)\n\n return JobSync(\n projectId,\n method=\"extract\",\n body={\n \"projectId\": projectId,\n \"configuration\": {\n \"extract\": config\n }\n })\n","sub_path":"TaskWorker/Common/BigQuery.py","file_name":"BigQuery.py","file_ext":"py","file_size_in_byte":6101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"650466902","text":"import os\nfrom enum import EnumMeta\nfrom itertools import zip_longest\n\nfrom colored import attr\n\nfrom ...util import highlight_module, highlight_val, highlight_name, highlight_val_overwrite\nfrom ...state import like\n\n\nhtml_module = lambda x: x # noqa: E731 no-lambda\nhtml_name = lambda x: x # noqa: E731 no-lambda\nhtml_val = lambda x: x # noqa: E731 no-lambda\nhtml_val_overwrite = lambda x: x # noqa: E731 no-lambda\n\n\ndef listsettings(state, asciicodes=True):\n\n # colors\n color_module = highlight_module if asciicodes else html_module\n color_name = highlight_name if asciicodes else html_name\n color_val = highlight_val if asciicodes else html_val\n color_val_overwrite = highlight_val_overwrite if asciicodes else html_val_overwrite\n linesep = os.linesep if asciicodes else \"\\n\"\n attr_fn = (lambda x: '') if not asciicodes else attr\n\n last_k = []\n if len(state.all) == 0:\n return \"No Settings available.\"\n maxklen = 
max(len(k) for k in state.all.keys())\n text = \"Folder│\" + color_name(\"module\") + \"│\" + color_module(\"variable\") + (\" \" * (maxklen - 22)) + \" = \" + color_val(\"value\") + linesep\n text += \"—\" * (maxklen + 8) + linesep if asciicodes else ''\n for k, v in sorted(state.all.items()):\n\n # ignore state variables that are not registered for argument parsing\n if k not in state.default:\n continue\n\n klen = len(k)\n korig = k\n overwritten = v != (state.default[k].default if hasattr(state.default[k], 'default') else state.default[k])\n k = k.split(\".\")\n if len(k) > 1:\n k_hidden = [\" \" * len(ki) if ki == ki2 and asciicodes else ki for ki, ki2 in zip_longest(k, last_k) if ki is not None]\n last_k = k\n k_hidden[-2] = color_name(k_hidden[-2])\n k_hidden[-1] = color_module(k_hidden[-1])\n else:\n k_hidden = k\n k_hidden[-1] = color_module(k_hidden[-1])\n\n is_lambda = callable(state.default[korig]) and not isinstance(state.default[korig], type) and not isinstance(state.default[korig], EnumMeta) and not isinstance(state.default[korig], like)\n value_str = attr_fn('dim') + \"λ ⟶ \" + attr_fn('reset') + str(state.default[korig].default) if is_lambda else state.default[korig].str(asciicodes=False) if hasattr(state.default[korig], 'str') else str(state.default[korig])\n append = \"\" if not overwritten else \" ⟶ \" + color_val_overwrite(str(v))\n text += \"│\".join(k_hidden) + (\" \" * (maxklen - klen)) + \" = \" + color_val(value_str) + append + linesep\n\n return text\n\n\ndef init(state):\n print(listsettings(state))\n\n\ndef settings_html(state):\n html = listsettings(state, asciicodes=False)\n html = html.split(\"\\n\")\n html = \"\\n\".join([h.replace('=', '=', 1) for h in html])\n html = html.replace('\\n', '\\n')\n html = html[:-8]\n html = \"
\" + html + \"
\"\n return html\n\n\ndef register(mf):\n mf.register_event(\"init\", init)\n mf.register_event(\"settings_html\", settings_html)\n","sub_path":"util/miniflask/src/miniflask/modules/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"573234054","text":"import copy\nfrom typing import Optional, List\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nimport argparse\nimport numpy as np\nfrom torchsummary import summary\n\n\nclass Transformer(nn.Module):\n\n def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,\n num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False,\n return_intermediate_dec=False, input_channels=3):\n super().__init__()\n #self.pos_encoder = PositionEmbeddingSine()# TODO: positional encoding, what is hidden dim?\n\n # TODO: also seems that encoder/decoder take sequential info, can you transform this into fmap input?\n self.embedding_conv = nn.Conv2d(1, d_model, kernel_size=(1, input_channels), stride=(1, input_channels))# TODO: add some sort of image embedding\n encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,\n dropout, activation, normalize_before)\n encoder_norm = nn.LayerNorm(d_model) if normalize_before else None\n self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n\n decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,\n dropout, activation, normalize_before)\n decoder_norm = nn.LayerNorm(d_model)\n self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)\n\n self._reset_parameters()\n\n self.d_model = d_model\n self.nhead = nhead\n self.activation = _get_activation_fn(activation)\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n\n\n def forward(self, src):\n # reshape inputs\n\n # src = X\n # say input is X\n # dim(X) = [batch, channels, width, height]\n\n bs, c, h, w = src.shape\n src = src.permute(0, 2, 3, 1).contiguous()\n # dim(x) = [batch, height, width, channels]\n src = src.view(bs, h, w * c)\n # dim(x) = [batch, height, width * channels]\n\n # what is query_embed?\n\n print(\"transformer:\", src.size())\n # inputs --> embeddings\n src = src.unsqueeze(1)\n print(\"transformer:\", src.size())\n src = self.activation(self.embedding_conv(src))\n print(\"transformer:\", src.size())\n src = src.permute([0, 2, 3, 1]) # move channels to the end\n print(\"transformer:\", src.size())\n #src = src.view(src.shape[0], -1, src.shape[1])\n #print(\"transformer:\", src.size())\n # TODO: figure what the fuck is going on here\n # positional encoding\n # src = self.add_timing_signal(src)\n\n\n tgt = torch.zeros(100, self.d_model)\n # call encoder\n memory = self.encoder(src)\n # call decoder\n hs = self.decoder(tgt, memory)\n\n # flatten NxCxHxW to HWxNxC\n\n #src = src.flatten(2).permute(2, 0, 1)\n #pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n #query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)\n #mask = mask.flatten(1)\n\n #tgt = torch.zeros_like(query_embed)\n #memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)\n #hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,\n # pos=pos_embed, query_pos=query_embed)\n return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)\n\n\nclass TransformerEncoder(nn.Module):\n\n def __init__(self, 
encoder_layer, num_layers, norm=None):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, src):\n output = src\n\n for layer in self.layers:\n output = layer(output)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n\n\nclass TransformerDecoder(nn.Module):\n\n def __init__(self, decoder_layer, num_layers, norm=None):\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(self, tgt, memory):\n output = tgt\n\n for layer in self.layers:\n output = layer(output, memory)\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output\n\n\nclass TransformerEncoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n ffn_layer=\"conv\", norm=\"spectral\", activation=\"relu\"):\n super().__init__()\n\n self.self_attn = MultiHeadAttention(d_model, nhead, dropout=dropout) # TODO: how to edit MultiHeadAttention?\n # Implementation of Feedforward model\n self.ffn1 = nn.Conv2d(d_model, dim_feedforward) if ffn_layer==\"conv\" else nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.ffn2 = nn.Conv2d(d_model, dim_feedforward) if ffn_layer==\"conv\" else nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.utils.spectral_norm if norm==\"spectral\" else nn.LayerNorm(d_model) # TODO: what does spectral normalization do? nn.SpectralNorm()\n self.norm2 = nn.utils.spectral_norm if norm==\"spectral\" else nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.d_model = d_model\n\n # positional encoding\n def add_timing_signal(self, X, min_timescale=1.0, max_timescale=1.0e4):\n num_dims = len(X.shape) - 2 # 2 corresponds to batch and hidden_size dimensions\n num_timescales = self.d_model // (num_dims * 2)\n log_timescale_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)\n inv_timescales = min_timescale * torch.exp(\n (torch.arange(num_timescales).float() * -log_timescale_increment))\n inv_timescales = inv_timescales.to(X.device)\n total_signal = torch.zeros_like(X) # Only for debugging purposes\n for dim in range(num_dims):\n length = X.shape[dim + 1] # add 1 to exclude batch dim\n position = torch.arange(length).float().to(X.device)\n scaled_time = position.view(-1, 1) * inv_timescales.view(1, -1)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)\n prepad = dim * 2 * num_timescales\n postpad = self.d_model - (dim + 1) * 2 * num_timescales\n signal = F.pad(signal, (prepad, postpad))\n for _ in range(1 + dim):\n signal = signal.unsqueeze(0)\n for _ in range(num_dims - 1 - dim):\n signal = signal.unsqueeze(-2)\n print(\"X\", X.size())\n print(\"signal\", signal.size())\n X += signal\n total_signal += signal\n return X\n\n def forward(self, src):\n q = k = self.add_timing_signal(src).view(src.shape[0], -1, src.shape[3])\n v = src.view(src.shape[0], -1, src.shape[3])\n src2 = self.self_attn(q, k, v)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.ffn2(self.dropout(self.activation(self.ffn1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n\n\n\nclass TransformerDecoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n ffn_layer=None, norm=None, activation=\"relu\"):\n 
super().__init__()\n self.self_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.ffn1 = nn.Conv2d(d_model, dim_feedforward, 1) if ffn_layer==\"conv\" else nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.ffn2 = nn.Conv2d(d_model, dim_feedforward, 1) if ffn_layer==\"conv\" else nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.utils.spectral_norm if norm==\"spectral\" else nn.LayerNorm(d_model) # TODO: what does spectral normalization do? nn.SpectralNorm()\n self.norm2 = nn.utils.spectral_norm if norm==\"spectral\" else nn.LayerNorm(d_model)\n self.norm3 = nn.utils.spectral_norm if norm==\"spectral\" else nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.d_model = d_model\n\n # positional encoding\n def add_timing_signal(self, X, min_timescale=1.0, max_timescale=1.0e4):\n num_dims = len(X.shape) - 2 # 2 corresponds to batch and hidden_size dimensions\n num_timescales = self.d_model // (num_dims * 2)\n log_timescale_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)\n inv_timescales = min_timescale * torch.exp(\n (torch.arange(num_timescales).float() * -log_timescale_increment))\n inv_timescales = inv_timescales.to(X.device)\n total_signal = torch.zeros_like(X) # Only for debugging purposes\n for dim in range(num_dims):\n length = X.shape[dim + 1] # add 1 to exclude batch dim\n position = torch.arange(length).float().to(X.device)\n scaled_time = position.view(-1, 1) * inv_timescales.view(1, -1)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)\n prepad = dim * 2 * num_timescales\n postpad = self.d_model - (dim + 1) * 2 * num_timescales\n signal = F.pad(signal, (prepad, postpad))\n for _ in range(1 + dim):\n signal = signal.unsqueeze(0)\n for _ in range(num_dims - 1 - dim):\n signal = signal.unsqueeze(-2)\n X += signal\n total_signal += signal\n return X\n\n def forward(self, tgt, memory):\n q = k = self.add_timing_signal(tgt)\n tgt2 = self.self_attn(q, k, tgt)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n tgt2 = self.multihead_attn(self.add_timing_signal(tgt),\n self.add_timing_signal(memory),\n memory)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.ffn2(self.dropout(self.activation(self.ffn1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, d_model, n_head, d_k=None, d_v=None, weight_layer=None, norm=\"spectral\", dropout=0.1):\n super().__init__()\n\n self.n_head = n_head\n d_k = d_k if d_k else d_model\n d_v = d_v if d_v else d_model\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Conv2d(d_model, n_head * self.d_k, 1) if weight_layer == \"conv\" else nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Conv2d(d_model, n_head * d_k, 1) if weight_layer == \"conv\" else nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Conv2d(d_model, n_head * d_v, 1) if weight_layer == \"conv\" else nn.Linear(d_model, n_head * d_v, bias=False)\n self.fc = nn.Conv2d(n_head * d_v, d_model, 1) if weight_layer == \"conv\" else nn.Linear(n_head * d_v, d_model, bias=False)\n\n self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)\n\n 
self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.utils.spectral_norm if norm==\"spectral\" else nn.LayerNorm(d_model, eps=1e-6)\n\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)\n\n print(\"len1:\", d_k, d_v, n_head)\n print(\"len2:\", sz_b, len_q, len_k, len_v)\n print(\"len3\", q.size())\n residual = q\n\n # Pass through the pre-attention projection: b x lq x (n*dv)\n # Separate different heads: b x lq x n x dv\n print(q.size())\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n print(q.size())\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n # Transpose for attention dot product: b x n x lq x dv\n q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1) # For head axis broadcasting.\n\n q, attn = self.attention(q, k, v, mask=mask)\n\n # Transpose to move the head dimension back: b x lq x n x dv\n # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)\n q = self.dropout(self.fc(q))\n q += residual\n\n q = self.layer_norm(q)\n\n return q, attn\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n\n def forward(self, q, k, v, mask=None):\n\n attn = torch.matmul(q / self.temperature, k.transpose(2, 3))\n\n if mask is not None:\n attn = attn.masked_fill(mask == 0, -1e9)\n\n attn = self.dropout(F.softmax(attn, dim=-1))\n output = torch.matmul(attn, v)\n\n return output, attn\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef build_transformer():\n parser = get_args_parser()\n args = parser.parse_args()\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n )\n\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\n \"\"\"parser.add_argument('--lr', default=1e-4, type=float)\n parser.add_argument('--lr_backbone', default=1e-5, type=float)\n parser.add_argument('--batch_size', default=2, type=int)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=300, type=int)\n parser.add_argument('--lr_drop', default=200, type=int)\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\n help='gradient clipping max norm')\n\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. 
If set, only the mask head will be trained\")\"\"\"\n # * Backbone\n \"\"\"parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', action='store_true',\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\"\"\"\n # TODO: do i need this\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n\n # * Transformer\n parser.add_argument('--enc_layers', default=6, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=6, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=2048, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=100, type=int,\n help=\"Number of query slots\") # TODO: what is the default value?\n parser.add_argument('--pre_norm', action='store_true')\n parser.add_argument(\"--ffn_layer\", default=\"conv\", type=str, choices=(\"conv\", \"linear\"),\n help=\"LaTransformeryers used in Feed Forward Network\")\n\n\n \"\"\"# * Segmentation\n parser.add_argument('--masks', action='store_true',\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n # * Matcher\n parser.add_argument('--set_cost_class', default=1, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=5, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=2, type=float,\n help=\"giou box coefficient in the matching cost\")\n # * Loss coefficients\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\n parser.add_argument('--giou_loss_coef', default=2, type=float)\n parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training 
parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\"\"\"\n return parser\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n #TODO: maybe test with other activation functions?\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n if activation == \"elu\":\n return F.elu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\n\ndef main():\n device = torch.device('cpu')\n t = Transformer().to(device)\n \n print(\"Transformer\")\n #print(t)\n summary(t, (3, 256, 256))\n \nmain()","sub_path":"models/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":19810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"131315893","text":"# flag whether files should be saved to disk in order to avoid making requests every time, you probably won't need this unless you intend to change the code\nSAVE_FILES = False\n# insert the name of your kicktipp lobby here\nLOBBY_NAME = \"asbuli2020\"\n\nLOCAL_URL = \"files/game_day\"\nBASE_URL = \"https://www.kicktipp.de/\" + LOBBY_NAME + \"/tippuebersicht?&spieltagIndex=\"\nURL = \"\"\n\nif SAVE_FILES:\n URL = LOCAL_URL\nelse:\n URL = BASE_URL","sub_path":"Definition.py","file_name":"Definition.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"454881700","text":"#Name: Ken Ko\n#Email: ken.ko17@myhunter.cuny.edu\n#This defines the player class.\n\n#import the pygame library\nimport pygame\n\n#import constants.py file\nfrom constants import *\n\nclass Player(pygame.sprite.Sprite): #Player class extends Sprite\n def __init__(self):\n #equivalent to calling the base class pygame.sprite.Sprite._init_(self)\n super(Player, self).__init__()\n\n # loads person image onto a new surface\n self.surf = pygame.image.load(\"assets/person.png\").convert()\n \n # scales the image to a custom size\n #self.surf = pygame.transform.scale(self.surf, (50,50))\n \n # if any pixel in the image is (255,255,255) i.e. white, make that pixel transparent\n self.surf.set_colorkey((255, 255, 255), RLEACCEL)\n\n #creates a rectangle with the size of the surface and bottom at bottom center of screen\n self.rect = self.surf.get_rect(midbottom = (screen_width/2, 5*screen_height/6),)\n #reduces the player hitbox width by 10 pixels and height by 35 pixels, leaving a 14 pixel width and 5 pixel height for the hitbox\n self.rect.inflate_ip(-10,-35)\n\n self.canJump = False\n self.jump_vel = 0\n self.fall_vel = 0\n self.left_vel = 0\n self.right_vel = 0\n \n # Move the sprite based on user keypresses. 
Looks up the given keystrokes in the user_input\n    # dictionary and whether that key is pressed or not\n    def move(self, user_input):\n        if user_input[K_LEFT]:\n            self.left_vel = self.left_vel - 5 \n            self.rect.move_ip(max(-25,self.left_vel), 0)\n        elif self.left_vel < 0:\n            self.rect.move_ip(max(-25,self.left_vel), 0)\n            self.left_vel += 15\n        \n        if user_input[K_RIGHT]:\n            self.right_vel = self.right_vel + 5\n            self.rect.move_ip(min(25, self.right_vel), 0)\n        elif self.right_vel > 0:\n            self.rect.move_ip(min(25, self.right_vel), 0)\n            self.right_vel -= 15  # decelerate the rightward motion, mirroring the left branch\n        \n        # continues moving up in decreasing amounts to mimic momentum of jump\n        if self.jump_vel > 0:\n            self.rect.move_ip(0,-self.jump_vel)\n            self.jump_vel -= 5\n        if user_input[K_SPACE]:\n            if self.canJump:\n                self.jump_vel = 75\n                self.canJump = False\n\n        #keeps player within boundaries of screen\n        if self.rect.left < 0:\n            self.rect.left = 0\n        if self.rect.right > screen_width:\n            self.rect.right = screen_width\n        if self.rect.bottom > screen_height:\n            self.rect.bottom = screen_height\n            #self.kill()\n        #if self.rect.top < 0:\n        #    self.rect.top = 0\n        \n    def fall(self): #does not take any user input\n        self.fall_vel = min(30, self.fall_vel+2)\n        self.rect.move_ip(0,self.fall_vel)\n        \n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"117424804","text":"\n# 1. Create the window\n# 2. Load the background image\n# 3. Blit the background image onto the window\n# 4. Refresh the window\n\n\nimport pygame\n# import time\nfrom pygame.locals import *\nimport sys\n# from pygame.con\n\n# Simulated constants, not to be changed after definition\nWINDOW_WIDTH = 512\nWINDOW_HEIGHT = 768\n\nif __name__ == '__main__':\n    # 1. Initialize the pygame library so the computer's hardware (sound, text) is ready\n    pygame.init()\n\n    # 2. Create the window\n    window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n    # 3. Load the image files and get back image objects\n    bg_img = pygame.image.load(\"res/img_bg_level_1.jpg\")\n    hero_plane_img = pygame.image.load(\"res/hero.png\")\n    x = 240\n    y = 500\n\n    while True:\n        # 4. Blit (draw the images onto the window at the given coordinates)\n        window.blit(bg_img, (0, 0))\n        window.blit(hero_plane_img, (x, y))\n\n        # 5. Refresh the display\n        pygame.display.update()\n        for event in pygame.event.get():\n            # Check whether the quit button was clicked\n            if event.type == QUIT:\n                # Terminate the program\n                sys.exit()\n            elif event.type == KEYDOWN:\n                # Keyboard key presses\n                if event.key == K_SPACE:\n                    print(\"space\")\n                elif event.key == K_LEFT:\n                    print(\"left\")\n                elif event.key == K_RIGHT:\n                    print(\"right\")\n\n        pressed_key = pygame.key.get_pressed()\n        if pressed_key[pygame.K_LEFT]:\n            if x >= 5:\n                x += -5\n        if pressed_key[pygame.K_RIGHT]:\n            if x <= 407:\n                x += 5\n\n\n\n\n\n","sub_path":"day5/1_1_mor_飞机大战/1_2_按键.py","file_name":"1_2_按键.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
{"seq_id":"315323558","text":"import time\nimport sys\nimport os\nDIRNAME = os.path.dirname(__file__)\nsys.path.append(os.path.join(DIRNAME, '..', '..'))\n\nfrom subprocess import Popen, PIPE\nimport json\nimport numpy as np\nimport pickle\nfrom collections import OrderedDict\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom psutil import virtual_memory, disk_usage\nimport math\n\nfrom src.constrainedChasingEscapingEnv.envMujoco import IsTerminal\nfrom src.constrainedChasingEscapingEnv.reward import RewardFunctionCompete\nfrom exec.trajectoriesSaveLoad import GetSavePath, readParametersFromDf, LoadTrajectories\nfrom src.neuralNetwork.policyValueNet import GenerateModel, Train, saveVariables, sampleData\nfrom src.constrainedChasingEscapingEnv.state import
GetAgentPosFromState\nfrom src.neuralNetwork.trainTools import CoefficientCotroller, TrainTerminalController, TrainReporter, LearningRateModifier\nfrom src.replayBuffer import SampleBatchFromBuffer, SaveToBuffer\nfrom exec.preProcessing import AccumulateRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory\n\n\ndef loadData(path):\n pklFile = open(path, \"rb\")\n dataSet = pickle.load(pklFile)\n pklFile.close()\n\n return dataSet\n\n\nclass GenerateTrajectoriesParallel:\n def __init__(self, codeFileName, numSample, numCmdList, readParametersFromDf):\n self.codeFileName = codeFileName\n self.numSample = numSample\n self.numCmdList = numCmdList\n self.readParametersFromDf = readParametersFromDf\n\n def __call__(self, oneConditionDf):\n startSampleIndexes = np.arange(0, self.numSample, math.ceil(self.numSample/self.numCmdList))\n endSampleIndexes = np.concatenate([startSampleIndexes[1:], [self.numSample]])\n startEndIndexesPair = zip(startSampleIndexes, endSampleIndexes)\n parameters = self.readParametersFromDf(oneConditionDf)\n parametersString = dict([(key, str(value)) for key, value in parameters.items()])\n parametersStringJS = json.dumps(parametersString)\n cmdList = [['python3', self.codeFileName, parametersStringJS, str(startSampleIndex), str(endSampleIndex)] \n for startSampleIndex, endSampleIndex in startEndIndexesPair]\n processList = [Popen(cmd, stdout=PIPE, stderr=PIPE) for cmd in cmdList]\n for proc in processList:\n proc.wait()\n return cmdList\n\nclass ProcessTrajectoryForNN:\n def __init__(self, actionToOneHot, agentId):\n self.actionToOneHot = actionToOneHot\n self.agentId = agentId\n\n def __call__(self, trajectory):\n processTuple = lambda state, actions, actionDist, value: \\\n (np.asarray(state).flatten(), self.actionToOneHot(actions[self.agentId]), value)\n processedTrajectory = [processTuple(*triple) for triple in trajectory]\n\n return processedTrajectory\n\n\nclass PreProcessTrajectories:\n def __init__(self, addValuesToTrajectory, removeTerminalTupleFromTrajectory, processTrajectoryForNN):\n self.addValuesToTrajectory = addValuesToTrajectory\n self.removeTerminalTupleFromTrajectory = removeTerminalTupleFromTrajectory\n self.processTrajectoryForNN = processTrajectoryForNN\n\n def __call__(self, trajectories):\n trajectoriesWithValues = [self.addValuesToTrajectory(trajectory) for trajectory in trajectories]\n filteredTrajectories = [self.removeTerminalTupleFromTrajectory(trajectory) for trajectory in trajectoriesWithValues]\n processedTrajectories = [self.processTrajectoryForNN(trajectory) for trajectory in filteredTrajectories]\n\n return processedTrajectories\n\n\nclass IterativePlayAndTrain:\n def __init__(self, windowSize, numIterations, trajectoriesPath, initializedNNModel, saveNNModel,\n getGenerateTrajectoriesParallel, readParametersFromDf, loadTrajectories, preProcessTrajectories, saveToBuffer,\n getSampleBatchFromBuffer, getTrainNN):\n self.windowSize = windowSize\n self.numIterations = numIterations\n self.trajectoriesPath = trajectoriesPath\n self.initializedNNModel = initializedNNModel\n self.saveNNModel = saveNNModel\n self.getGenerateTrajectoriesParallel = getGenerateTrajectoriesParallel\n self.readParametersFromDf = readParametersFromDf\n self.loadTrajectories = loadTrajectories\n self.preProcessTrajectories = preProcessTrajectories\n self.saveToBuffer = saveToBuffer\n self.getSampleBatchFromBuffer = getSampleBatchFromBuffer\n self.getTrainNN = getTrainNN\n\n def __call__(self, oneConditionDf):\n numTrajectoriesPerIteration = 
oneConditionDf.index.get_level_values('numTrajectoriesPerIteration')[0]\n miniBatchSize = oneConditionDf.index.get_level_values('miniBatchSize')[0]\n learningRate = oneConditionDf.index.get_level_values('learningRate')[0]\n\n generateTrajectoriesParallel = self.getGenerateTrajectoriesParallel(numTrajectoriesPerIteration)\n sampleBatchFromBuffer = self.getSampleBatchFromBuffer(miniBatchSize)\n trainNN = self.getTrainNN(learningRate)\n\n NNModel = self.initializedNNModel\n buffer = []\n length = []\n usedVirtualMemory = []\n # percentDiskUsage = []\n for iterationIndex in range(self.numIterations):\n conditionDfOneIteration = pd.concat([oneConditionDf], keys = [iterationIndex], names = ['iterationIndex'])\n self.saveNNModel(NNModel, conditionDfOneIteration)\n print(\"iteration: \", iterationIndex)\n cmdListGenerateTra = generateTrajectoriesParallel(conditionDfOneIteration)\n trajectories = self.loadTrajectories(self.readParametersFromDf(conditionDfOneIteration))\n print(len(trajectories))\n processedTrajectories = self.preProcessTrajectories(trajectories)\n updatedBuffer = self.saveToBuffer(buffer, processedTrajectories)\n print(len(buffer))\n if len(updatedBuffer) >= miniBatchSize:\n sampledBatch = sampleBatchFromBuffer(updatedBuffer) \n trainData = [list(varBatch) for varBatch in zip(*sampledBatch)] \n updatedNNModel = trainNN(NNModel, trainData) \n NNModel = updatedNNModel \n\n buffer = updatedBuffer\n \n length.append(np.mean([len(trajectory) for trajectory in trajectories]))\n virtualMemory = virtual_memory()\n usedVirtualMemory.append(virtualMemory.used)\n # diskUsage = disk_usage(self.trajectoriesPath)\n # percentDiskUsage.append(diskUsage.percent)\n \n # plt.plot(percentDiskUsage, marker='o')\n # plt.title('percent disk usage')\n # plt.xlabel('iteration')\n # plt.show()\n plt.plot(usedVirtualMemory, marker='o')\n plt.title('virtual memory')\n plt.xlabel('iteration')\n plt.show()\n return length\n\ndef main():\n manipulatedVariables = OrderedDict()\n manipulatedVariables['numTrajectoriesPerIteration'] = [100]#[256]\n manipulatedVariables['miniBatchSize'] = [512]\n manipulatedVariables['learningRate'] = [0.01]\n\n levelNames = list(manipulatedVariables.keys())\n levelValues = list(manipulatedVariables.values())\n modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)\n toSplitFrame = pd.DataFrame(index=modelIndex)\n\n actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7)]\n numActionSpace = len(actionSpace)\n\n # neural network init and save path\n numStateSpace = 12\n regularizationFactor = 1e-4\n sharedWidths = [128]\n actionLayerWidths = [128]\n valueLayerWidths = [128]\n generateModel = GenerateModel(numStateSpace, numActionSpace, regularizationFactor)\n initializedNNModel = generateModel(sharedWidths, actionLayerWidths, valueLayerWidths)\n\n maxRunningSteps = 10\n numSimulations = 50\n qPosInit = (0, 0, 0, 0)\n qPosInitNoise = 9.7\n NNFixedParameters = {'maxRunningSteps': maxRunningSteps, 'qPosInitNoise': qPosInitNoise, 'qPosInit': qPosInit,\n 'numSimulations': numSimulations}\n dirName = os.path.dirname(__file__)\n NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data', 'trainMCTSNNIteratively', 'replayBuffer',\n 'trainedNNModels')\n if not os.path.exists(NNModelSaveDirectory):\n os.makedirs(NNModelSaveDirectory)\n NNModelSaveExtension = ''\n getNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, NNFixedParameters)\n NNPathFromDf = lambda oneConditionDf: 
getNNModelSavePath(readParametersFromDf(oneConditionDf)) \n saveNNModel = lambda NNmodel, oneConditionDf: saveVariables(NNmodel, NNPathFromDf(oneConditionDf)) \n \n #generate trajectory \n generateTrajectoriesCodeName = 'generateTrajectoryMCTSNNPriorRolloutPolicySheepChaseWolfMujoco.py'\n numToUseCores = 4\n getGenerateTrajectoriesParallel = lambda numTrajectoriesPerIteration: GenerateTrajectoriesParallel(generateTrajectoriesCodeName, numTrajectoriesPerIteration,\n numToUseCores, readParametersFromDf)\n \n #trajectory path to load\n trajectoryFixedParameters = {'maxRunningSteps': maxRunningSteps, 'qPosInit': qPosInit,\n 'qPosInitNoise': qPosInitNoise, 'numSimulations': numSimulations}\n trajectorySaveDirectory = os.path.join(dirName, '..', '..', 'data', 'trainMCTSNNIteratively', 'replayBuffer',\n 'trajectories')\n if not os.path.exists(trajectorySaveDirectory):\n os.makedirs(trajectorySaveDirectory)\n trajectoryExtension = '.pickle'\n getTrajectorySavePath = GetSavePath(trajectorySaveDirectory, trajectoryExtension, trajectoryFixedParameters)\n\n #load trajectories\n fuzzySearchParameterNames = ['sampleIndex']\n loadTrajectories = LoadTrajectories(getTrajectorySavePath, loadData, fuzzySearchParameterNames)\n\n # pre-process the trajectory for training the neural network\n sheepId = 0\n wolfId = 1\n xPosIndex = [2, 3]\n getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)\n getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex)\n playAliveBonus = -1/maxRunningSteps\n playDeathPenalty = 1\n playKillzoneRadius = 0.5\n playIsTerminal = IsTerminal(playKillzoneRadius, getSheepXPos, getWolfXPos)\n playReward = RewardFunctionCompete(playAliveBonus, playDeathPenalty, playIsTerminal)\n\n decay = 1\n accumulateRewards = AccumulateRewards(decay, playReward)\n addValuesToTrajectory = AddValuesToTrajectory(accumulateRewards)\n\n actionToOneHot = lambda action: np.asarray([1 if (np.array(action) == np.array(actionSpace[index])).all() else 0\n for index in range(len(actionSpace))])\n actionIndex = 1\n getTerminalActionFromTrajectory = lambda trajectory: trajectory[-1][actionIndex]\n removeTerminalTupleFromTrajectory = RemoveTerminalTupleFromTrajectory(getTerminalActionFromTrajectory)\n processTrajectoryForNN = ProcessTrajectoryForNN(actionToOneHot, sheepId)\n preProcessTrajectories = PreProcessTrajectories(addValuesToTrajectory, removeTerminalTupleFromTrajectory,\n processTrajectoryForNN)\n\n # replay buffer\n windowSize = 2000\n saveToBuffer = SaveToBuffer(windowSize)\n getUniformSamplingProbabilities = lambda buffer: [(1/len(buffer)) for _ in buffer]\n getSampleBatchFromBuffer = lambda miniBatchSize: SampleBatchFromBuffer(miniBatchSize, getUniformSamplingProbabilities)\n\n # function to train NN model\n batchSizeForTrainFunction = 0\n terminalThreshold = 1e-6\n lossHistorySize = 10\n initActionCoeff = 1\n initValueCoeff = 1\n initCoeff = (initActionCoeff, initValueCoeff)\n afterActionCoeff = 1\n afterValueCoeff = 1\n afterCoeff = (afterActionCoeff, afterValueCoeff)\n terminalController = TrainTerminalController(lossHistorySize, terminalThreshold)\n coefficientController = CoefficientCotroller(initCoeff, afterCoeff)\n reportInterval = 25\n numTrainStepsPerIteration = 1\n trainReporter = TrainReporter(numTrainStepsPerIteration, reportInterval)\n learningRateDecay = 1\n learningRateDecayStep = 1\n learningRateModifier = lambda learningRate: LearningRateModifier(learningRate, learningRateDecay, learningRateDecayStep)\n getTrainNN = lambda learningRate: Train(numTrainStepsPerIteration, 
batchSizeForTrainFunction, sampleData,\n learningRateModifier(learningRate),\n terminalController, coefficientController,\n trainReporter)\n # functions to iteratively play and train the NN\n numIterations = 20#40#150\n iterativePlayAndTrain = IterativePlayAndTrain(windowSize, numIterations, trajectorySaveDirectory, initializedNNModel,\n saveNNModel, getGenerateTrajectoriesParallel, readParametersFromDf, loadTrajectories,\n preProcessTrajectories, saveToBuffer, getSampleBatchFromBuffer, getTrainNN)\n startTime = time.time()\n performanceDf = toSplitFrame.groupby(levelNames).apply(iterativePlayAndTrain)\n endTime = time.time()\n print(\"time for {} iterations = {}\".format(numIterations, (endTime-startTime)))\n plt.plot(performanceDf.values[0])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exec/trainMCTSNNIteratively/trainMCTSNNIterativelyParallel.py","file_name":"trainMCTSNNIterativelyParallel.py","file_ext":"py","file_size_in_byte":13446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"492099975","text":"import synapse.exc as s_exc\nimport synapse.cortex as s_cortex\n\nimport synapse.tests.utils as s_tests\nimport synapse.lib.modelrev as s_modelrev\n\ndef nope(*args, **kwargs):\n raise Exception('nope was called')\n\nclass ModelRevTest(s_tests.SynTest):\n\n async def test_cortex_modelrev_init(self):\n\n with self.getTestDir(mirror='testcore') as dirn:\n\n async with await s_cortex.Cortex.anit(dirn) as core:\n layr = core.getLayer()\n self.true(layr.fresh)\n self.eq(s_modelrev.maxvers, await layr.getModelVers())\n\n # no longer \"fresh\", but lets mark a layer as read only\n # and test the bail condition for layers which we cant update\n async with await s_cortex.Cortex.anit(dirn) as core:\n\n layr = core.getLayer()\n layr.canrev = False\n\n mrev = s_modelrev.ModelRev(core)\n\n mrev.revs = mrev.revs + (((9999, 9999, 9999), nope),)\n\n with self.raises(s_exc.CantRevLayer):\n await mrev.revCoreLayers()\n\n # no longer \"fresh\"\n async with await s_cortex.Cortex.anit(dirn) as core:\n\n layr = core.getLayer()\n self.false(layr.fresh)\n\n self.eq(s_modelrev.maxvers, await layr.getModelVers())\n\n mrev = s_modelrev.ModelRev(core)\n\n layr.woot = False\n\n async def woot(layers):\n layr.woot = True\n\n mrev.revs = mrev.revs + (((9999, 9999, 9999), woot),)\n\n await mrev.revCoreLayers()\n\n self.true(layr.woot)\n self.eq((9999, 9999, 9999), await layr.getModelVers())\n\n async def test_modelrev_pre010(self):\n\n sorc = 'a6246e97d7b02e2dcc90dd117611a981'\n plac = '36c0959d703b9d16e7566a858234bece'\n pers = '83fc390015c0c0ed054d09e87aa31853'\n evnt = '7156b48f84de79d3a375baa3c7904387'\n clus = 'fae28e60d8af681f12109d6da0c48555'\n\n async with self.getRegrCore('pre-010') as core:\n\n self.len(1, await core.nodes(f'meta:source={sorc} +#hehe +.seen=2019'))\n self.len(1, await core.nodes(f'meta:seen=({sorc}, (inet:dns:a, (vertex.link, 1.2.3.4))) +#hehe +.seen=2019'))\n self.len(1, await core.nodes(f'graph:event={evnt} +#hehe +.seen=2019'))\n self.len(1, await core.nodes(f'graph:cluster={clus} +#hehe +.seen=2019'))\n self.len(1, await core.nodes(f'edge:has=((ps:person, {pers}), (geo:place, {plac})) +#hehe +.seen=2019'))\n self.len(1, await core.nodes(f'edge:refs=((ps:person, {pers}), (geo:place, {plac})) +#hehe +.seen=2019'))\n self.len(1, await core.nodes(f'edge:wentto=((ps:person, {pers}), (geo:place, {plac}), 2019) +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('meta:source#hehe'))\n self.len(1, await 
core.nodes('meta:source.seen=2019'))\n self.len(1, await core.nodes('meta:source +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('meta:seen#hehe'))\n self.len(1, await core.nodes('meta:seen.seen=2019'))\n self.len(1, await core.nodes('meta:seen +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('edge:has#hehe'))\n self.len(1, await core.nodes('edge:has.seen=2019'))\n self.len(1, await core.nodes('edge:has +#hehe +.seen=2019'))\n\n self.len(2, await core.nodes('edge:refs#hehe'))\n self.len(2, await core.nodes('edge:refs.seen=2019'))\n self.len(2, await core.nodes('edge:refs +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('edge:wentto#hehe'))\n self.len(1, await core.nodes('edge:wentto.seen=2019'))\n self.len(1, await core.nodes('edge:wentto +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('graph:cluster#hehe'))\n self.len(1, await core.nodes('graph:cluster.seen=2019'))\n self.len(1, await core.nodes('graph:cluster +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('graph:edge#hehe'))\n self.len(1, await core.nodes('graph:edge.seen=2019'))\n self.len(1, await core.nodes('graph:edge +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('graph:timeedge#hehe'))\n self.len(1, await core.nodes('graph:timeedge.seen=2019'))\n self.len(1, await core.nodes('graph:timeedge +#hehe +.seen=2019'))\n\n self.len(1, await core.nodes('meta:source -> meta:seen :node -> * +inet:dns:a'))\n\n self.len(1, await core.nodes('ps:person -> edge:has +:n1:form=ps:person +:n2:form=geo:place -> geo:place'))\n self.len(1, await core.nodes('ps:person -> edge:refs +:n1:form=ps:person +:n2:form=geo:place -> geo:place'))\n self.len(1, await core.nodes('ps:person -> edge:wentto +:n1:form=ps:person +:n2:form=geo:place +:time=2019 -> geo:place'))\n\n # check secondary ndef property index\n self.len(1, await core.nodes('graph:cluster -> edge:refs -> inet:fqdn'))\n\n # check secondary compound property index\n sorc = (await core.nodes('meta:source'))[0].ndef[1]\n self.len(1, await core.nodes(f'meta:seen:source={sorc}'))\n\n async def test_modelrev_0_1_1(self):\n\n cont0 = '7b3bbf19a8e4d3f5204da8c7f6395494'\n cont1 = 'dd0c914ec06bd7851009d5bad7430ff1'\n\n async with self.getRegrCore('0.1.0') as core:\n\n opts = {'vars': {'cont0': cont0, 'cont1': cont1}}\n\n node0 = (await core.nodes('ps:contact=$cont0', opts=opts))[0]\n node1 = (await core.nodes('ps:contact=$cont1', opts=opts))[0]\n\n self.eq('this is not changed', node0.get('address'))\n self.eq('this has one space', node1.get('address'))\n","sub_path":"synapse/tests/test_lib_modelrev.py","file_name":"test_lib_modelrev.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"608483258","text":"\n\nfrom xai.brain.wordbase.nouns._reject import _REJECT\n\n#calss header\nclass _REJECTING(_REJECT, ):\n\tdef __init__(self,): \n\t\t_REJECT.__init__(self)\n\t\tself.name = \"REJECTING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"reject\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_rejecting.py","file_name":"_rejecting.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"131625595","text":"import cv2\r\nimport methods as mt\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport imutils\r\n\r\n\r\nimg = cv2.imread('pics/lecture_pic6.png')\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\nborder = 5\r\nout = mt.add_borders(img, border, 
border, border, border)\r\ncount = 0\r\n\r\nwhile True:\r\n out, flag = mt.detect_board(out)\r\n mt.edge_detector(out)\r\n if flag and count == 1:\r\n plt.imshow(out), plt.show()\r\n break\r\n if count == 2:\r\n break\r\n plt.imshow(out), plt.show()\r\n out = mt.crop_borders(out, border+1)\r\n count += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"} +{"seq_id":"432143335","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 07/03/2019\n@author: Haipeng \n\"\"\"\nimport pandas as pd \nimport numpy as np\n\n# read in the sleep dataset\ndf = pd.read_csv('shhs2-dataset-0.13.0.csv', encoding= \"ISO-8859-1\")\n\n\"\"\"\nFeature selecting:\n\navg23bpd_s2: Average Diastolic blood pressure (BP)\navg23bps_s2: Average Systolic blood pressure (BP)\nai_all: Arousal Index\nrdi0p: Overall Respiratory Disturbance Index (RDI) all oxygen desaturations -- The prevalence of obstructive sleep apnea (OSA) depends on the definition of the respiratory disturbance index (RDI) or apnea–hypopnea index (AHI) criteria,(Obstructive Sleep Apnea in Adults: Epidemiology and Variants)\nnsupinep: Percent Time non-supine\npctlt75: Percent of sleep time with less than 75% oxygen saturation (SaO2)\npctlt80: Percent of sleep time with less than 80% oxygen saturation (SaO2)\npctlt85: Percent of sleep time with less than 85% oxygen saturation (SaO2)\npctlt90: Percent of sleep time with less than 90% oxygen saturation (SaO2), Ratio of the number of minutes with oxygen saturation (SaO2) under 90% to the total sleep time expressed in hours.\nslp_eff: Percentage of time in bed that was spent sleeping, or the ratio of total sleep time to total time in bed, expressed as a percentage.\nslp_lat: Time from lights out time to beginning of sleep, rounded to nearest minute\nslpprdp: Sleep Time\nslptime: Total Sleep Time\nsupinep: Percent Time supine\ntimes34p: Percent Time in Stage 3/4\ntimest1p: Percent Time in Stage 1\ntimest2p: Percent Time in Stage 2\nwaso: Total amount of time spent awake after going to sleep\nms204a: Morning Survey (Sleep Heart Health Study Visit Two (SHHS2)), Quality of sleep light/deep, Rate the actual quality of your sleep last night (Do not compare to usual sleep quality). My sleep last night was (circle a number for each): a. 
[5 point Likert scale from \"Light\" to \"Dark\"]\nms204b: Morning Survey (Sleep Heart Health Study Visit Two (SHHS2)): Quality of sleep: short/long\nms204c: Morning Survey (Sleep Heart Health Study Visit Two (SHHS2)): Quality of sleep: restless/restful\n\n\"\"\"\nbuffer_df = df.loc[:,['avg23bpd_s2','avg23bps_s2','ai_all','rdi0p', 'nsupinep', 'pctlt75', 'pctlt80', 'pctlt85', 'pctlt90',\n 'slp_eff', 'slp_lat', 'slpprdp', 'slptime', 'supinep', 'times34p', 'timest1p', 'timest2p', 'waso',\n 'ms204a', 'ms204b', 'ms204c']]\n\n# Begin data cleaning\n# Beacause it has too much null values, so we drop the \"slptime\"\nbuffer_df = buffer_df.drop(['slptime'], axis = 1)\nbuffer_df = buffer_df.dropna(\n axis = 0,\n how = 'all')\n# We choose rows who have at least 16 non-empty values\nbuffer_df = buffer_df.dropna(axis=0, thresh=16)\n# Calculate the average of each column\n# buffer_df.mean()\nbuffer_df['avg23bpd_s2'] = buffer_df['avg23bpd_s2'].fillna(value=70.677064)\nbuffer_df['avg23bps_s2'] = buffer_df['avg23bps_s2'].fillna(value=127.766135)\nbuffer_df['ai_all'] = buffer_df['ai_all'].fillna(value=18.372544)\nbuffer_df['rdi0p'] = buffer_df['rdi0p'].fillna(value=27.944731)\nbuffer_df['nsupinep'] = buffer_df['nsupinep'].fillna(value=64.623896)\nbuffer_df['pctlt75'] = buffer_df['pctlt75'].fillna(value=0.046970)\nbuffer_df['pctlt80'] = buffer_df['pctlt80'].fillna(value=0.156439)\nbuffer_df['pctlt85'] = buffer_df['pctlt85'].fillna(value=0.635985)\nbuffer_df['pctlt90'] = buffer_df['pctlt90'].fillna(value=4.264773)\nbuffer_df['slp_eff'] = buffer_df['slp_eff'].fillna(value=79.182438)\nbuffer_df['slp_lat'] = buffer_df['slp_lat'].fillna(value=25.909047)\nbuffer_df['slpprdp'] = buffer_df['slpprdp'].fillna(value=374.065481)\nbuffer_df['supinep'] = buffer_df['supinep'].fillna(value=35.378943)\nbuffer_df['times34p'] = buffer_df['times34p'].fillna(value=15.948032)\nbuffer_df['timest1p'] = buffer_df['timest1p'].fillna(value=5.753917)\nbuffer_df['timest2p'] = buffer_df['timest2p'].fillna(value=57.712648)\nbuffer_df['waso'] = buffer_df['waso'].fillna(value=80.084671)\n# For labels, we drop those null values\nbuffer_df.dropna(axis=0, how='any',inplace=True)\n\nbuffer_df.to_csv('SleepQuality_After_Cleaning.csv',index=False,header=True)\n\n\n\n\n\n\n\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"} +{"seq_id":"343107662","text":"\"\"\" main.py\nASTR598 - Deep Learning final project by Stephan Hagel.\n\nThe purpose of this project is to take data from simulated proton-proton collisions in the ATLAS detector at the LHC in\nCern. This project was motivated by the 2014 ATLAS Higgs Machine Learning challenge. The data used for this project has\nbeen provided for said challenge, which consists of 818238 events with 30 dimensions.\n\nSome notes about the dataframe:\nThe following indices are of significant importance:\n\nindex 0: EventID\n An integer, that uniquely labels each event. Not to be used as an input parameter.\n\nindex 1: DER_mass_MMC\n The mass of the Higgs candidate. Should be around the physical value of ~125 GeV.\n Contains 122143 points of missing data, which have to be taken care of.\n\nindex 5-7:\n Only defined, if two or more jets are generated. 568698 points of systematically missing data.\n\nindex 13:\n Only defined, if two or more jets are generated. 
{"seq_id":"343107662","text":"\"\"\" main.py\nASTR598 - Deep Learning final project by Stephan Hagel.\n\nThe purpose of this project is to take data from simulated proton-proton collisions in the ATLAS detector at the LHC in\nCern. This project was motivated by the 2014 ATLAS Higgs Machine Learning challenge. The data used for this project has\nbeen provided for said challenge and consists of 818238 events with 30 dimensions.\n\nSome notes about the dataframe:\nThe following indices are of significant importance:\n\nindex 0: EventID\n An integer that uniquely labels each event. Not to be used as an input parameter.\n\nindex 1: DER_mass_MMC\n The mass of the Higgs candidate. Should be around the physical value of ~125 GeV.\n Contains 122143 points of missing data, which have to be taken care of.\n\nindex 5-7:\n Only defined if two or more jets are generated. 568698 points of systematically missing data.\n\nindex 13:\n Only defined if two or more jets are generated. 568698 points of systematically missing data.\n\nindex 23: PRI_jet_num\n An integer that indicates how many jets have been generated in that event.\n\nindex 24-26:\n Only defined if one or more jets are generated. 320850 points of systematically missing data.\n\nindex 27-29:\n Only defined if two or more jets are generated. 568698 points of systematically missing data.\n\nindex 31: Weight\n Used in the original challenge, obsolete for this project.\n\nindex 32: Label\n The actual label that is used for the classification. 'b' stands for background, 's' stands for signal.\n\nindex 33-34: Kaggle*\n Used in the original challenge, obsolete for this project.\n\nUSAGE: python3 main.py flags\nwhere flags can be one or more of\nIGNORE_MASS_DATA\nIGNORE_JET_DATA\nIGNORE_MULTIJET_DATA\nREMOVE_HIGGS_NAN\nSIMPLE_IMPUTE\n\"\"\"\n\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom keras import backend as K\nfrom keras.models import load_model\nimport tensorflow as tf\n\n# These are the important indices mentioned above.\nKAGGLE_WEIGHT_INDEX = 34\nKAGGLE_SET_INDEX = 33\nWEIGHT_INDEX = 31\nLABEL_INDEX = 32\nEVENTID_INDEX = 0\nDER_MASS_INDEX = 1\nMULTIJET_INDEX = [5, 6, 7, 13, 27, 28, 29]\nJETNUMBER_INDEX = 23\nSOLOJET_INDEX = [24, 25, 26]\n\n# These Booleans define how the missing data is handled.\nIGNORE_MASS_DATA = False\nIGNORE_JET_DATA = False\nIGNORE_MULTIJET_DATA = False\nREMOVE_HIGGS_NAN = False\nSIMPLE_IMPUTE = False\nADVANCED_IMPUTE = False\n\n# This Boolean shows whether the model should be saved in the end.\nSAVE_MODEL = False\n\n# This Boolean shows whether a model should be loaded from a file. If this is the case, training continues from the\n# loaded model instead of a freshly initialized one.\nLOAD_MODEL = False\n\nPHYSICAL_HIGGS_MASS = 125.18\n\n\ndef main():\n # START GPU CONFIG\n\n config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 24})\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n # END GPU CONFIG\n # START SETTING FLAGS\n\n global IGNORE_MASS_DATA, IGNORE_JET_DATA, REMOVE_HIGGS_NAN, SIMPLE_IMPUTE, ADVANCED_IMPUTE, IGNORE_MULTIJET_DATA\n\n if \"IGNORE_JET_DATA\" in sys.argv:\n print(\"Ignoring all jet data.\")\n IGNORE_JET_DATA = True\n\n if \"IGNORE_MULTIJET_DATA\" in sys.argv:\n print(\"Ignoring data with multiple jets.\")\n IGNORE_MULTIJET_DATA = True\n\n if \"IGNORE_MASS_DATA\" in sys.argv:\n print(\"Ignoring mass data.\")\n IGNORE_MASS_DATA = True\n\n if \"REMOVE_HIGGS_NAN\" in sys.argv:\n print(\"Removing NaN's in Higgs mass.\")\n REMOVE_HIGGS_NAN = True\n\n if \"SIMPLE_IMPUTE\" in sys.argv:\n print(\"Using simple imputer for Higgs mass.\")\n SIMPLE_IMPUTE = True\n\n if \"ADVANCED_IMPUTE\" in sys.argv:\n print(\"Using advanced imputer for Higgs mass.\")\n ADVANCED_IMPUTE = True\n\n # Set the flags for saving and loading the model.\n global SAVE_MODEL, LOAD_MODEL\n model_path = \"models/model.h5\"\n\n if \"LOAD_MODEL\" in sys.argv:\n LOAD_MODEL = True\n print(\"Model will be loaded from \" + model_path + \".\")\n\n if \"SAVE_MODEL\" in sys.argv:\n SAVE_MODEL = True\n print(\"Model will be saved to \" + model_path + \".\")\n\n # END SETTING FLAGS\n\n # START READING DATA\n try:\n datafile = \"data/data.csv\"\n print(\"Reading data from \" + datafile)\n dataframe = pd.read_csv(datafile, header=None)\n except FileNotFoundError:\n print(\"File \" + datafile + \" not found. 
Please make sure you are running this program from the src folder.\")\n quit()\n\n # First we delete the columns that contain data that will not be used in the model\n print(\"Removing unused columns.\")\n del dataframe[KAGGLE_WEIGHT_INDEX]\n del dataframe[KAGGLE_SET_INDEX]\n del dataframe[WEIGHT_INDEX]\n del dataframe[EVENTID_INDEX]\n\n # And we also remove the header\n dataframe = dataframe.iloc[1:]\n\n # Next we convert the strings in the dataframe to actual numbers. The option errors='ignore' guarantees that the\n # values for the label are kept and not converted to NaN.\n print(\"Converting to numbers.\")\n dataframe = dataframe.apply(pd.to_numeric, errors='ignore')\n\n # Now we can replace the -999.0 entries with NaN.\n print(\"Replacing missing data with NaN.\")\n dataframe = dataframe.replace({-999.0: np.nan})\n\n # We will further convert the label from 'b' and 's' to 0 and 1 respectively.\n print(\"Converting labels.\")\n dataframe[LABEL_INDEX] = 1 - pd.factorize(dataframe[LABEL_INDEX])[0]\n\n # END READING DATA\n\n # START PREPARING DATA\n\n # Depending on the option set above, we will handle the NaN's in different ways:\n if IGNORE_JET_DATA:\n # Case 1: Just ignore the columns with jet data in them.\n print(\"Mode IGNORE_JET_DATA is set to True.\")\n print(\"Deleting missing data.\")\n for i in SOLOJET_INDEX:\n del dataframe[i]\n for j in MULTIJET_INDEX:\n del dataframe[j]\n\n elif IGNORE_MULTIJET_DATA:\n # Case 2: Ignore the data with multiple jets generated.\n print(\"Mode IGNORE_MULTIJET_DATA is set to True.\")\n print(\"Deleting missing data.\")\n\n # Delete the columns that contain multijet data\n for i in MULTIJET_INDEX:\n del dataframe[i]\n\n # Delete the rows which contain no jet data\n dataframe = dataframe[dataframe[JETNUMBER_INDEX] > 0]\n\n else:\n # Case 3: Use all jet columns, which requires keeping only events with two or more jets.\n print(\"Only events with multiple jets produced will be used.\")\n print(\"Deleting missing data.\")\n\n # Keep only the rows with at least two jets produced\n dataframe = dataframe[dataframe[JETNUMBER_INDEX] > 1]\n\n # If the IGNORE_MASS_DATA flag is set, we will also delete the mass column.\n if IGNORE_MASS_DATA:\n del dataframe[DER_MASS_INDEX]\n # If the REMOVE_HIGGS_NAN flag is set, we remove the NaN's.\n elif REMOVE_HIGGS_NAN:\n dataframe.dropna(inplace=True)\n # For the simple imputing we use the physical Higgs mass to replace NaN's.\n elif SIMPLE_IMPUTE:\n dataframe.fillna(PHYSICAL_HIGGS_MASS, inplace=True)\n\n # Now we can convert the dataframe to a numpy matrix.\n print(\"Converting data to Matrix.\")\n data_matrix = dataframe.values.astype(np.float64)\n\n # Let's do some garbage collection.\n del dataframe\n\n # We will use the MinMaxScaler from sklearn to normalize the data\n scaler = MinMaxScaler()\n scaler.fit(data_matrix)\n print(\"Normalizing data.\")\n data_matrix_norm = scaler.transform(data_matrix)\n del data_matrix\n\n # We furthermore need to separate the labels from the actual training data\n print(\"Separating labels.\")\n target = data_matrix_norm[:, -1]\n train = data_matrix_norm[:, :-1]\n del data_matrix_norm\n\n # Depending on the flags that have been set, the dimension of our training data might vary.\n # Therefore we need to extract the input dimension to use it to make our network a reasonable size.\n input_dim_ = len(train[0])\n\n # Now we can finally split our data into training and test data and start training our model\n print(\"Splitting test and training data\")\n x_train, x_test, y_train, y_test = train_test_split(train, target, test_size=0.15, 
random_state=1)\n\n print(\"Finished data preparation.\")\n # END PREPARING DATA\n\n # START TRAINING MODEL\n\n from keras.models import Sequential\n from keras.layers import Dense, Dropout\n\n if not LOAD_MODEL:\n # Here is where the actual model training begins.\n model = Sequential()\n\n mean = (input_dim_ + 1) // 2\n\n model.add(Dense(input_dim_, input_dim=input_dim_, activation='relu'))\n model.add(Dense(mean, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n else:\n model = load_model(model_path)\n\n model.fit(x_train, y_train,\n epochs=20,\n batch_size=128)\n\n loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)\n print(loss_and_metrics)\n\n if SAVE_MODEL:\n model.save(model_path)\n\n # END TRAINING MODEL\n\n # START OPTIONAL CODE\n\n # TODO: Implement some of the improvements mentioned in the HiggsML talk.\n\n # END OPTIONAL CODE\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main_gpu.py","file_name":"main_gpu.py","file_ext":"py","file_size_in_byte":9353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
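One caveat worth illustrating: the script above fits MinMaxScaler on the full matrix before splitting, which leaks test-set statistics into training. A leakage-free sketch, assuming arrays shaped like train/target above (the helper name and defaults are illustrative, not part of the original script):

import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

def split_then_scale(features, labels, test_size=0.15, seed=1):
    x_tr, x_te, y_tr, y_te = train_test_split(
        features, labels, test_size=test_size, random_state=seed)
    # Fit scaling statistics on the training portion only
    scaler = MinMaxScaler().fit(x_tr)
    return scaler.transform(x_tr), scaler.transform(x_te), y_tr, y_te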
{"seq_id":"614712291","text":"### list \r\n\r\n## construct a list\r\nlst = [1, 2]\r\n\r\n## add and delete\r\nlst.append(x)\r\n#Add an item to the end of the list\r\nlst.extend(L)\r\n#Extend the list by appending all the items in the given list\r\nlst.insert(i, x)\r\n#Insert an item at a given position.\r\nlst.remove(x)\r\n#Remove the first item from the list whose value is x.\r\nlst.pop([i])\r\n#Remove the item at the given position in the list, and return it. If no index is specified, lst.pop() removes and returns the last item in the list. (The square brackets around the i in the method signature denote that the parameter is optional, not that you should type square brackets at that position. You will see this notation frequently in the Python Library Reference.)\r\nlst.index(x)\r\n#Return the index in the list of the first item whose value is x. It is an error if there is no such item.\r\nlst.count(x)\r\n#Return the number of times x appears in the list.\r\nlst.sort(key=None, reverse=False)\r\n#Sort the items of the list in place (the arguments can be used for sort customization, see sorted() for their explanation). The Python 2 cmp argument no longer exists in Python 3.\r\nlst.reverse()\r\n#Reverse the elements of the list, in place.","sub_path":"data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
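A runnable walk-through of the methods listed in the cheat sheet above, using nothing beyond the standard library; the inline comments show the list after each call:

lst = [3, 1, 2]
lst.append(4)        # [3, 1, 2, 4]
lst.extend([5, 6])   # [3, 1, 2, 4, 5, 6]
lst.insert(0, 0)     # [0, 3, 1, 2, 4, 5, 6]
lst.remove(3)        # [0, 1, 2, 4, 5, 6]
last = lst.pop()     # last == 6, lst == [0, 1, 2, 4, 5]
print(lst.index(2))  # 2
print(lst.count(2))  # 1
lst.sort(reverse=True)
print(lst)           # [5, 4, 2, 1, 0]
lst.reverse()
print(lst)           # [0, 1, 2, 4, 5]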
{"seq_id":"199792429","text":"\r\nimport operator\r\nimport random \r\nfrom sklearn import metrics\r\n\r\nimport Clustering as cl\r\nimport GeneticAlgorithm as ga\r\nimport KMedoids as km\r\nimport MatrixFunctions as mf\r\nimport ReadSimilarities as rs\r\nimport UtilitiesSCOP as scop\r\n\r\nfrom sklearn.cluster import AgglomerativeClustering\r\n\r\ndef generateIndividual():\r\n p1 = generateRandomInt()\r\n p2 = generateRandomInt()\r\n p3 = generateRandomInt()\r\n weights = [p1,p2,p3]\r\n #weights = [x / 100 for x in weights]\r\n return weights\r\n\r\ndef generateRandomInt():\r\n # despite the name, this returns a float in [0.0, 1.0] in steps of 0.01\r\n num = random.randint(0,100)\r\n num /= 100\r\n return num\r\n\r\ndef generatePopulation(population_size):\r\n population = []\r\n i = 0\r\n while i < population_size:\r\n population.append(generateIndividual())\r\n i += 1\r\n return population\r\n\r\ndef fitnessFunction(individual, algorithm, n_labels, ground_truth, m1, m2, m3):\r\n\r\n w1 = individual[0]\r\n w2 = individual[1]\r\n # w3 = individual[2]\r\n w3 = 0\r\n corr = mf.calculateCorrelationMatrix(m1, m2, m3, w1, w2, w3)\r\n\r\n if algorithm == 'complete':\r\n agglomerative = AgglomerativeClustering(affinity='precomputed', n_clusters=n_labels, linkage='complete').fit(corr)\r\n labels = agglomerative.labels_\r\n elif algorithm == 'average':\r\n agglomerative = AgglomerativeClustering(affinity='precomputed', n_clusters=n_labels, linkage='average').fit(corr)\r\n labels = agglomerative.labels_\r\n elif algorithm == 'kmedoids':\r\n _, clusters = km.kMedoids(corr, n_labels, 100)\r\n labels = km.sortLabels(clusters)\r\n \r\n #fitness = metrics.homogeneity_score(labels, ground_truth)\r\n #fitness = metrics.adjusted_rand_score(labels, ground_truth)\r\n #fitness = sum(individual)\r\n fitness = metrics.calinski_harabasz_score(corr, labels)\r\n\r\n corr = None\r\n m1 = None\r\n m2 = None\r\n m3 = None\r\n\r\n return fitness\r\n \r\ndef calculatePopulationFitness(population, algorithm, n_labels, ground_truth, m1, m2, m3):\r\n population_fitness = []\r\n for i in range(len(population)):\r\n fitness = fitnessFunction(population[i], algorithm, n_labels, ground_truth, m1, m2, m3)\r\n population_fitness.append(fitness)\r\n\r\n to_sort = list(zip(population, population_fitness))\r\n sorted_by_fitness = sorted(to_sort, key=lambda x: x[1], reverse=True)\r\n return sorted_by_fitness\r\n\r\ndef selectFromPopulation(population, n_fittest, n_random):\r\n next_generation = []\r\n for i in range(n_fittest):\r\n next_generation.append(population[i][0])\r\n\r\n for j in range(n_random):\r\n index = random.randint(0, len(population)-1)\r\n next_generation.append(population[index][0])\r\n\r\n random.shuffle(next_generation)\r\n return next_generation\r\n\r\ndef createChild(individual1, individual2):\r\n # create child with weights chosen randomly from parents\r\n child = []\r\n for i in range(len(individual1)):\r\n choice = random.randint(1,2)\r\n if choice == 1:\r\n child.append(individual1[i])\r\n elif choice == 2:\r\n child.append(individual2[i])\r\n return child\r\n\r\ndef createChildren(breeders, number_of_children):\r\n\tnext_population = []\r\n\tfor i in range(int(len(breeders)/2)):\r\n\t\tfor j in range(number_of_children):\r\n\t\t\tnext_population.append(createChild(breeders[i], breeders[len(breeders) -1 -i]))\r\n\treturn next_population\r\n\r\ndef mutateIndividual(individual):\r\n #choose mutation\r\n rand = random.randint(0,1)\r\n if rand == 0:\r\n return swapWeightsMutation(individual)\r\n else:\r\n return generateNewWeightsMutation(individual)\r\n\r\ndef generateNewWeightsMutation(individual):\r\n position = random.randint(0,2)\r\n new_weight = generateRandomInt()\r\n individual[position] = new_weight\r\n return individual\r\n\r\ndef swapWeightsMutation(individual):\r\n #swap weight positions\r\n position1 = random.randint(0,2)\r\n position2 = random.randint(0,2)\r\n while position1 == position2:\r\n position2 = random.randint(0,2)\r\n tmp = individual[position1]\r\n individual[position1] = individual[position2]\r\n individual[position2] = tmp\r\n return individual\r\n\r\ndef mutatePopulation(population, mutation_chance):\r\n for i in range(len(population)):\r\n chance = generateRandomInt()\r\n # mutate each individual with probability mutation_chance\r\n if chance < mutation_chance:\r\n population[i] = mutateIndividual(population[i])\r\n return population\r\n\r\n# def getFittestIndividual(population, labels, ground_truth):\r\n\r\n# max_fitness = 0\r\n# best_individual = []\r\n\r\n# for individual in population:\r\n# if fitnessFunction(individual, labels, ground_truth) > max_fitness:\r\n# best_individual = individual\r\n# max_fitness = fitnessFunction(individual, labels, ground_truth)\r\n\r\n# return best_individual, max_fitness \r\n","sub_path":"Clustering/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
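A hedged sketch of how the helpers above compose into one full generation; the similarity matrices m1..m3, ground_truth and n_labels are assumed to come from the surrounding project and are not defined here, and the default counts are purely illustrative:

def evolve_one_generation(population, algorithm, n_labels, ground_truth,
                          m1, m2, m3, n_fittest=10, n_random=4,
                          children_per_pair=5, mutation_chance=0.1):
    # rank the current population by fitness (best first)
    ranked = calculatePopulationFitness(population, algorithm, n_labels,
                                        ground_truth, m1, m2, m3)
    # keep the fittest plus a few random individuals for diversity
    breeders = selectFromPopulation(ranked, n_fittest, n_random)
    # pair breeders from opposite ends of the list and produce offspring
    children = createChildren(breeders, children_per_pair)
    # occasionally perturb the offspring
    return mutatePopulation(children, mutation_chance)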
{"seq_id":"548935663","text":"import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nfrom math_convert import *\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\ncap = cv2.VideoCapture(0)\n\nsys.path.append(\"..\")\n\nMODEL_NAME = 'model'\n\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\nPATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')\nNUM_CLASSES = 24\n\ndetection_graph = tf.Graph()\n\nwith detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\nwith detection_graph.as_default():\n with tf.compat.v1.Session(graph=detection_graph) as sess:\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n while True:\n ret, frame = cap.read()\n height = 720\n width = 1280\n image_np_expanded = np.expand_dims(frame, axis=0)\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=5)\n\n od_list = [[category_index.get(value).get('name'), boxes[0][index][1] * width] for index,\n value in enumerate(classes[0]) if scores[0, index] > 0.65]\n od_list_seq = sorted(od_list, key=lambda x:(-x[1], x[0]), reverse=True)\n od_list_co = [seq[0] for seq in od_list_seq]\n od_list_co = convop(od_list_co)\n co_num_list = combint(od_list_co)\n exp_result = chkfl(co_num_list)\n result = getresult(co_num_list, exp_result)\n\n if str(result) == '...':\n obj = str(exp_result)\n else:\n obj = str(exp_result) + ' is ' + str(result)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(frame, obj, (0, 430), font, 2, (0, 0, 0), 0, cv2.LINE_AA)\n cv2.imshow('object detection', cv2.resize(frame, (width, height)))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cap.release()\n # videoFile.release()\n cv2.destroyAllWindows()\n break\n","sub_path":"DeepLearningMinor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
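The od_list sorting above orders detected symbols by the x-coordinate of their bounding box so the expression is read left to right; a self-contained illustration of that exact sort expression with made-up (name, x_position) detections:

dets = [['7', 400.0], ['+', 250.0], ['3', 100.0]]
ordered = sorted(dets, key=lambda x: (-x[1], x[0]), reverse=True)
# reverse=True on the negated x-coordinate yields ascending x, i.e. left to right
print(''.join(name for name, _ in ordered))  # 3+7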
{"seq_id":"26339799","text":"# Printing the content of a file\r\nfin = open('words.txt')\r\nfor line in fin:\r\n word = line.strip()\r\n #print(word)\r\n\r\n# Printing words that have 20 or more characters\r\nfin = open('words.txt')\r\nfor line in fin:\r\n word = line.strip()\r\n if len(word) >= 20:\r\n print(word)\r\n\r\n# Searching for the letter 'e'\r\ndef has_no_e(word):\r\n for letter in word:\r\n if letter == 'e':\r\n return False\r\n return True\r\n \r\n \r\n","sub_path":"ReadingFile(words.txt).py","file_name":"ReadingFile(words.txt).py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
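A small usage sketch for has_no_e above, assuming a words.txt with one word per line as in the snippet (the helper name count_e_free is illustrative):

def count_e_free(path='words.txt'):
    total = e_free = 0
    with open(path) as fin:
        for line in fin:
            word = line.strip()
            total += 1
            if has_no_e(word):
                e_free += 1
    # Guard against an empty file before dividing
    if total:
        print('%d of %d words (%.1f%%) have no "e"' % (e_free, total, 100.0 * e_free / total))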
{"seq_id":"233254023","text":"import numpy as np\nimport scipy.signal\n\ndef fn_conv(input, params, hyper_params, backprop, dv_output=None):\n \"\"\"\n Args:\n input: The input data to the layer function. [in_height] x [in_width] x [num_channels] x [batch_size] array\n params: Weight and bias information for the layer.\n params['W']: layer weights, [filter_height] x [filter_width] x [filter_depth] x [num_filters] array\n params['b']: layer bias, [num_filters] x 1 array\n hyper_params: Optional, could include information such as stride and padding.\n backprop: Boolean stating whether or not to compute the output terms for backpropagation.\n dv_output: The partial derivative of the loss with respect to each element in the output matrix. Only passed in when backprop is set to true. Same size as output.\n\n Returns:\n output: Output of layer, [out_height] x [out_width] x [num_filters] x [batch_size] array\n dv_input: The derivative of the loss with respect to the input. Same size as input.\n grad: The gradient term that you will use to update the weights defined in params and train your network. Dictionary with same structure as params.\n grad['W']: gradient wrt weights, same size as params['W']\n grad['b']: gradient wrt bias, same size as params['b']\n \"\"\"\n in_height, in_width, num_channels, batch_size = input.shape\n filter_height, filter_width, filter_depth, num_filters = params['W'].shape\n out_height = in_height - params['W'].shape[0] + 1\n out_width = in_width - params['W'].shape[1] + 1\n\n assert params['W'].shape[2] == input.shape[2], 'Filter depth does not match number of input channels'\n\n # Initialize\n output = np.zeros((out_height, out_width, num_filters, batch_size))\n dv_input = np.zeros(0)\n grad = {'W': np.zeros(0),\n 'b': np.zeros(0)}\n \n # FORWARD CODE\n # Update output with values\n W = params['W']\n b = params['b']\n for f in range(num_filters):\n for batch in range(batch_size):\n temp = scipy.signal.convolve(input[:, :, :, batch], np.flip(W[:, :, :, f]), mode = \"valid\")\n temp = np.reshape(temp, (out_height, out_width))\n output[:, :, f, batch] = temp + b[f]\n\n if backprop:\n assert dv_output is not None\n dv_input = np.zeros(input.shape)\n grad['W'] = np.zeros(params['W'].shape)\n grad['b'] = np.zeros(params['b'].shape)\n \n # BACKPROP CODE\n # Update dv_input and grad with values\n # to calculate dL/dw, convolve dL/dy with the input image after flipping \n # go one batch at a time\n grad_w = np.zeros((W.shape[0], W.shape[1], W.shape[2], W.shape[3], batch_size))\n for batch in range(batch_size):\n # go through all the filters for this batch and convolve with the flipped input, and then save into array\n for filter in range(num_filters):\n for channel in range(num_channels):\n curr_filter = dv_output[:, :, filter, batch]\n curr_flipped_batch_input = np.flip(input[:, :, channel, batch])\n # do the convolution\n temp = scipy.signal.convolve(curr_flipped_batch_input, curr_filter, mode = \"valid\")\n # this is one image with one filter derivative with dimensions [filter_height] x [filter_width] x [filter_depth], so save this to output\n grad_w[:, :, channel, filter, batch] = temp\n # need to sum and divide all by batch_size\n grad_w_final = np.zeros((W.shape[0], W.shape[1], W.shape[2], W.shape[3]))\n for batch in range(batch_size):\n grad_w_final = grad_w_final + grad_w[:, :, :, :, batch]\n grad_w_final = grad_w_final / batch_size\n grad['W'] = grad_w_final\n for i in range(num_filters):\n for j in range(filter_depth):\n grad['W'][:, :, j, i] = np.rot90(grad['W'][:, :, j, i], 2)\n\n # to calculate dL/db, calculate dy/db and multiply that with dL/dy\n # get dL/db per batch and then sum and divide by batch_size\n dy_db = np.zeros((output.shape[0], output.shape[1], output.shape[2], num_filters, batch_size))\n for batch in range(batch_size):\n # dy_{a, b, filter}/db_{filter} is equal to 1 if they are associated with the same filter, otherwise zero\n for filter in range(num_filters):\n dy_db[:, :, filter, filter, batch] = 1\n\n # use chain rule with weighted sum dL/db = dL/dy * dy/db\n dL_db = np.zeros((num_filters, batch_size))\n for batch in range(batch_size):\n for b in range(num_filters):\n sum = 0\n for i in range(output.shape[0]):\n for j in range(output.shape[1]):\n for k in range(output.shape[2]):\n sum = sum + dv_output[i, j, k, batch] * dy_db[i, j, k, b, batch]\n dL_db[b, batch] = sum\n\n # need to sum and divide all by batch_size\n grad_b = np.zeros(num_filters)\n for batch in range(batch_size):\n grad_b = grad_b + dL_db[:, batch]\n grad_b = grad_b / batch_size\n grad['b'] = grad_b.reshape(grad_b.shape + (1,))\n\n # to calculate dL/dx, calculate dy/dx and multiply that with dL/dy\n # get dy/dx per batch\n \n dy_dx = np.zeros((out_height, out_width, num_filters, in_height, in_width, num_channels, batch_size))\n for batch in range(batch_size):\n for filter in range(num_filters):\n for height in range(out_height):\n for width in range(out_width):\n dy_dx[height, width, filter, height:height + filter_height, width:width + filter_width , :, batch] = W[:, :, :, filter]\n \n # use chain rule with weighted sum dL/dx = dL/dy * dy/dx\n dL_dx = np.zeros((in_height, in_width, num_channels, batch_size))\n for batch in range(batch_size):\n for x_height in range(in_height):\n for x_width in range(in_width):\n for channel in range(num_channels):\n # for a particular dx, sum over product of all dy's\n sum = 0\n for i in range(out_height):\n for j in range(out_width):\n for k in range(num_filters):\n sum = sum + dv_output[i, j, k, batch] * dy_dx[i, j, k, x_height, x_width, channel, batch]\n dL_dx[x_height, x_width, channel, batch] = sum\n\n dv_input = dL_dx\n \n\n return output, dv_input, grad\n","sub_path":"fn_conv.py","file_name":"fn_conv.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"39"}
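A quick numerical check of the bias gradient, assuming fn_conv as defined above; with dv_output set to all ones the implied loss is the plain sum of the output. Note that fn_conv averages its gradients over the batch, so the finite-difference value is compared against grad['b'] times the batch size:

import numpy as np

def check_bias_gradient(eps=1e-5):
    rng = np.random.RandomState(0)
    x = rng.randn(5, 5, 2, 3)
    params = {'W': rng.randn(3, 3, 2, 4), 'b': rng.randn(4, 1)}
    dv_out = np.ones((3, 3, 4, 3))  # out_height x out_width x num_filters x batch
    _, _, grad = fn_conv(x, params, None, True, dv_output=dv_out)
    numeric = np.zeros_like(params['b'])
    for f in range(params['b'].shape[0]):
        # central finite difference on the f-th bias entry
        params['b'][f] += eps
        plus = fn_conv(x, params, None, False)[0].sum()
        params['b'][f] -= 2 * eps
        minus = fn_conv(x, params, None, False)[0].sum()
        params['b'][f] += eps
        numeric[f] = (plus - minus) / (2 * eps)
    print(np.max(np.abs(numeric - grad['b'] * x.shape[3])))  # should be ~0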
{"seq_id":"451819358","text":"#!/usr/bin/env python\n# vim: set et sw=4 sts=4 fileencoding=utf-8:\n#\n# A library for reading Microsoft's OLE Compound Document format\n# Copyright (c) 2014 Dave Hughes \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import (\n unicode_literals,\n absolute_import,\n print_function,\n division,\n )\nstr = type('')\n\n\nimport compoundfiles\nimport pytest\nimport warnings\nfrom collections import namedtuple\n\nDirEntry = namedtuple('DirEntry', ('name', 'isfile', 'size'))\n\ndef setup_module(module):\n warnings.simplefilter('always')\n\ndef verify_contents(doc, contents):\n for entry in contents:\n assert entry.name in doc.root\n assert doc.root[entry.name].isfile == entry.isfile\n assert not doc.root[entry.name].isdir == entry.isfile\n if entry.isfile:\n assert doc.root[entry.name].size == entry.size\n else:\n assert len(doc.root[entry.name]) == entry.size\n\n\ndef test_function_sample1_doc():\n with compoundfiles.CompoundFileReader('tests/sample1.doc') as doc:\n contents = (\n DirEntry('1Table', True, 8375),\n DirEntry('\x01CompObj', True, 106),\n DirEntry('ObjectPool', False, 0),\n DirEntry('WordDocument', True, 9280),\n DirEntry('\x05SummaryInformation', True, 4096),\n DirEntry('\x05DocumentSummaryInformation', True, 4096),\n )\n verify_contents(doc, contents)\n\ndef test_function_sample1_xls():\n with compoundfiles.CompoundFileReader('tests/sample1.xls') as doc:\n contents = (\n DirEntry('Workbook', True, 11073),\n DirEntry('\x05SummaryInformation', True, 4096),\n DirEntry('\x05DocumentSummaryInformation', True, 4096),\n )\n verify_contents(doc, contents)\n\ndef test_function_sample2_doc():\n with compoundfiles.CompoundFileReader('tests/sample2.doc') as doc:\n contents = (\n DirEntry('Data', True, 8420),\n DirEntry('1Table', True, 19168),\n DirEntry('\x01CompObj', True, 113),\n DirEntry('WordDocument', True, 25657),\n DirEntry('\x05SummaryInformation', True, 4096),\n DirEntry('\x05DocumentSummaryInformation', True, 4096),\n )\n verify_contents(doc, contents)\n\ndef test_function_sample2_xls():\n with compoundfiles.CompoundFileReader('tests/sample2.xls') as doc:\n contents = (\n DirEntry('\x01Ole', True, 20),\n DirEntry('\x01CompObj', True, 73),\n DirEntry('Workbook', True, 1695),\n DirEntry('\x05SummaryInformation', True, 228),\n DirEntry('\x05DocumentSummaryInformation', True, 116),\n )\n verify_contents(doc, contents)\n\n","sub_path":"tests/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"38"}
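A hedged usage sketch for the reader exercised by these tests: list the root entries of one sample file and read the start of a stream. CompoundFileReader, .root, .name and .isfile all appear in the tests above; the reader's open() call is assumed from the library's reader interface rather than shown in this entry, so treat it as illustrative:

import compoundfiles

with compoundfiles.CompoundFileReader('tests/sample1.doc') as doc:
    for entity in doc.root:
        print('file' if entity.isfile else 'dir', repr(entity.name))
    # Read the first bytes of a stream named in the tests above
    with doc.open('WordDocument') as stream:
        print(stream.read(16))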